pax_global_header00006660000000000000000000000064151436024240014513gustar00rootroot0000000000000052 comment=2d4310116dd67ee985819486ba32244d5c273ad6 benoitc-gunicorn-f5fb19e/000077500000000000000000000000001514360242400154635ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/.github/000077500000000000000000000000001514360242400170235ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/.github/DISCUSSION_TEMPLATE/000077500000000000000000000000001514360242400220015ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/.github/DISCUSSION_TEMPLATE/issue-triage.yml000066400000000000000000000064651514360242400251400ustar00rootroot00000000000000title: "[Triage] " labels: - triage body: - type: markdown attributes: value: | Thanks for taking the time to report an issue or suggest a feature! **Before submitting, please:** - Search [existing discussions](https://github.com/benoitc/gunicorn/discussions) and [issues](https://github.com/benoitc/gunicorn/issues) for duplicates - Check the [FAQ](https://gunicorn.org/faq/) and [documentation](https://gunicorn.org/) - type: dropdown id: type attributes: label: Type description: What type of issue is this? options: - Bug Report - Feature Request - Performance Issue - Documentation Issue validations: required: true - type: textarea id: description attributes: label: Description description: A clear description of the issue or feature request placeholder: | For bugs: What happened? What did you expect? For features: What problem does this solve? validations: required: true - type: textarea id: reproduce attributes: label: Steps to Reproduce (for bugs) description: Minimal steps to reproduce the behavior placeholder: | 1. Create a simple app with... 2. Run gunicorn with... 3. Send request... 4. See error... 
validations: required: false - type: textarea id: config attributes: label: Configuration description: Your gunicorn configuration (command line or config file) render: bash placeholder: | gunicorn --workers 4 --bind 0.0.0.0:8000 myapp:app validations: required: false - type: textarea id: logs attributes: label: Logs / Error Output description: Relevant logs or error messages (use --log-level debug for more detail) render: text validations: required: false - type: input id: gunicorn-version attributes: label: Gunicorn Version description: Output of `gunicorn --version` placeholder: gunicorn 24.1.0 validations: required: true - type: input id: python-version attributes: label: Python Version description: Output of `python --version` placeholder: Python 3.12.0 validations: required: true - type: dropdown id: worker-class attributes: label: Worker Class description: Which worker type are you using? options: - sync (default) - gthread - gevent - eventlet - tornado - asgi (beta) - custom - N/A (feature request) validations: required: true - type: input id: os attributes: label: Operating System description: Your OS and version placeholder: Ubuntu 22.04, macOS 14.0, etc. validations: required: true - type: textarea id: additional attributes: label: Additional Context description: Any other context (proxy setup, Docker, proposed solution, etc.) validations: required: false - type: checkboxes id: checklist attributes: label: Checklist options: - label: I have searched existing discussions and issues for duplicates required: true - label: I have checked the documentation and FAQ required: true benoitc-gunicorn-f5fb19e/.github/DISCUSSION_TEMPLATE/question.yml000066400000000000000000000024061514360242400243750ustar00rootroot00000000000000title: "[Question] " body: - type: markdown attributes: value: | Have a question about Gunicorn? 
Before asking, please check: - [Documentation](https://gunicorn.org/) - [FAQ](https://gunicorn.org/faq/) - [Settings Reference](https://gunicorn.org/reference/settings/) - [Existing discussions](https://github.com/benoitc/gunicorn/discussions) - type: textarea id: question attributes: label: Question description: What would you like to know? validations: required: true - type: textarea id: context attributes: label: Context description: Any relevant context (your setup, what you've tried, etc.) placeholder: | I'm running gunicorn with... I've tried... validations: required: false - type: textarea id: config attributes: label: Configuration (if relevant) description: Your gunicorn configuration render: bash validations: required: false - type: checkboxes id: checklist attributes: label: Checklist options: - label: I have checked the documentation and FAQ required: true - label: I have searched existing discussions required: true benoitc-gunicorn-f5fb19e/.github/FUNDING.yml000066400000000000000000000001761514360242400206440ustar00rootroot00000000000000github: [benoitc] open_collective: gunicorn custom: ["https://checkout.revolut.com/pay/c934e028-3a71-44eb-b99c-491342df2044"] benoitc-gunicorn-f5fb19e/.github/ISSUE_TEMPLATE/000077500000000000000000000000001514360242400212065ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000006261514360242400232020ustar00rootroot00000000000000blank_issues_enabled: false contact_links: - name: Bug Report / Feature Request url: https://github.com/benoitc/gunicorn/discussions/new?category=issue-triage about: Report a bug or request a feature (triaged before becoming an issue) - name: Question url: https://github.com/benoitc/gunicorn/discussions/new?category=q-a about: Ask a question about configuration, deployment, or usage benoitc-gunicorn-f5fb19e/.github/ISSUE_TEMPLATE/preapproved.md000066400000000000000000000005301514360242400240550ustar00rootroot00000000000000--- name: 
Pre-Discussed and Approved Topics about: Only for topics already discussed and approved in GitHub Discussions title: '' labels: '' assignees: '' --- **Only for topics already discussed and approved in the GitHub Discussions section.** DO NOT OPEN A NEW ISSUE. PLEASE USE THE DISCUSSIONS SECTION. Link to approved discussion: --- benoitc-gunicorn-f5fb19e/.github/dependabot.yml000066400000000000000000000001671514360242400216570ustar00rootroot00000000000000version: 2 updates: - package-ecosystem: "github-actions" directory: "/" schedule: interval: "monthly" benoitc-gunicorn-f5fb19e/.github/workflows/000077500000000000000000000000001514360242400210605ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/.github/workflows/docker-integration.yml000066400000000000000000000017441514360242400254010ustar00rootroot00000000000000name: Docker Integration Tests on: push: branches: [master] paths: - 'gunicorn/uwsgi/**' - 'tests/docker/uwsgi/**' - '.github/workflows/docker-integration.yml' pull_request: paths: - 'gunicorn/uwsgi/**' - 'tests/docker/uwsgi/**' - '.github/workflows/docker-integration.yml' permissions: contents: read env: FORCE_COLOR: 1 jobs: uwsgi-nginx: name: uWSGI Protocol with nginx runs-on: ubuntu-latest timeout-minutes: 15 steps: - uses: actions/checkout@v6 - name: Set up Python uses: actions/setup-python@v6 with: python-version: "3.12" cache: pip cache-dependency-path: requirements_test.txt - name: Install test dependencies run: | python -m pip install --upgrade pip python -m pip install pytest pytest-cov requests - name: Run uWSGI integration tests run: | pytest tests/docker/uwsgi/ -v --tb=short benoitc-gunicorn-f5fb19e/.github/workflows/docker-publish.yml000066400000000000000000000027151514360242400245230ustar00rootroot00000000000000name: Docker Publish on: push: tags: - 'v*' - '[0-9]+.[0-9]+.[0-9]+' workflow_dispatch: permissions: contents: read packages: write env: REGISTRY: ghcr.io IMAGE_NAME: ${{ github.repository }} jobs: build-and-push: name: Build and 
Push Docker Image runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 - name: Set up QEMU uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Log in to Container Registry uses: docker/login-action@v3 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Extract metadata id: meta uses: docker/metadata-action@v5 with: images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} tags: | type=semver,pattern={{version}} type=semver,pattern={{major}}.{{minor}} type=semver,pattern={{major}} type=raw,value=latest,enable={{is_default_branch}} - name: Build and push uses: docker/build-push-action@v6 with: context: . file: docker/Dockerfile platforms: linux/amd64,linux/arm64 push: true tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} cache-from: type=gha cache-to: type=gha,mode=max benoitc-gunicorn-f5fb19e/.github/workflows/docs.yml000066400000000000000000000036561514360242400225450ustar00rootroot00000000000000name: Docs on: push: branches: [ master ] paths: - 'docs/**' - 'mkdocs.yml' - 'scripts/build_settings_doc.py' - 'gunicorn/config.py' - 'requirements_dev.txt' - '.github/workflows/docs.yml' pull_request: paths: - 'docs/**' - 'mkdocs.yml' - 'scripts/build_settings_doc.py' - 'gunicorn/config.py' - 'requirements_dev.txt' - '.github/workflows/docs.yml' workflow_dispatch: jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 - name: Set up Python uses: actions/setup-python@v6 with: python-version: '3.12' - name: Install dependencies run: | python -m pip install --upgrade pip pip install -e . 
pip install -r requirements_dev.txt - name: Build documentation run: mkdocs build - name: Upload site artifact uses: actions/upload-artifact@v6 with: name: gunicorn-site path: site retention-days: 7 deploy: if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && github.ref == 'refs/heads/master' needs: build runs-on: ubuntu-latest permissions: contents: write steps: - uses: actions/checkout@v6 - name: Set up Python uses: actions/setup-python@v6 with: python-version: '3.12' - name: Install dependencies run: | python -m pip install --upgrade pip pip install -e . pip install -r requirements_dev.txt - name: Build documentation run: mkdocs build - name: Deploy to GitHub Pages uses: peaceiris/actions-gh-pages@v4 with: github_token: ${{ secrets.GITHUB_TOKEN }} publish_dir: site publish_branch: gh-pages cname: gunicorn.org commit_message: "docs: deploy ${{ github.sha }}" benoitc-gunicorn-f5fb19e/.github/workflows/embedding-integration.yml000066400000000000000000000016631514360242400260500ustar00rootroot00000000000000name: Embedding Service Integration Tests on: push: paths: - 'examples/embedding_service/**' - 'gunicorn/dirty/**' pull_request: paths: - 'examples/embedding_service/**' - 'gunicorn/dirty/**' jobs: test: runs-on: ubuntu-latest timeout-minutes: 15 steps: - uses: actions/checkout@v6 - name: Build and start service run: | cd examples/embedding_service docker compose up -d --build docker compose logs -f & - name: Wait for healthy run: | for i in {1..30}; do curl -s http://127.0.0.1:8000/health && break sleep 2 done - name: Run tests run: | pip install requests numpy python examples/embedding_service/test_embedding.py - name: Cleanup if: always() run: | cd examples/embedding_service docker compose down benoitc-gunicorn-f5fb19e/.github/workflows/freebsd.yml000066400000000000000000000023421514360242400232160ustar00rootroot00000000000000name: FreeBSD on: push: pull_request: workflow_dispatch: permissions: contents: read env: FORCE_COLOR: 1 
jobs: test: name: FreeBSD ${{ matrix.freebsd-version }} / Python ${{ matrix.python-version }} runs-on: ubuntu-latest timeout-minutes: 30 strategy: fail-fast: false matrix: include: - freebsd-version: '14.2' python-version: '3.12' python-pkg: 'python312 py312-sqlite3' - freebsd-version: '14.2' python-version: '3.13' python-pkg: 'python313 py313-sqlite3' steps: - uses: actions/checkout@v6 - name: Test on FreeBSD uses: vmactions/freebsd-vm@v1 with: release: ${{ matrix.freebsd-version }} usesh: true prepare: | pkg install -y ${{ matrix.python-pkg }} run: | python${{ matrix.python-version }} -m venv venv . venv/bin/activate pip install --upgrade pip pip install pytest pytest-cov pytest-asyncio coverage pip install -e . pytest --cov=gunicorn -v tests/ \ --ignore=tests/workers/test_ggevent.py \ --ignore=tests/workers/test_geventlet.py benoitc-gunicorn-f5fb19e/.github/workflows/lint.yml000066400000000000000000000033751514360242400225610ustar00rootroot00000000000000name: lint on: [push, pull_request] permissions: contents: read # to fetch code (actions/checkout) env: # note that some tools care only for the name, not the value FORCE_COLOR: 1 jobs: lint: name: ${{ matrix.python-version }} / tox-${{ matrix.toxenv || '(other)' }} timeout-minutes: 10 runs-on: ubuntu-latest strategy: fail-fast: false matrix: toxenv: [lint, pycodestyle] python-version: [ "3.12" ] include: # for actions that want git env, not tox env - toxenv: null python-version: "3.12" steps: - uses: actions/checkout@v6 - name: Using Python ${{ matrix.python-version }} uses: actions/setup-python@v6 with: python-version: ${{ matrix.python-version }} cache: pip - name: Install Dependencies (tox) if: ${{ matrix.toxenv }} run: | python -m pip install --upgrade pip python -m pip install tox - run: tox -e ${{ matrix.toxenv }} if: ${{ matrix.toxenv }} - name: Install Dependencies (non-toxic) if: ${{ ! matrix.toxenv }} run: | python -m pip install --upgrade pip python -m pip install -e . 
- name: "Check generated docs" if: ${{ ! matrix.toxenv }} run: | # Regenerate settings.md and check for uncommitted changes python scripts/build_settings_doc.py if unclean=$(git status --untracked-files=no --porcelain) && [ -z "$unclean" ]; then echo "no uncommitted changes in working tree (as it should be)" else echo "did you forget to run 'python scripts/build_settings_doc.py'?" echo "$unclean" git diff exit 2 fi benoitc-gunicorn-f5fb19e/.github/workflows/tox.yml000066400000000000000000000035001514360242400224130ustar00rootroot00000000000000name: tox on: [push, pull_request] permissions: contents: read # to fetch code (actions/checkout) env: # note that some tools care only for the name, not the value FORCE_COLOR: 1 jobs: tox: name: ${{ matrix.os }} / ${{ matrix.python-version }} # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idtimeout-minutes timeout-minutes: 20 runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: unsupported: [false] os: - ubuntu-latest # Not testing Windows: tests need Unix-only fcntl, grp, pwd, etc. 
# FreeBSD: tested in separate freebsd.yml workflow python-version: # Supporting Python 3.10 through 3.13 - "3.10" - "3.11" - "3.12" - "3.13" - "pypy-3.10" include: # Test on macos-latest (arm64) with recent versions - os: macos-latest python-version: "3.12" unsupported: false - os: macos-latest python-version: "3.13" unsupported: false steps: - uses: actions/checkout@v6 - name: Using Python ${{ matrix.python-version }} uses: actions/setup-python@v6 with: python-version: ${{ matrix.python-version }} cache: pip cache-dependency-path: requirements_test.txt check-latest: true allow-prereleases: ${{ matrix.unsupported }} - name: Install Dependencies run: | python -m pip install --upgrade pip python -m pip install tox - run: tox -e run-module continue-on-error: ${{ matrix.unsupported }} - run: tox -e run-entrypoint continue-on-error: ${{ matrix.unsupported }} - run: tox -e py continue-on-error: ${{ matrix.unsupported }} benoitc-gunicorn-f5fb19e/.gitignore000077500000000000000000000004501514360242400174550ustar00rootroot00000000000000*.egg *.egg-info *.pyc *.so .coverage .pytest_cache .tox __pycache__ build docs/_build coverage.xml dist examples/frameworks/django/testing/testdb.sql examples/frameworks/pylonstest/PasteScript* examples/frameworks/pylonstest/pylonstest.egg-info/ MANIFEST nohup.out setuptools-* site/ docs/site/ benoitc-gunicorn-f5fb19e/.pylintrc000066400000000000000000000023501514360242400173300ustar00rootroot00000000000000[MASTER] ignore= build, docs, examples, scripts, _compat.py, _gaiohttp.py, [MESSAGES CONTROL] disable= attribute-defined-outside-init, bad-mcs-classmethod-argument, bare-except, broad-except, cyclic-import, duplicate-bases, duplicate-code, eval-used, fixme, import-error, import-outside-toplevel, import-self, inconsistent-return-statements, invalid-name, missing-docstring, no-else-return, no-member, no-self-argument, no-staticmethod-decorator, not-callable, possibly-used-before-assignment, protected-access, raise-missing-from, 
redefined-outer-name, too-few-public-methods, too-many-arguments, too-many-branches, too-many-instance-attributes, too-many-lines, too-many-locals, too-many-nested-blocks, too-many-positional-arguments, too-many-public-methods, too-many-statements, used-before-assignment, wrong-import-position, wrong-import-order, ungrouped-imports, unused-argument, useless-object-inheritance, useless-import-alias, comparison-with-callable, try-except-raise, consider-using-with, consider-using-f-string, unspecified-encoding benoitc-gunicorn-f5fb19e/CONTRIBUTING.md000066400000000000000000000163161514360242400177230ustar00rootroot00000000000000# Contributing to Gunicorn Want to hack on Gunicorn? Awesome! Here are instructions to get you started. They are probably not perfect, please let us know if anything feels wrong or incomplete. ## Contribution guidelines ### Pull requests are always welcome We are always thrilled to receive pull requests, and do our best to process them as fast as possible. Not sure if that typo is worth a pull request? Do it! We will appreciate it. If your pull request is not accepted on the first try, don't be discouraged! If there's a problem with the implementation, hopefully you received feedback on what to improve. We're trying very hard to keep Gunicorn lean and focused. We don't want it to do everything for everybody. This means that we might decide against incorporating a new feature. However, there might be a way to implement that feature *on top of* Gunicorn. ### Start with a Discussion We use [GitHub Discussions](https://github.com/benoitc/gunicorn/discussions) as the starting point for all bug reports, feature requests, and questions. This allows for proper triage before creating formal issues. 
- **Bug reports**: Start in [Q&A](https://github.com/benoitc/gunicorn/discussions/categories/q-a) - **Feature requests**: Start in [Ideas](https://github.com/benoitc/gunicorn/discussions/categories/ideas) - **Questions**: Start in [Q&A](https://github.com/benoitc/gunicorn/discussions/categories/q-a) After discussion and triage, maintainers will create issues for confirmed bugs and approved features. ### Check for existing discussions first! Please take a moment to check that a discussion or issue doesn't already exist documenting your bug report or improvement proposal. If it does, it never hurts to add a quick "+1" or "I have this problem too". This will help prioritize the most common problems and requests. ### Conventions Don't comment on closed issues or PRs, instead open a new issue and link it to the old one. Fork the repo and make changes on your fork in a feature branch: - If it's a bugfix branch, name it XXX-something where XXX is the number of the issue - If it's a feature branch, create an enhancement issue to announce your intentions, and name it XXX-something where XXX is the number of the issue. Submit unit tests for your changes. Python has a great test framework built in; use it! Take a look at existing tests for inspiration. Run the full test suite on your branch before submitting a pull request. Make sure you include relevant updates or additions to documentation when creating or modifying features. If you are adding a new configuration option or updating an existing one, please do it in `gunicorn/config.py`, then run `make -C docs html` to update `docs/source/settings.rst`. Write clean code. Pull requests descriptions should be as clear as possible and include a reference to all the issues that they address. Code review comments may be added to your pull request. Discuss, then make the suggested modifications and push additional commits to your feature branch. Be sure to post a comment after pushing. 
The new commits will show up in the pull request automatically, but the reviewers will not be notified unless you comment. Before the pull request is merged, make sure that you squash your commits into logical units of work using `git rebase -i` and `git push -f`. After every commit the test suite should be passing. Include documentation changes in the same commit so that a revert would remove all traces of the feature or fix. Commits that fix or close an issue should include a reference like `Closes #XXX` or `Fixes #XXX`, which will automatically close the issue when merged. Add your name to the THANKS file, but make sure the list is sorted and your name and email address match your git configuration. The THANKS file is regenerated occasionally from the git commit history, so a mismatch may result in your changes being overwritten. ## Decision process ### How are decisions made? Short answer: with pull requests to the gunicorn repository. Gunicorn is an open-source project under the MIT License with an open design philosophy. This means that the repository is the source of truth for EVERY aspect of the project, including its philosophy, design, roadmap and APIs. *If it's part of the project, it's in the repo. It's in the repo, it's part of the project.* As a result, all decisions can be expressed as changes to the repository. An implementation change is a change to the source code. An API change is a change to the API specification. A philosophy change is a change to the relevant documentation. And so on. All decisions affecting gunicorn, big and small, follow the same 3 steps: * Step 1: Open a pull request. Anyone can do this. * Step 2: Discuss the pull request. Anyone can do this. * Step 3: Accept or refuse a pull request. The relevant maintainer does this (see below "Who decides what?") ### Who decides what? So all decisions are pull requests, and the relevant maintainer makes the decision by accepting or refusing the pull request. 
But how do we identify the relevant maintainer for a given pull request? Gunicorn follows the timeless, highly efficient and totally unfair system known as [Benevolent dictator for life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with Benoit Chesneau (aka benoitc), in the role of BDFL. This means that all decisions are made by default by me. Since making every decision myself would be highly unscalable, in practice decisions are spread across multiple maintainers. The relevant maintainer for a pull request is assigned in 3 steps: * Step 1: Determine the subdirectory affected by the pull request. This might be src/registry, docs/source/api, or any other part of the repo. * Step 2: Find the MAINTAINERS file which affects this directory. If the directory itself does not have a MAINTAINERS file, work your way up the repo hierarchy until you find one. * Step 3: The first maintainer listed is the primary maintainer who is assigned the Pull Request. The primary maintainer can reassign a Pull Request to other listed maintainers. ### I'm a maintainer, should I make pull requests too? Primary maintainers are not required to create pull requests when changing their own subdirectory, but secondary maintainers are. ### Who assigns maintainers? benoitc. ### How can I become a maintainer? * Step 1: learn the component inside out * Step 2: make yourself useful by contributing code, bugfixes, support etc. * Step 3: volunteer on our [Libera Chat](https://libera.chat/) irc channel [#gunicorn](https://web.libera.chat/?channels=#gunicorn) Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available. You don't have to be a maintainer to make a difference on the project! ### What are a maintainer's responsibility? It is every maintainer's responsibility to: * 1) Expose a clear roadmap for improving their component. * 2) Deliver prompt feedback and decisions on pull requests. 
* 3) Be available to anyone with questions, bug reports, criticism etc. on their component. This includes irc, github requests and the mailing list. * 4) Make sure their component respects the philosophy, design and roadmap of the project. ### How is this process changed? Just like everything else: by making a pull request :) benoitc-gunicorn-f5fb19e/LICENSE000066400000000000000000000021601514360242400164670ustar00rootroot000000000000002009-2026 (c) Benoît Chesneau 2009-2015 (c) Paul J. Davis Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. benoitc-gunicorn-f5fb19e/MAINTAINERS000066400000000000000000000011761514360242400171650ustar00rootroot00000000000000Core maintainers ================ Benoit Chesneau Konstantin Kapustin Randall Leeds Berker Peksağ Jason Madden Brett Randall Alumni ====== This list contains maintainers that are no longer active on the project. It is thanks to these people that the project has become what it is today. Thank you! Paul J. 
Davis Kenneth Reitz Nikolay Kim Andrew Svetlov Stéphane Wirtel benoitc-gunicorn-f5fb19e/MANIFEST.in000066400000000000000000000006441514360242400172250ustar00rootroot00000000000000include .gitignore include LICENSE include NOTICE include README.md include THANKS include requirements_dev.txt include requirements_test.txt include tox.ini include .pylintrc recursive-include tests * recursive-include examples * recursive-include docs * recursive-include examples/frameworks * recursive-exclude * __pycache__ recursive-exclude docs/build * recursive-exclude docs/_build * recursive-exclude * *.py[co] benoitc-gunicorn-f5fb19e/Makefile000066400000000000000000000005211514360242400171210ustar00rootroot00000000000000build: virtualenv venv venv/bin/pip install -e . venv/bin/pip install -r requirements_dev.txt docs: mkdocs build docs-serve: mkdocs serve clean: @rm -rf .Python MANIFEST build dist venv* *.egg-info *.egg @find . -type f -name "*.py[co]" -delete @find . -type d -name "__pycache__" -delete .PHONY: build clean docs docs-serve benoitc-gunicorn-f5fb19e/NOTICE000066400000000000000000000073011514360242400163700ustar00rootroot00000000000000Gunicorn 2009-2026 (c) Benoît Chesneau 2009-2015 (c) Paul J. Davis Gunicorn is released under the MIT license. See the LICENSE file for the complete license. gunicorn.logging_config ----------------------- Copyright 2001-2005 by Vinay Sajip. All Rights Reserved. Permission to use, copy, modify, and distribute this software and its documentation for any purpose and without fee is hereby granted, provided that the above copyright notice appear in all copies and that both that copyright notice and this permission notice appear in supporting documentation, and that the name of Vinay Sajip not be used in advertising or publicity pertaining to distribution of the software without specific, written prior permission. VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. gunicorn.debug -------------- Based on eventlet.debug module under MIT license: Unless otherwise noted, the files in Eventlet are under the following MIT license: Copyright (c) 2005-2006, Bob Ippolito Copyright (c) 2007-2010, Linden Research, Inc. Copyright (c) 2008-2010, Eventlet Contributors (see Eventlet AUTHORS) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. gunicorn.reloader ----------------- Based on greins.reloader module under MIT license: 2010 (c) Meebo, Inc. 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. util/unlink.py -------------- backport from python3 Lib/test/support.py benoitc-gunicorn-f5fb19e/README.md000066400000000000000000000056411514360242400167500ustar00rootroot00000000000000# Gunicorn

Gunicorn is maintained by volunteers. If it powers your production, please consider supporting us:
GitHub Sponsors Open Collective Revolut

[![PyPI version](https://img.shields.io/pypi/v/gunicorn.svg?style=flat)](https://pypi.python.org/pypi/gunicorn) [![Supported Python versions](https://img.shields.io/pypi/pyversions/gunicorn.svg)](https://pypi.python.org/pypi/gunicorn) [![Build Status](https://github.com/benoitc/gunicorn/actions/workflows/tox.yml/badge.svg)](https://github.com/benoitc/gunicorn/actions/workflows/tox.yml) Gunicorn 'Green Unicorn' is a Python WSGI HTTP Server for UNIX. It's a pre-fork worker model ported from Ruby's [Unicorn](https://bogomips.org/unicorn/) project. The Gunicorn server is broadly compatible with various web frameworks, simply implemented, light on server resource usage, and fairly speedy. **New in v25**: Per-app worker allocation for dirty arbiters, HTTP/2 support (beta)! ## Quick Start ```bash pip install gunicorn gunicorn myapp:app --workers 4 ``` For ASGI applications (FastAPI, Starlette): ```bash gunicorn myapp:app --worker-class asgi ``` ## Features - WSGI support for Django, Flask, Pyramid, and any WSGI framework - **ASGI support** for FastAPI, Starlette, Quart - **HTTP/2 support** (beta) with multiplexed streams - **Dirty Arbiters** (beta) for heavy workloads (ML models, long-running tasks) - uWSGI binary protocol for nginx integration - Multiple worker types: sync, gthread, gevent, eventlet, asgi - Graceful worker process management - Compatible with Python 3.9+ ## Documentation Full documentation at https://gunicorn.org - [Quickstart](https://gunicorn.org/quickstart/) - [Configuration](https://gunicorn.org/configure/) - [Deployment](https://gunicorn.org/deploy/) - [Settings Reference](https://gunicorn.org/reference/settings/) ## Community - Report bugs on [GitHub Issues](https://github.com/benoitc/gunicorn/issues) - Chat in [#gunicorn](https://web.libera.chat/?channels=#gunicorn) on [Libera.chat](https://libera.chat/) - See [CONTRIBUTING.md](CONTRIBUTING.md) for contribution guidelines ## Support Powering Python apps since 2010. Support continued development. 
[![Become a Sponsor](https://img.shields.io/badge/Become_a_Sponsor-❤-ff69b4)](https://gunicorn.org/sponsor/) ## License Gunicorn is released under the MIT License. See the [LICENSE](https://github.com/benoitc/gunicorn/blob/master/LICENSE) file for details. benoitc-gunicorn-f5fb19e/SECURITY.md000066400000000000000000000021751514360242400172610ustar00rootroot00000000000000# Security Policy ## Reporting a Vulnerability **Please note that public Github issues are open for everyone to see!** If you believe you are found a problem in Gunicorn software, examples or documentation, we encourage you to send your report privately via [email](mailto:security@gunicorn.org?subject=Security%20issue%20in%20Gunicorn), or via Github using the *Report a vulnerability* button in the [Security](https://github.com/benoitc/gunicorn/security) section. ## Supported Releases Please target reports against :white_check_mark: or current master. Please understand that :x: will not receive further security attention. | Version | Status | | ------- | ------------------ | | 25.0.0 | :white_check_mark: | | 24.1.1 | :white_check_mark: | | 23.0.0 | :x: | | 22.0.0 | :x: | | < 22.0 | :x: | ## Python Versions Gunicorn runs on Python 3.10+, supporting Python versions that are still maintained by the PSF. We *highly recommend* the latest release of a [supported series](https://devguide.python.org/versions/) and will not prioritize issues affecting EoL environments. benoitc-gunicorn-f5fb19e/THANKS000066400000000000000000000161701514360242400164030ustar00rootroot00000000000000Gunicorn THANKS =============== A number of people have contributed to Gunicorn by reporting problems, suggesting improvements or submitting changes. 
Some of these people are: 414nch4n Aaron Kavlie aartur Adnane Belmadiaf Adrien CLERC Alasdair Nicol Alex Conrad Alex Gaynor Alex Robbins Alexandre Zani Alexis Le-Quoc Anand Chitipothu Andreas Stührk Andrew Burdo Andrew Svetlov Anil V Antoine Girard Anton Vlasenko Artur Kruchinin Bartosz Oler Ben Cochran Ben Oswald Benjamin Gilbert Benny Mei Benoit Chesneau Berker Peksag bninja Bob Hagemann Bobby Beckmann Brett Randall Brian Rosner Bruno Bigras Caleb Brown Chris Adams Chris Forbes Chris Lamb Chris Streeter Christian Clauss Christoph Heer Christos Stavrakakis CMGS Curt Micol Dan Callaghan Dan Sully Daniel Quinn Dariusz Suchojad David Black David Vincelli David Wolever Denis Bilenko Diego Oliveira Dima Barsky Djoume Salvetti Dmitry Medvinsky Dominik Działak Dustin Ingram Ed Morley Eric Florenzano Eric Shull Eugene Obukhov Evan Mezeske Florian Apolloner Gaurav Kumar George Kollias George Notaras German Larrain Graham Dumpleton Graham Dumpleton Greg McGuire Greg Taylor Hasan Ramezani Hebert J Hobson Lane Hugo van Kemenade Igor Petrov INADA Naoki Jakub Paweł Głazik Jan-Philip Gehrcke Jannis Leidel Jason Jones Jason Madden jean-philippe serafin Jeremy Volkman Jeroen Pulles Jeryn Mathew Jet Sun Jim Garrison Johan Bergström John Hensley Jonas Haag Jonas Nockert Jorge Niedbalski Jorge Niedbalski R Justin Quick keakon Keegan Carruthers-Smith Kenneth Reitz Kevin Gessner Kevin Littlejohn Kevin Luikens Kirill Zaborsky Konstantin Kapustin kracekumar Kristian Glass Kristian Øllegaard Krystian Krzysztof Urbaniak Kyle Kelley Kyle Mulka Lars Hansson Leonardo Santagada Levi Gross licunlong Łukasz Kucharski Mahmoud Hashemi Malthe Borch Marc Abramowitz Marc Abramowitz Mark Adams Matt Behrens Matt Billenstein Matt Good Matt Robenolt Maxim Kamenkov Mazdak Rezvani Michael Schurter Mieszko Mike Tigas Moriyoshi Koizumi mpaolini Neil Chintomby Neil Williams Nick Pillitteri Nik Nyby Nikolay Kim Oliver Allen Oliver Bristow Oliver Tonnhofer Omer Katz PA Parent Paul Jeannot Paul Davis Paul J. 
Davis Paul Smith Phil Schanely Philip Cristiano Philipp Saveliev Prateek Singh Paudel py Qiangning Hong Randall Leeds Randall Leeds Randall Leeds Raphaël Slinckx Rhys Powell Rik Ronan Amicel Ryan Peck Ryuichi Watanabe Saeed Gharedaghi Samuel Matos Sergey Rublev Shane Reustle shouse-cars sib Simon Lundmark Stephane Wirtel Stephen DiCato Stephen Holsapple Steven Cummings sylt Sébastien Fievet Tal Einat <532281+taleinat@users.noreply.github.com> Talha Malik TedWantsMore Teko012 <112829523+Teko012@users.noreply.github.com> Thomas Grainger Thomas Steinacher Travis Cline Travis Swicegood Trey Long W. Trevor King Wojtek Wolfgang Schnerring WoLpH wong2 WooParadog Xie Shi Yue Du zakdances Emile Fugulin benoitc-gunicorn-f5fb19e/appveyor.yml000066400000000000000000000026261514360242400200610ustar00rootroot00000000000000version: '{branch}.{build}' environment: matrix: - TOXENV: lint PYTHON: "C:\\Python312-x64" - TOXENV: pycodestyle PYTHON: "C:\\Python312-x64" # Windows cannot even import the module when they unconditionally import, see below. #- TOXENV: run-module # PYTHON: "C:\\Python38-x64" #- TOXENV: run-entrypoint # PYTHON: "C:\\Python38-x64" # Windows is not ready for testing!!! # Python's fcntl, grp, pwd, os.geteuid(), and socket.AF_UNIX are all Unix-only. #- TOXENV: py35 # PYTHON: "C:\\Python35-x64" #- TOXENV: py36 # PYTHON: "C:\\Python36-x64" #- TOXENV: py37 # PYTHON: "C:\\Python37-x64" #- TOXENV: py38 # PYTHON: "C:\\Python38-x64" #- TOXENV: py39 # PYTHON: "C:\\Python39-x64" #- TOXENV: py310 # PYTHON: "C:\\Python310-x64" #- TOXENV: py311 # PYTHON: "C:\\Python311-x64" #- TOXENV: py312 # PYTHON: "C:\\Python312-x64" matrix: allow_failures: # No failures expected for py312 and py313 init: - SET "PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%" install: - pip install tox build: false test_script: - tox cache: # Not including the .tox directory since it takes longer to download/extract # the cache archive than for tox to clean install from the pip cache. 
- '%LOCALAPPDATA%\pip\Cache -> tox.ini' notifications: - provider: Email on_build_success: false on_build_status_changed: false benoitc-gunicorn-f5fb19e/benchmarks/000077500000000000000000000000001514360242400176005ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/benchmarks/baseline.json000066400000000000000000000001671514360242400222610ustar00rootroot00000000000000{ "gthread": { "simple": {}, "simple_high_concurrency": {}, "slow_io": {}, "large_response": {} } }benoitc-gunicorn-f5fb19e/benchmarks/dirty_bench_app.py000066400000000000000000000144501514360242400233100ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Benchmark DirtyApp for stress testing the dirty arbiter pool. Provides configurable workloads for testing: - Pure sleep (scheduling overhead) - CPU-bound work (thread pool utilization) - Mixed I/O + CPU (realistic workloads) - Payload generation (serialization overhead) """ import time from gunicorn.dirty import DirtyApp class BenchmarkApp(DirtyApp): """ Configurable benchmark app for stress testing. Provides various task types to test different aspects of the dirty pool performance. """ def init(self): """Fast initialization - no heavy resources to load.""" self.call_count = 0 self.total_sleep_ms = 0 self.total_cpu_ms = 0 def sleep_task(self, duration_ms): """ Pure sleep task - tests scheduling overhead. This simulates I/O-bound work like waiting for external APIs. The thread is blocked but not consuming CPU. Args: duration_ms: Sleep duration in milliseconds Returns: dict with sleep duration """ self.call_count += 1 self.total_sleep_ms += duration_ms time.sleep(duration_ms / 1000.0) return {"slept_ms": duration_ms} def cpu_task(self, duration_ms, intensity=1.0): """ CPU-bound work - tests thread pool utilization. Performs actual computation to simulate CPU-intensive work like model inference or data processing. 
Args: duration_ms: Target duration in milliseconds intensity: Work intensity multiplier (1.0 = normal) Returns: dict with computed iterations and actual duration """ self.call_count += 1 start = time.perf_counter() target_end = start + (duration_ms / 1000.0) # Perform CPU work until target duration iterations = 0 work_per_iteration = int(1000 * intensity) while time.perf_counter() < target_end: # Do some actual computation x = 0.0 for i in range(work_per_iteration): x += i * 0.001 x = x * 1.001 if x < 1000000 else x * 0.999 iterations += 1 actual_ms = (time.perf_counter() - start) * 1000 self.total_cpu_ms += actual_ms return { "iterations": iterations, "target_ms": duration_ms, "actual_ms": round(actual_ms, 2), "intensity": intensity } def mixed_task(self, sleep_ms, cpu_ms, intensity=1.0): """ Mixed I/O + CPU task - simulates realistic workloads. First performs I/O (sleep), then does CPU work. This is common in real apps: fetch data, then process it. Args: sleep_ms: I/O simulation duration in milliseconds cpu_ms: CPU work duration in milliseconds intensity: CPU work intensity multiplier Returns: dict with both sleep and CPU metrics """ self.call_count += 1 # I/O phase (sleep) time.sleep(sleep_ms / 1000.0) self.total_sleep_ms += sleep_ms # CPU phase start = time.perf_counter() target_end = start + (cpu_ms / 1000.0) iterations = 0 work_per_iteration = int(1000 * intensity) while time.perf_counter() < target_end: x = 0.0 for i in range(work_per_iteration): x += i * 0.001 x = x * 1.001 if x < 1000000 else x * 0.999 iterations += 1 actual_cpu_ms = (time.perf_counter() - start) * 1000 self.total_cpu_ms += actual_cpu_ms return { "sleep_ms": sleep_ms, "cpu_iterations": iterations, "target_cpu_ms": cpu_ms, "actual_cpu_ms": round(actual_cpu_ms, 2), "total_ms": round(sleep_ms + actual_cpu_ms, 2) } def payload_task(self, size_bytes, duration_ms=0): """ Generate payload of specified size - tests serialization. 
Creates a deterministic payload to test JSON serialization overhead for different response sizes. Args: size_bytes: Target payload size in bytes duration_ms: Optional sleep before generating payload Returns: dict with 'data' field of specified size """ self.call_count += 1 if duration_ms > 0: time.sleep(duration_ms / 1000.0) self.total_sleep_ms += duration_ms # Generate payload - use a pattern that compresses differently # than pure repeated characters for more realistic testing pattern = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" repeats = (size_bytes // len(pattern)) + 1 data = (pattern * repeats)[:size_bytes] return { "data": data, "size": len(data) } def echo_task(self, payload): """ Echo back payload - tests round-trip serialization. Useful for testing request/response serialization together. Args: payload: Data to echo back Returns: dict with echoed payload and its size """ self.call_count += 1 # Calculate size based on type if isinstance(payload, str): size = len(payload) elif isinstance(payload, (dict, list)): import json size = len(json.dumps(payload)) else: size = len(str(payload)) return { "echoed_size": size, "payload": payload } def stats(self): """ Return accumulated statistics. Returns: dict with call counts and totals """ return { "call_count": self.call_count, "total_sleep_ms": self.total_sleep_ms, "total_cpu_ms": round(self.total_cpu_ms, 2) } def reset_stats(self): """Reset accumulated statistics.""" self.call_count = 0 self.total_sleep_ms = 0 self.total_cpu_ms = 0 return {"reset": True} def health(self): """Health check endpoint for warmup.""" return {"status": "ok"} def close(self): """Cleanup on shutdown.""" pass benoitc-gunicorn-f5fb19e/benchmarks/dirty_bench_gunicorn.py000066400000000000000000000024701514360242400243530ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Gunicorn configuration for dirty pool integration benchmarks. 
Usage: gunicorn -c benchmarks/dirty_bench_gunicorn.py \ benchmarks.dirty_bench_wsgi:app """ # Bind address bind = "127.0.0.1:8000" # HTTP worker configuration workers = 4 worker_class = "gthread" threads = 4 worker_connections = 1000 # Dirty pool configuration dirty_apps = ["benchmarks.dirty_bench_app:BenchmarkApp"] dirty_workers = 4 dirty_threads = 1 dirty_timeout = 300 dirty_graceful_timeout = 30 # Logging accesslog = "-" errorlog = "-" loglevel = "info" # Timeouts timeout = 120 graceful_timeout = 30 keepalive = 2 # Lifecycle hooks def on_dirty_starting(arbiter): """Called when dirty arbiter is starting.""" print(f"[dirty] Arbiter starting (pid: {arbiter.pid})") def dirty_post_fork(arbiter, worker): """Called after dirty worker fork.""" print(f"[dirty] Worker {worker.pid} forked") def dirty_worker_init(worker): """Called after dirty worker apps are initialized.""" print(f"[dirty] Worker {worker.pid} initialized with apps: " f"{list(worker.apps.keys())}") def dirty_worker_exit(arbiter, worker): """Called when dirty worker exits.""" print(f"[dirty] Worker {worker.pid} exiting") benoitc-gunicorn-f5fb19e/benchmarks/dirty_bench_wsgi.py000066400000000000000000000127451514360242400235060ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ WSGI app for integration benchmarking of the dirty pool. This simple WSGI application calls the dirty pool and returns results. Use with gunicorn for end-to-end benchmarking that includes HTTP overhead. Example: gunicorn benchmarks.dirty_bench_wsgi:app \ --workers 4 \ --dirty-app benchmarks.dirty_bench_app:BenchmarkApp \ --dirty-workers 2 \ --bind 127.0.0.1:8000 """ import json from urllib.parse import parse_qs from gunicorn.dirty import get_dirty_client # Default benchmark app path BENCHMARK_APP = "benchmarks.dirty_bench_app:BenchmarkApp" def app(environ, start_response): """ WSGI application that calls dirty pool tasks. 
Query parameters: action: Task action to call (default: sleep_task) duration: Duration in ms for sleep/cpu tasks (default: 10) sleep: Sleep duration for mixed_task (default: 50) cpu: CPU duration for mixed_task (default: 50) size: Payload size in bytes for payload_task (default: 100) intensity: CPU intensity for cpu/mixed tasks (default: 1.0) app: Dirty app path (default: benchmarks.dirty_bench_app:BenchmarkApp) Endpoints: / - Default sleep_task /sleep - sleep_task with ?duration=N /cpu - cpu_task with ?duration=N&intensity=N /mixed - mixed_task with ?sleep=N&cpu=N /payload - payload_task with ?size=N /echo - echo_task (POST body echoed) /stats - Get accumulated stats /health - Health check """ path = environ.get('PATH_INFO', '/') method = environ.get('REQUEST_METHOD', 'GET') query = parse_qs(environ.get('QUERY_STRING', '')) # Helper to get query params with defaults def get_param(name, default, type_fn=int): values = query.get(name, []) if values: try: return type_fn(values[0]) except (ValueError, TypeError): return default return default # Get app path from query or use default app_path = query.get('app', [BENCHMARK_APP])[0] try: client = get_dirty_client() # Route based on path if path in ('/', '/sleep'): duration = get_param('duration', 10) result = client.execute(app_path, "sleep_task", duration) elif path == '/cpu': duration = get_param('duration', 100) intensity = get_param('intensity', 1.0, float) result = client.execute(app_path, "cpu_task", duration, intensity) elif path == '/mixed': sleep_ms = get_param('sleep', 50) cpu_ms = get_param('cpu', 50) intensity = get_param('intensity', 1.0, float) result = client.execute(app_path, "mixed_task", sleep_ms, cpu_ms, intensity) elif path == '/payload': size = get_param('size', 100) duration = get_param('duration', 0) result = client.execute(app_path, "payload_task", size, duration) elif path == '/echo': # Read request body for echo try: content_length = int(environ.get('CONTENT_LENGTH', 0)) except (ValueError, 
TypeError): content_length = 0 if content_length > 0: body = environ['wsgi.input'].read(content_length) try: payload = json.loads(body.decode('utf-8')) except (json.JSONDecodeError, UnicodeDecodeError): payload = body.decode('utf-8', errors='replace') else: payload = "" result = client.execute(app_path, "echo_task", payload) elif path == '/stats': result = client.execute(app_path, "stats") elif path == '/reset': result = client.execute(app_path, "reset_stats") elif path == '/health': result = client.execute(app_path, "health") else: # Unknown path - return 404 status = '404 Not Found' body = json.dumps({"error": f"Unknown path: {path}"}).encode() headers = [ ('Content-Type', 'application/json'), ('Content-Length', str(len(body))), ] start_response(status, headers) return [body] # Success response status = '200 OK' body = json.dumps(result).encode() headers = [ ('Content-Type', 'application/json'), ('Content-Length', str(len(body))), ] start_response(status, headers) return [body] except Exception as e: # Error response status = '500 Internal Server Error' error_msg = {"error": str(e), "type": type(e).__name__} body = json.dumps(error_msg).encode() headers = [ ('Content-Type', 'application/json'), ('Content-Length', str(len(body))), ] start_response(status, headers) return [body] # Gunicorn configuration for integration testing # These can be overridden on the command line # Example gunicorn invocation: # gunicorn benchmarks.dirty_bench_wsgi:app \ # -c benchmarks/dirty_bench_gunicorn.py \ # --dirty-app benchmarks.dirty_bench_app:BenchmarkApp \ # --dirty-workers 2 def post_fork(server, worker): """Hook called after worker fork.""" pass benoitc-gunicorn-f5fb19e/benchmarks/dirty_benchmark.py000077500000000000000000001037271514360242400233340ustar00rootroot00000000000000#!/usr/bin/env python3 # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
""" Dirty Pool Benchmark Runner Stress tests and benchmarks the dirty arbiter pool to find bottlenecks and optimization opportunities. Test Modes: - Isolated: Direct client -> arbiter -> worker (no HTTP overhead) - Integrated: HTTP workers calling dirty pool (realistic end-to-end) Usage: # Quick smoke test python benchmarks/dirty_benchmark.py --quick # Full isolated suite python benchmarks/dirty_benchmark.py --isolated --output results.json # Specific scenario python benchmarks/dirty_benchmark.py \ --duration 100 \ --concurrency 50 \ --workers 4 \ --threads 2 # Payload size tests python benchmarks/dirty_benchmark.py --payload-tests # Integration tests (requires gunicorn running) python benchmarks/dirty_benchmark.py --integrated --url http://127.0.0.1:8000 """ import argparse import asyncio import json import multiprocessing import os import signal import statistics import subprocess import sys import tempfile import threading import time from concurrent.futures import ThreadPoolExecutor, as_completed from dataclasses import dataclass, field, asdict from pathlib import Path from typing import Any # Add parent to path for imports BENCHMARK_DIR = Path(__file__).parent sys.path.insert(0, str(BENCHMARK_DIR.parent)) from gunicorn.dirty.client import DirtyClient from gunicorn.dirty.arbiter import DirtyArbiter # Default benchmark app path BENCHMARK_APP = "benchmarks.dirty_bench_app:BenchmarkApp" @dataclass class LatencyStats: """Latency statistics in milliseconds.""" min: float = 0.0 max: float = 0.0 mean: float = 0.0 stddev: float = 0.0 p50: float = 0.0 p95: float = 0.0 p99: float = 0.0 @classmethod def from_samples(cls, samples: list[float]) -> "LatencyStats": """Calculate statistics from list of latency samples.""" if not samples: return cls() sorted_samples = sorted(samples) n = len(sorted_samples) return cls( min=sorted_samples[0], max=sorted_samples[-1], mean=statistics.mean(sorted_samples), stddev=statistics.stdev(sorted_samples) if n > 1 else 0.0, 
p50=sorted_samples[int(n * 0.50)], p95=sorted_samples[int(n * 0.95)] if n >= 20 else sorted_samples[-1], p99=sorted_samples[int(n * 0.99)] if n >= 100 else sorted_samples[-1], ) @dataclass class BenchmarkResult: """Results from a single benchmark run.""" scenario: str config: dict total_requests: int = 0 successful: int = 0 failed: int = 0 errors: list[str] = field(default_factory=list) duration_sec: float = 0.0 requests_per_sec: float = 0.0 latency_ms: LatencyStats = field(default_factory=LatencyStats) def to_dict(self) -> dict: """Convert to dictionary for JSON serialization.""" d = asdict(self) d['latency_ms'] = asdict(self.latency_ms) return d class MockConfig: """Mock gunicorn config for standalone arbiter testing.""" def __init__( self, dirty_apps: list[str], dirty_workers: int = 2, dirty_threads: int = 1, dirty_timeout: int = 300, dirty_graceful_timeout: int = 30, ): self.dirty_apps = dirty_apps self.dirty_workers = dirty_workers self.dirty_threads = dirty_threads self.dirty_timeout = dirty_timeout self.dirty_graceful_timeout = dirty_graceful_timeout # Other required config self.env = {} self.uid = os.getuid() self.gid = os.getgid() self.initgroups = False self.proc_name = "dirty-benchmark" # WorkerTmp requirements self.umask = 0 self.worker_tmp_dir = None # Hook stubs def on_dirty_starting(self, arbiter): pass def dirty_post_fork(self, arbiter, worker): pass def dirty_worker_init(self, worker): pass def dirty_worker_exit(self, arbiter, worker): pass class MockLogger: """Mock logger for standalone testing.""" def __init__(self, verbose: bool = False): self.verbose = verbose def debug(self, msg, *args): if self.verbose: print(f"[DEBUG] {msg % args if args else msg}") def info(self, msg, *args): if self.verbose: print(f"[INFO] {msg % args if args else msg}") def warning(self, msg, *args): print(f"[WARN] {msg % args if args else msg}") def error(self, msg, *args): print(f"[ERROR] {msg % args if args else msg}") def critical(self, msg, *args): print(f"[CRIT] 
{msg % args if args else msg}") def exception(self, msg, *args): print(f"[EXC] {msg % args if args else msg}") def reopen_files(self): pass def close_on_exec(self): pass class IsolatedBenchmark: """ Run benchmarks directly against the dirty pool without HTTP. Spawns a standalone dirty arbiter and workers, then runs concurrent clients to measure performance. """ def __init__( self, dirty_workers: int = 2, dirty_threads: int = 1, dirty_timeout: int = 300, verbose: bool = False, ): self.dirty_workers = dirty_workers self.dirty_threads = dirty_threads self.dirty_timeout = dirty_timeout self.verbose = verbose self.arbiter = None self.arbiter_pid = None self.socket_path = None self._tmpdir = None def start(self): """Start the dirty arbiter and workers.""" # Create temp directory for socket self._tmpdir = tempfile.mkdtemp(prefix="dirty-bench-") self.socket_path = os.path.join(self._tmpdir, "arbiter.sock") # Create config and logger cfg = MockConfig( dirty_apps=[BENCHMARK_APP], dirty_workers=self.dirty_workers, dirty_threads=self.dirty_threads, dirty_timeout=self.dirty_timeout, ) log = MockLogger(verbose=self.verbose) # Fork arbiter process pid = os.fork() if pid == 0: # Child process - run arbiter try: arbiter = DirtyArbiter(cfg, log, socket_path=self.socket_path) arbiter.run() except Exception as e: print(f"Arbiter error: {e}") finally: os._exit(0) # Parent process self.arbiter_pid = pid # Wait for arbiter socket to be ready for _ in range(50): # 5 seconds max if os.path.exists(self.socket_path): break time.sleep(0.1) else: raise RuntimeError("Arbiter socket not ready") # Give workers time to start time.sleep(0.5) def stop(self): """Stop the dirty arbiter.""" if self.arbiter_pid: try: os.kill(self.arbiter_pid, signal.SIGTERM) os.waitpid(self.arbiter_pid, 0) except (OSError, ChildProcessError): pass self.arbiter_pid = None # Cleanup temp directory if self._tmpdir: try: for f in os.listdir(self._tmpdir): os.unlink(os.path.join(self._tmpdir, f)) os.rmdir(self._tmpdir) 
except OSError: pass self._tmpdir = None def warmup(self, requests: int = 10): """Warm up the pool with a few requests.""" with DirtyClient(self.socket_path, timeout=30.0) as client: for _ in range(requests): client.execute(BENCHMARK_APP, "health") def run_benchmark( self, action: str, args: tuple = (), kwargs: dict = None, total_requests: int = 1000, concurrency: int = 10, timeout: float = 30.0, ) -> tuple[list[float], list[str]]: """ Run a benchmark with specified parameters. Each concurrent worker maintains a persistent connection to the arbiter and makes sequential requests. This simulates how real HTTP workers use the dirty client (one connection per worker thread). Args: action: Action to call on the benchmark app args: Positional arguments for the action kwargs: Keyword arguments for the action total_requests: Total number of requests to make concurrency: Number of concurrent clients timeout: Timeout per request in seconds Returns: Tuple of (latencies in ms, error messages) """ kwargs = kwargs or {} latencies = [] errors = [] lock = threading.Lock() # Calculate requests per worker requests_per_worker = total_requests // concurrency remainder = total_requests % concurrency def worker_task(num_requests: int) -> None: """Worker that makes sequential requests on a persistent connection.""" worker_latencies = [] worker_errors = [] try: client = DirtyClient(self.socket_path, timeout=timeout) client.connect() for _ in range(num_requests): try: start = time.perf_counter() client.execute(BENCHMARK_APP, action, *args, **kwargs) elapsed = (time.perf_counter() - start) * 1000 worker_latencies.append(elapsed) except Exception as e: worker_errors.append(str(e)) # Reconnect on error try: client.close() client = DirtyClient(self.socket_path, timeout=timeout) client.connect() except Exception: pass client.close() except Exception as e: worker_errors.append(f"Connection error: {e}") # Add results to shared lists with lock: latencies.extend(worker_latencies) 
errors.extend(worker_errors) # Run concurrent workers with ThreadPoolExecutor(max_workers=concurrency) as executor: futures = [] for i in range(concurrency): # Distribute remainder requests among first few workers num = requests_per_worker + (1 if i < remainder else 0) if num > 0: futures.append(executor.submit(worker_task, num)) # Wait for all workers to complete for future in as_completed(futures): future.result() # Raises any exceptions return latencies, errors class IntegratedBenchmark: """ Run benchmarks against gunicorn with dirty pool via HTTP. Uses wrk or ab for load testing, or falls back to Python requests. """ def __init__( self, url: str = "http://127.0.0.1:8000", verbose: bool = False, ): self.url = url.rstrip('/') self.verbose = verbose self._tool = None def check_dependencies(self) -> str | None: """Check for available load testing tools.""" for tool in ['wrk', 'ab']: try: subprocess.run([tool, '--version'], capture_output=True, check=False) return tool except FileNotFoundError: continue return None def warmup(self, requests: int = 10): """Warm up the server.""" import urllib.request for _ in range(requests): try: urllib.request.urlopen(f"{self.url}/health", timeout=5) except Exception: pass def run_wrk( self, path: str, duration: int = 10, threads: int = 4, connections: int = 100, ) -> dict: """Run wrk benchmark and parse results.""" url = f"{self.url}{path}" cmd = [ 'wrk', '-t', str(threads), '-c', str(connections), '-d', f'{duration}s', '--latency', url, ] result = subprocess.run(cmd, capture_output=True, text=True, check=False) return self._parse_wrk_output(result.stdout) def _parse_wrk_output(self, output: str) -> dict: """Parse wrk output to extract metrics.""" metrics = { 'requests_per_sec': 0.0, 'latency_ms': {}, 'errors': 0, } for line in output.split('\n'): if 'Requests/sec' in line: try: metrics['requests_per_sec'] = float( line.split(':')[1].strip()) except (ValueError, IndexError): pass elif 'Latency' in line and 'Distribution' not in 
line: parts = line.split() if len(parts) >= 2: metrics['latency_ms']['avg'] = self._parse_duration( parts[1]) elif '50%' in line: parts = line.split() if len(parts) >= 2: metrics['latency_ms']['p50'] = self._parse_duration( parts[1]) elif '99%' in line: parts = line.split() if len(parts) >= 2: metrics['latency_ms']['p99'] = self._parse_duration( parts[1]) elif 'Socket errors' in line: # Parse error counts parts = line.split(',') for part in parts: if any(x in part for x in ['connect', 'read', 'write', 'timeout']): try: metrics['errors'] += int(part.split()[-1]) except (ValueError, IndexError): pass return metrics def _parse_duration(self, s: str) -> float: """Parse wrk duration string (e.g., '12.34ms', '1.23s') to ms.""" s = s.strip() if s.endswith('us'): return float(s[:-2]) / 1000 elif s.endswith('ms'): return float(s[:-2]) elif s.endswith('s'): return float(s[:-1]) * 1000 else: return float(s) def run_python_benchmark( self, path: str, total_requests: int = 1000, concurrency: int = 10, timeout: float = 30.0, ) -> tuple[list[float], list[str]]: """ Run benchmark using Python urllib. Fallback when wrk/ab not available. 
""" import urllib.request import urllib.error url = f"{self.url}{path}" latencies = [] errors = [] def make_request() -> tuple[float | None, str | None]: try: start = time.perf_counter() urllib.request.urlopen(url, timeout=timeout) elapsed = (time.perf_counter() - start) * 1000 return elapsed, None except Exception as e: return None, str(e) with ThreadPoolExecutor(max_workers=concurrency) as executor: futures = [executor.submit(make_request) for _ in range(total_requests)] for future in as_completed(futures): latency, error = future.result() if latency is not None: latencies.append(latency) if error: errors.append(error) return latencies, errors def run_isolated_suite( workers: int = 2, threads: int = 1, verbose: bool = False, ) -> list[BenchmarkResult]: """Run the full isolated benchmark suite.""" results = [] bench = IsolatedBenchmark( dirty_workers=workers, dirty_threads=threads, verbose=verbose, ) print(f"\nStarting isolated benchmarks (workers={workers}, " f"threads={threads})...") try: bench.start() bench.warmup() # Define scenarios scenarios = [ # Baseline { "name": "baseline_10ms", "action": "sleep_task", "args": (10,), "requests": 1000, "concurrency": 1, "description": "Single request latency (10ms sleep)", }, # Throughput { "name": "throughput_10ms", "action": "sleep_task", "args": (10,), "requests": 5000, "concurrency": 100, "description": "Max requests/sec (10ms sleep, 100 clients)", }, # CPU Bound { "name": "cpu_bound_100ms", "action": "cpu_task", "args": (100,), "requests": 500, "concurrency": 20, "description": "CPU-bound work (100ms, 20 clients)", }, # I/O Bound { "name": "io_bound_500ms", "action": "sleep_task", "args": (500,), "requests": 200, "concurrency": 50, "description": "I/O-bound work (500ms sleep, 50 clients)", }, # Mixed { "name": "mixed_50_50", "action": "mixed_task", "args": (50, 50), "requests": 500, "concurrency": 30, "description": "Mixed workload (50ms sleep + 50ms CPU)", }, # Overload { "name": "overload_10ms", "action": 
"sleep_task", "args": (10,), "requests": 2000, "concurrency": 200, "description": "Overload test (10ms, 200 clients)", }, ] for scenario in scenarios: print(f" Running {scenario['name']}: {scenario['description']}...") start_time = time.perf_counter() latencies, errors = bench.run_benchmark( action=scenario["action"], args=scenario.get("args", ()), kwargs=scenario.get("kwargs"), total_requests=scenario["requests"], concurrency=scenario["concurrency"], ) duration = time.perf_counter() - start_time result = BenchmarkResult( scenario=scenario["name"], config={ "dirty_workers": workers, "dirty_threads": threads, "task_action": scenario["action"], "task_args": scenario.get("args", ()), "concurrency": scenario["concurrency"], }, total_requests=scenario["requests"], successful=len(latencies), failed=len(errors), errors=errors[:10] if errors else [], # First 10 errors duration_sec=round(duration, 2), requests_per_sec=round(len(latencies) / duration, 1), latency_ms=LatencyStats.from_samples(latencies), ) results.append(result) print(f" Requests/sec: {result.requests_per_sec:.1f}, " f"p50: {result.latency_ms.p50:.1f}ms, " f"p99: {result.latency_ms.p99:.1f}ms, " f"failed: {result.failed}") finally: bench.stop() return results def run_payload_suite( workers: int = 2, threads: int = 1, verbose: bool = False, ) -> list[BenchmarkResult]: """Run payload size benchmark suite.""" results = [] bench = IsolatedBenchmark( dirty_workers=workers, dirty_threads=threads, verbose=verbose, ) print(f"\nStarting payload benchmarks (workers={workers})...") try: bench.start() bench.warmup() # Payload sizes to test payload_sizes = [ (100, "100B", "Tiny payload"), (1024, "1KB", "Small payload"), (10240, "10KB", "Medium payload"), (102400, "100KB", "Large payload"), (1048576, "1MB", "Very large payload"), ] for size, size_label, description in payload_sizes: # Adjust concurrency for larger payloads concurrency = max(5, 100 // (size // 1024 + 1)) requests = max(100, 1000 // (size // 1024 + 1)) 
print(f" Running payload_{size_label}: {description}...") start_time = time.perf_counter() latencies, errors = bench.run_benchmark( action="payload_task", args=(size,), total_requests=requests, concurrency=concurrency, ) duration = time.perf_counter() - start_time result = BenchmarkResult( scenario=f"payload_{size_label}", config={ "dirty_workers": workers, "dirty_threads": threads, "payload_bytes": size, "concurrency": concurrency, }, total_requests=requests, successful=len(latencies), failed=len(errors), errors=errors[:5] if errors else [], duration_sec=round(duration, 2), requests_per_sec=round(len(latencies) / duration, 1), latency_ms=LatencyStats.from_samples(latencies), ) results.append(result) # Calculate throughput in MB/s throughput_mb = (len(latencies) * size) / duration / 1024 / 1024 print(f" Requests/sec: {result.requests_per_sec:.1f}, " f"p50: {result.latency_ms.p50:.1f}ms, " f"throughput: {throughput_mb:.1f} MB/s") finally: bench.stop() return results def run_quick_test(verbose: bool = False) -> list[BenchmarkResult]: """Run a quick smoke test.""" results = [] bench = IsolatedBenchmark(dirty_workers=1, dirty_threads=1, verbose=verbose) print("\nRunning quick smoke test...") try: bench.start() bench.warmup(5) # Simple test start_time = time.perf_counter() latencies, errors = bench.run_benchmark( action="sleep_task", args=(10,), total_requests=100, concurrency=10, ) duration = time.perf_counter() - start_time result = BenchmarkResult( scenario="quick_test", config={"dirty_workers": 1, "dirty_threads": 1}, total_requests=100, successful=len(latencies), failed=len(errors), errors=errors[:5] if errors else [], duration_sec=round(duration, 2), requests_per_sec=round(len(latencies) / duration, 1), latency_ms=LatencyStats.from_samples(latencies), ) results.append(result) print(f" Requests/sec: {result.requests_per_sec:.1f}, " f"p50: {result.latency_ms.p50:.1f}ms, " f"failed: {result.failed}") if result.failed == 0: print(" PASS: Quick test successful") else: 
print(f" WARN: {result.failed} requests failed") finally: bench.stop() return results def run_config_sweep(verbose: bool = False) -> list[BenchmarkResult]: """ Sweep through different configurations to find optimal settings. Tests combinations of workers and threads. """ results = [] configs = [ (1, 1), # Baseline (2, 1), # 2 workers, 1 thread each (4, 1), # 4 workers, 1 thread each (2, 2), # 2 workers, 2 threads each (2, 4), # 2 workers, 4 threads each (4, 2), # 4 workers, 2 threads each ] print("\nRunning configuration sweep...") for workers, threads in configs: print(f"\n Testing workers={workers}, threads={threads}...") bench = IsolatedBenchmark( dirty_workers=workers, dirty_threads=threads, verbose=verbose, ) try: bench.start() bench.warmup() # Run a standard workload start_time = time.perf_counter() latencies, errors = bench.run_benchmark( action="mixed_task", args=(20, 20), # 20ms sleep + 20ms CPU total_requests=1000, concurrency=50, ) duration = time.perf_counter() - start_time result = BenchmarkResult( scenario=f"config_w{workers}_t{threads}", config={ "dirty_workers": workers, "dirty_threads": threads, "task": "mixed_task(20, 20)", "concurrency": 50, }, total_requests=1000, successful=len(latencies), failed=len(errors), errors=errors[:5] if errors else [], duration_sec=round(duration, 2), requests_per_sec=round(len(latencies) / duration, 1), latency_ms=LatencyStats.from_samples(latencies), ) results.append(result) print(f" Requests/sec: {result.requests_per_sec:.1f}, " f"p50: {result.latency_ms.p50:.1f}ms, " f"p99: {result.latency_ms.p99:.1f}ms") finally: bench.stop() # Print summary print("\n Configuration Summary:") print(" " + "-" * 60) sorted_results = sorted(results, key=lambda r: -r.requests_per_sec) for r in sorted_results: cfg = r.config print(f" w={cfg['dirty_workers']}, t={cfg['dirty_threads']}: " f"{r.requests_per_sec:.1f} req/s, " f"p99={r.latency_ms.p99:.1f}ms") return results def generate_report(results: list[BenchmarkResult], output_path: 
str = None): """Generate a summary report from benchmark results.""" print("\n" + "=" * 70) print("BENCHMARK REPORT") print("=" * 70) for result in results: print(f"\n{result.scenario}") print("-" * 40) print(f" Config: {json.dumps(result.config, indent=None)}") print(f" Requests: {result.successful}/{result.total_requests} " f"({result.failed} failed)") print(f" Duration: {result.duration_sec}s") print(f" Throughput: {result.requests_per_sec:.1f} req/s") print(f" Latency (ms):") print(f" min: {result.latency_ms.min:.2f}") print(f" p50: {result.latency_ms.p50:.2f}") print(f" p95: {result.latency_ms.p95:.2f}") print(f" p99: {result.latency_ms.p99:.2f}") print(f" max: {result.latency_ms.max:.2f}") print(f" mean: {result.latency_ms.mean:.2f} " f"(stddev: {result.latency_ms.stddev:.2f})") if result.errors: print(f" Errors (first {len(result.errors)}):") for err in result.errors[:3]: print(f" - {err[:80]}") if output_path: output_data = { "timestamp": time.strftime("%Y-%m-%dT%H:%M:%S"), "results": [r.to_dict() for r in results], } with open(output_path, 'w') as f: json.dump(output_data, f, indent=2) print(f"\nResults saved to: {output_path}") def main(): parser = argparse.ArgumentParser( description='Benchmark the gunicorn dirty pool', formatter_class=argparse.RawDescriptionHelpFormatter, epilog=__doc__, ) # Mode selection mode_group = parser.add_mutually_exclusive_group() mode_group.add_argument('--quick', action='store_true', help='Run quick smoke test') mode_group.add_argument('--isolated', action='store_true', help='Run isolated benchmark suite') mode_group.add_argument('--payload-tests', action='store_true', help='Run payload size tests') mode_group.add_argument('--config-sweep', action='store_true', help='Sweep through configurations') mode_group.add_argument('--integrated', action='store_true', help='Run integrated HTTP benchmarks') # Configuration parser.add_argument('--workers', type=int, default=2, help='Number of dirty workers (default: 2)') 
parser.add_argument('--threads', type=int, default=1, help='Threads per dirty worker (default: 1)') parser.add_argument('--duration', type=int, default=10, help='Task duration in ms for custom run') parser.add_argument('--concurrency', type=int, default=10, help='Number of concurrent clients') parser.add_argument('--requests', type=int, default=1000, help='Total requests to make') # Integration mode options parser.add_argument('--url', default='http://127.0.0.1:8000', help='Server URL for integrated tests') # Output parser.add_argument('--output', '-o', help='Output JSON file for results') parser.add_argument('--verbose', '-v', action='store_true', help='Verbose output') args = parser.parse_args() results = [] try: if args.quick: results = run_quick_test(verbose=args.verbose) elif args.isolated: results = run_isolated_suite( workers=args.workers, threads=args.threads, verbose=args.verbose, ) elif args.payload_tests: results = run_payload_suite( workers=args.workers, threads=args.threads, verbose=args.verbose, ) elif args.config_sweep: results = run_config_sweep(verbose=args.verbose) elif args.integrated: bench = IntegratedBenchmark(url=args.url, verbose=args.verbose) tool = bench.check_dependencies() if tool == 'wrk': print(f"\nRunning integrated benchmarks with wrk...") bench.warmup() # Run basic scenarios scenarios = [ ("/sleep?duration=10", "sleep_10ms"), ("/cpu?duration=100", "cpu_100ms"), ("/mixed?sleep=50&cpu=50", "mixed_50_50"), ] for path, name in scenarios: print(f" Running {name}...") metrics = bench.run_wrk(path, duration=10, connections=100) print(f" Requests/sec: {metrics.get('requests_per_sec', 'N/A')}") print("\nNote: For detailed results, use wrk directly:") print(f" wrk -t4 -c100 -d30s --latency '{args.url}/sleep?duration=10'") else: print("\nUsing Python fallback (install wrk for better results)...") bench.warmup() latencies, errors = bench.run_python_benchmark( "/sleep?duration=10", total_requests=args.requests, concurrency=args.concurrency, ) 
result = BenchmarkResult( scenario="integrated_sleep", config={"url": args.url, "concurrency": args.concurrency}, total_requests=args.requests, successful=len(latencies), failed=len(errors), errors=errors[:5], duration_sec=sum(latencies) / 1000 / args.concurrency, requests_per_sec=len(latencies) / (sum(latencies) / 1000 / args.concurrency), latency_ms=LatencyStats.from_samples(latencies), ) results.append(result) else: # Default: run custom single benchmark print(f"\nRunning custom benchmark: " f"duration={args.duration}ms, concurrency={args.concurrency}") bench = IsolatedBenchmark( dirty_workers=args.workers, dirty_threads=args.threads, verbose=args.verbose, ) try: bench.start() bench.warmup() start_time = time.perf_counter() latencies, errors = bench.run_benchmark( action="sleep_task", args=(args.duration,), total_requests=args.requests, concurrency=args.concurrency, ) duration = time.perf_counter() - start_time result = BenchmarkResult( scenario="custom", config={ "dirty_workers": args.workers, "dirty_threads": args.threads, "task_duration_ms": args.duration, "concurrency": args.concurrency, }, total_requests=args.requests, successful=len(latencies), failed=len(errors), errors=errors[:10], duration_sec=round(duration, 2), requests_per_sec=round(len(latencies) / duration, 1), latency_ms=LatencyStats.from_samples(latencies), ) results.append(result) finally: bench.stop() # Generate report if results: generate_report(results, args.output) except KeyboardInterrupt: print("\nBenchmark interrupted") sys.exit(1) except Exception as e: print(f"\nError: {e}") if args.verbose: import traceback traceback.print_exc() sys.exit(1) if __name__ == '__main__': main() benoitc-gunicorn-f5fb19e/benchmarks/dirty_streaming.py000066400000000000000000000552101514360242400233610ustar00rootroot00000000000000#!/usr/bin/env python # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
""" Benchmark suite for dirty worker streaming functionality. This script benchmarks the streaming performance of dirty workers to measure throughput, latency, and memory usage. Usage: python benchmarks/dirty_streaming.py [OPTIONS] Options: --quick Run quick benchmarks only --full Run full benchmark suite including stress tests """ import argparse import asyncio import gc import json import os import struct import sys import time import tracemalloc from datetime import datetime from unittest import mock # Add parent directory to path sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from gunicorn.dirty.protocol import ( DirtyProtocol, make_request, make_chunk_message, make_end_message, make_response, ) from gunicorn.dirty.worker import DirtyWorker from gunicorn.dirty.arbiter import DirtyArbiter from gunicorn.dirty.client import ( DirtyClient, DirtyStreamIterator, DirtyAsyncStreamIterator, ) from gunicorn.config import Config class MockStreamWriter: """Mock StreamWriter that captures written messages.""" def __init__(self): self.messages = [] self._buffer = b"" self.bytes_written = 0 def write(self, data): self._buffer += data self.bytes_written += len(data) async def drain(self): while len(self._buffer) >= DirtyProtocol.HEADER_SIZE: length = struct.unpack( DirtyProtocol.HEADER_FORMAT, self._buffer[:DirtyProtocol.HEADER_SIZE] )[0] total_size = DirtyProtocol.HEADER_SIZE + length if len(self._buffer) >= total_size: msg_data = self._buffer[DirtyProtocol.HEADER_SIZE:total_size] self._buffer = self._buffer[total_size:] self.messages.append(DirtyProtocol.decode(msg_data)) else: break def close(self): pass async def wait_closed(self): pass class MockStreamReader: """Mock StreamReader that yields predefined messages.""" def __init__(self, messages): self._data = b'' for msg in messages: self._data += DirtyProtocol.encode(msg) self._pos = 0 async def readexactly(self, n): if self._pos + n > len(self._data): raise 
asyncio.IncompleteReadError(self._data[self._pos:], n) result = self._data[self._pos:self._pos + n] self._pos += n return result class MockLog: """Silent logger for benchmarks.""" def debug(self, msg, *args): pass def info(self, msg, *args): pass def warning(self, msg, *args): pass def error(self, msg, *args): pass def close_on_exec(self): pass def reopen_files(self): pass def create_worker(): """Create a test worker for benchmarks.""" cfg = Config() cfg.set("dirty_timeout", 300) log = MockLog() with mock.patch('gunicorn.dirty.worker.WorkerTmp'): worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=["benchmark:App"], cfg=cfg, log=log, socket_path="/tmp/benchmark.sock" ) worker.apps = {} worker._executor = None worker.tmp = mock.Mock() return worker def create_arbiter(): """Create a test arbiter for benchmarks.""" cfg = Config() cfg.set("dirty_timeout", 300) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.alive = True arbiter.workers = {1234: mock.Mock()} arbiter.worker_sockets = {1234: '/tmp/worker.sock'} return arbiter class BenchmarkResults: """Store and display benchmark results.""" def __init__(self): self.results = [] def add(self, name, iterations, duration, chunks=None, bytes_total=None, memory_start=None, memory_end=None): throughput = iterations / duration if duration > 0 else 0 result = { "name": name, "iterations": iterations, "duration_s": round(duration, 4), "throughput_per_s": round(throughput, 2), } if chunks: result["chunks_per_s"] = round(chunks / duration, 2) if bytes_total: result["mb_per_s"] = round(bytes_total / (1024 * 1024) / duration, 2) if memory_start is not None and memory_end is not None: result["memory_start_mb"] = round(memory_start / (1024 * 1024), 2) result["memory_end_mb"] = round(memory_end / (1024 * 1024), 2) result["memory_delta_mb"] = round((memory_end - memory_start) / (1024 * 1024), 2) self.results.append(result) def display(self): print("\n" + "=" * 70) print("BENCHMARK RESULTS") print("=" * 70) for 
result in self.results: print(f"\n{result['name']}") print("-" * 50) for key, value in result.items(): if key != "name": print(f" {key}: {value}") print("\n" + "=" * 70) def save_json(self, filepath): with open(filepath, 'w') as f: json.dump({ "timestamp": datetime.now().isoformat(), "results": self.results }, f, indent=2) print(f"Results saved to {filepath}") async def benchmark_worker_streaming_throughput(results, chunk_size=1024, num_chunks=1000): """Benchmark worker streaming throughput with various chunk sizes.""" worker = create_worker() writer = MockStreamWriter() chunk_data = "x" * chunk_size async def sync_gen(): for _ in range(num_chunks): yield chunk_data async def mock_execute(app_path, action, args, kwargs): return sync_gen() gc.collect() tracemalloc.start() memory_start = tracemalloc.get_traced_memory()[0] start = time.perf_counter() with mock.patch.object(worker, 'execute', side_effect=mock_execute): request = make_request("bench-1", "benchmark:App", "stream") await worker.handle_request(request, writer) duration = time.perf_counter() - start memory_end = tracemalloc.get_traced_memory()[0] tracemalloc.stop() total_bytes = chunk_size * num_chunks results.add( f"Worker streaming ({chunk_size}B chunks, {num_chunks} chunks)", iterations=1, duration=duration, chunks=num_chunks, bytes_total=total_bytes, memory_start=memory_start, memory_end=memory_end ) async def benchmark_arbiter_forwarding(results, num_chunks=1000): """Benchmark arbiter message forwarding throughput.""" arbiter = create_arbiter() messages = [] for i in range(num_chunks): messages.append(make_chunk_message(f"bench-{i}", f"data-{i}")) messages.append(make_end_message(f"bench-{num_chunks}")) mock_reader = MockStreamReader(messages) async def mock_get_connection(pid): return mock_reader, MockStreamWriter() arbiter._get_worker_connection = mock_get_connection client_writer = MockStreamWriter() gc.collect() start = time.perf_counter() request = make_request("bench-forward", "benchmark:App", 
"stream") await arbiter._execute_on_worker(1234, request, client_writer) duration = time.perf_counter() - start results.add( f"Arbiter forwarding ({num_chunks} chunks)", iterations=1, duration=duration, chunks=num_chunks, bytes_total=client_writer.bytes_written ) arbiter._cleanup_sync() async def benchmark_streaming_latency(results, iterations=100): """Benchmark time-to-first-chunk and time-to-last-chunk.""" worker = create_worker() first_chunk_times = [] total_times = [] for _ in range(iterations): writer = MockStreamWriter() async def gen_3_chunks(): yield "first" yield "second" yield "third" async def mock_execute(app_path, action, args, kwargs): return gen_3_chunks() start = time.perf_counter() with mock.patch.object(worker, 'execute', side_effect=mock_execute): request = make_request("bench-latency", "benchmark:App", "stream") await worker.handle_request(request, writer) # Find time when first chunk was received if writer.messages: first_chunk_times.append(time.perf_counter() - start) total_times.append(time.perf_counter() - start) avg_first_chunk = sum(first_chunk_times) / len(first_chunk_times) if first_chunk_times else 0 avg_total = sum(total_times) / len(total_times) print(f"\nLatency Results ({iterations} iterations):") print(f" Avg time-to-first-chunk: {avg_first_chunk * 1000:.3f}ms") print(f" Avg time-to-last-chunk: {avg_total * 1000:.3f}ms") results.add( f"Streaming latency ({iterations} iterations)", iterations=iterations, duration=sum(total_times), chunks=iterations * 3 ) async def benchmark_concurrent_streams(results, num_streams=10, chunks_per_stream=100): """Benchmark multiple concurrent streams.""" arbiter = create_arbiter() async def run_stream(stream_id): messages = [] for i in range(chunks_per_stream): messages.append(make_chunk_message(f"stream-{stream_id}", f"chunk-{i}")) messages.append(make_end_message(f"stream-{stream_id}")) mock_reader = MockStreamReader(messages) async def mock_get_connection(pid): return mock_reader, MockStreamWriter() 
arbiter._get_worker_connection = mock_get_connection client_writer = MockStreamWriter() request = make_request(f"bench-concurrent-{stream_id}", "benchmark:App", "stream") await arbiter._execute_on_worker(1234, request, client_writer) return len(client_writer.messages) gc.collect() start = time.perf_counter() # Run streams concurrently tasks = [run_stream(i) for i in range(num_streams)] results_list = await asyncio.gather(*tasks) duration = time.perf_counter() - start total_chunks = sum(results_list) results.add( f"Concurrent streams ({num_streams} streams, {chunks_per_stream} chunks each)", iterations=num_streams, duration=duration, chunks=total_chunks ) arbiter._cleanup_sync() async def benchmark_memory_stability(results, iterations=10, chunks=1000): """Check memory stability over many iterations.""" worker = create_worker() gc.collect() tracemalloc.start() memory_samples = [tracemalloc.get_traced_memory()[0]] for i in range(iterations): writer = MockStreamWriter() async def gen_chunks(): for j in range(chunks): yield f"chunk-{j}" async def mock_execute(app_path, action, args, kwargs): return gen_chunks() with mock.patch.object(worker, 'execute', side_effect=mock_execute): request = make_request(f"bench-mem-{i}", "benchmark:App", "stream") await worker.handle_request(request, writer) gc.collect() memory_samples.append(tracemalloc.get_traced_memory()[0]) tracemalloc.stop() memory_start = memory_samples[0] memory_end = memory_samples[-1] memory_max = max(memory_samples) print(f"\nMemory stability ({iterations} iterations of {chunks} chunks):") print(f" Start: {memory_start / 1024 / 1024:.2f}MB") print(f" End: {memory_end / 1024 / 1024:.2f}MB") print(f" Max: {memory_max / 1024 / 1024:.2f}MB") print(f" Delta: {(memory_end - memory_start) / 1024 / 1024:.2f}MB") results.add( f"Memory stability ({iterations} x {chunks} chunks)", iterations=iterations * chunks, duration=0.001, # Use small non-zero value to avoid division by zero memory_start=memory_start, 
memory_end=memory_end ) class MockClientReader: """Mock async reader that simulates receiving streaming messages.""" def __init__(self, num_chunks, chunk_data): self.num_chunks = num_chunks self.chunk_data = chunk_data self._chunk_idx = 0 self._messages = [] self._build_messages() self._pos = 0 self._data = b'' for msg in self._messages: self._data += DirtyProtocol.encode(msg) def _build_messages(self): for i in range(self.num_chunks): self._messages.append(make_chunk_message(f"bench-{i}", self.chunk_data)) self._messages.append(make_end_message(f"bench-end")) async def readexactly(self, n): if self._pos + n > len(self._data): raise asyncio.IncompleteReadError(self._data[self._pos:], n) result = self._data[self._pos:self._pos + n] self._pos += n return result class MockClientWriter: """Mock async writer for client connection.""" def __init__(self): self._buffer = b"" self._closed = False def write(self, data): self._buffer += data async def drain(self): pass def close(self): self._closed = True async def wait_closed(self): pass async def benchmark_async_client_streaming(results, chunk_size=1024, num_chunks=1000): """ Benchmark DirtyAsyncStreamIterator directly. Measures async iterator overhead vs raw message reading. 
""" chunk_data = "x" * chunk_size # Create mock client with mock reader/writer client = DirtyClient("/tmp/benchmark.sock", timeout=30.0) client._reader = MockClientReader(num_chunks, chunk_data) client._writer = MockClientWriter() gc.collect() tracemalloc.start() memory_start = tracemalloc.get_traced_memory()[0] start = time.perf_counter() # Use the async stream iterator directly iterator = DirtyAsyncStreamIterator(client, "benchmark:App", "stream", (), {}) iterator._started = True # Skip the request sending iterator._request_id = "bench-async" iterator._deadline = time.perf_counter() + 300 # 5 min deadline iterator._last_chunk_time = time.perf_counter() chunks_received = 0 bytes_received = 0 async for chunk in iterator: chunks_received += 1 bytes_received += len(chunk) duration = time.perf_counter() - start memory_end = tracemalloc.get_traced_memory()[0] tracemalloc.stop() results.add( f"Async client streaming ({chunk_size}B chunks, {num_chunks} chunks)", iterations=1, duration=duration, chunks=chunks_received, bytes_total=bytes_received, memory_start=memory_start, memory_end=memory_end ) async def benchmark_sync_client_streaming(results, chunk_size=1024, num_chunks=1000): """ Benchmark DirtyStreamIterator directly (for comparison with async). Note: This runs the sync iterator within an async context for comparison. 
""" chunk_data = "x" * chunk_size # Build raw message data messages_data = b'' for i in range(num_chunks): msg = make_chunk_message(f"bench-{i}", chunk_data) messages_data += DirtyProtocol.encode(msg) messages_data += DirtyProtocol.encode(make_end_message("bench-end")) # Create a mock socket-like object class MockSocket: def __init__(self, data): self._data = data self._pos = 0 self._timeout = None def recv(self, n, flags=0): if self._pos >= len(self._data): return b'' result = self._data[self._pos:self._pos + n] self._pos += len(result) return result def settimeout(self, timeout): self._timeout = timeout # Create mock client client = DirtyClient("/tmp/benchmark.sock", timeout=30.0) client._sock = MockSocket(messages_data) gc.collect() tracemalloc.start() memory_start = tracemalloc.get_traced_memory()[0] start = time.perf_counter() # Use the sync stream iterator iterator = DirtyStreamIterator(client, "benchmark:App", "stream", (), {}) iterator._started = True # Skip the request sending iterator._request_id = "bench-sync" iterator._deadline = time.perf_counter() + 300 # 5 min deadline iterator._last_chunk_time = time.perf_counter() chunks_received = 0 bytes_received = 0 for chunk in iterator: chunks_received += 1 bytes_received += len(chunk) duration = time.perf_counter() - start memory_end = tracemalloc.get_traced_memory()[0] tracemalloc.stop() results.add( f"Sync client streaming ({chunk_size}B chunks, {num_chunks} chunks)", iterations=1, duration=duration, chunks=chunks_received, bytes_total=bytes_received, memory_start=memory_start, memory_end=memory_end ) async def benchmark_async_vs_sync_client_streaming(results, chunk_size=1024, num_chunks=1000): """ Compare stream() vs stream_async() performance with the same workload. 
""" chunk_data = "x" * chunk_size # --- Sync test --- messages_data = b'' for i in range(num_chunks): msg = make_chunk_message(f"bench-{i}", chunk_data) messages_data += DirtyProtocol.encode(msg) messages_data += DirtyProtocol.encode(make_end_message("bench-end")) class MockSocket: def __init__(self, data): self._data = data self._pos = 0 self._timeout = None def recv(self, n, flags=0): if self._pos >= len(self._data): return b'' result = self._data[self._pos:self._pos + n] self._pos += len(result) return result def settimeout(self, timeout): self._timeout = timeout sync_client = DirtyClient("/tmp/benchmark.sock", timeout=30.0) sync_client._sock = MockSocket(messages_data) gc.collect() sync_start = time.perf_counter() sync_iter = DirtyStreamIterator(sync_client, "benchmark:App", "stream", (), {}) sync_iter._started = True sync_iter._request_id = "bench-sync" sync_iter._deadline = time.perf_counter() + 300 # 5 min deadline sync_iter._last_chunk_time = time.perf_counter() sync_chunks = 0 for _ in sync_iter: sync_chunks += 1 sync_duration = time.perf_counter() - sync_start # --- Async test --- async_client = DirtyClient("/tmp/benchmark.sock", timeout=30.0) async_client._reader = MockClientReader(num_chunks, chunk_data) async_client._writer = MockClientWriter() gc.collect() async_start = time.perf_counter() async_iter = DirtyAsyncStreamIterator(async_client, "benchmark:App", "stream", (), {}) async_iter._started = True async_iter._request_id = "bench-async" async_iter._deadline = time.perf_counter() + 300 # 5 min deadline async_iter._last_chunk_time = time.perf_counter() async_chunks = 0 async for _ in async_iter: async_chunks += 1 async_duration = time.perf_counter() - async_start # Report comparison print(f"\nSync vs Async Client Streaming Comparison ({num_chunks} x {chunk_size}B chunks):") print(f" Sync: {sync_duration * 1000:.3f}ms ({sync_chunks} chunks)") print(f" Async: {async_duration * 1000:.3f}ms ({async_chunks} chunks)") if sync_duration > 0: ratio = 
async_duration / sync_duration print(f" Ratio (async/sync): {ratio:.3f}x") results.add( f"Sync client streaming comparison ({chunk_size}B, {num_chunks} chunks)", iterations=1, duration=sync_duration, chunks=sync_chunks, bytes_total=sync_chunks * chunk_size ) results.add( f"Async client streaming comparison ({chunk_size}B, {num_chunks} chunks)", iterations=1, duration=async_duration, chunks=async_chunks, bytes_total=async_chunks * chunk_size ) async def run_quick_benchmarks(): """Run quick benchmarks.""" results = BenchmarkResults() print("Running quick benchmarks...") await benchmark_worker_streaming_throughput(results, chunk_size=64, num_chunks=1000) await benchmark_worker_streaming_throughput(results, chunk_size=1024, num_chunks=1000) await benchmark_arbiter_forwarding(results, num_chunks=1000) await benchmark_streaming_latency(results, iterations=50) # Async client streaming benchmarks await benchmark_async_client_streaming(results, chunk_size=1024, num_chunks=1000) await benchmark_async_vs_sync_client_streaming(results, chunk_size=1024, num_chunks=1000) return results async def run_full_benchmarks(): """Run full benchmark suite including stress tests.""" results = BenchmarkResults() print("Running full benchmark suite...") # Throughput tests with different chunk sizes for chunk_size in [1, 64, 1024, 65536]: await benchmark_worker_streaming_throughput( results, chunk_size=chunk_size, num_chunks=1000 ) # Arbiter forwarding await benchmark_arbiter_forwarding(results, num_chunks=10000) # Latency await benchmark_streaming_latency(results, iterations=100) # Concurrent streams await benchmark_concurrent_streams(results, num_streams=10, chunks_per_stream=100) await benchmark_concurrent_streams(results, num_streams=50, chunks_per_stream=100) # Memory stability await benchmark_memory_stability(results, iterations=20, chunks=1000) # Async client streaming benchmarks for chunk_size in [64, 1024, 65536]: await benchmark_async_client_streaming(results, chunk_size=chunk_size, 
num_chunks=1000) await benchmark_sync_client_streaming(results, chunk_size=chunk_size, num_chunks=1000) # Comparison benchmark await benchmark_async_vs_sync_client_streaming(results, chunk_size=1024, num_chunks=5000) return results def main(): parser = argparse.ArgumentParser(description="Dirty streaming benchmarks") parser.add_argument("--quick", action="store_true", help="Run quick benchmarks only") parser.add_argument("--full", action="store_true", help="Run full benchmark suite") parser.add_argument("--output", "-o", help="Output JSON file path") args = parser.parse_args() if args.full: results = asyncio.run(run_full_benchmarks()) else: results = asyncio.run(run_quick_benchmarks()) results.display() if args.output: results.save_json(args.output) else: # Save to default location output_dir = os.path.dirname(os.path.abspath(__file__)) results_dir = os.path.join(output_dir, "results") os.makedirs(results_dir, exist_ok=True) timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") output_file = os.path.join(results_dir, f"streaming_benchmark_{timestamp}.json") results.save_json(output_file) if __name__ == "__main__": main() benoitc-gunicorn-f5fb19e/benchmarks/quick_bench.sh000077500000000000000000000022131514360242400224100ustar00rootroot00000000000000#!/bin/bash # Quick benchmark for gthread worker set -e cd "$(dirname "$0")" echo "Starting gunicorn with gthread worker..." ../.venv/bin/python -m gunicorn \ --worker-class gthread \ --workers 2 \ --threads 4 \ --worker-connections 1000 \ --bind 127.0.0.1:8765 \ --access-logfile /dev/null \ --error-logfile /dev/null \ --log-level warning \ simple_app:application & GUNICORN_PID=$! 
sleep 3 echo "" echo "=== Benchmark: Simple requests (10000 requests, 100 concurrent) ===" ab -n 10000 -c 100 -k http://127.0.0.1:8765/ 2>&1 | grep -E "(Requests per second|Time per request|Failed requests)" echo "" echo "=== Benchmark: High concurrency (5000 requests, 500 concurrent) ===" ab -n 5000 -c 500 -k http://127.0.0.1:8765/ 2>&1 | grep -E "(Requests per second|Time per request|Failed requests)" echo "" echo "=== Benchmark: Large response (1000 requests, 50 concurrent) ===" ab -n 1000 -c 50 -k http://127.0.0.1:8765/large 2>&1 | grep -E "(Requests per second|Time per request|Failed requests)" echo "" echo "Stopping gunicorn..." kill $GUNICORN_PID 2>/dev/null || true wait $GUNICORN_PID 2>/dev/null || true echo "Done!" benoitc-gunicorn-f5fb19e/benchmarks/results/000077500000000000000000000000001514360242400213015ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/benchmarks/results/queue_refactor_results.json000066400000000000000000000077761514360242400270070ustar00rootroot00000000000000{ "timestamp": "2026-01-24T10:56:33", "results": [ { "scenario": "baseline_10ms", "config": { "dirty_workers": 4, "dirty_threads": 1, "task_action": "sleep_task", "task_args": [ 10 ], "concurrency": 1 }, "total_requests": 1000, "successful": 1000, "failed": 0, "errors": [], "duration_sec": 12.27, "requests_per_sec": 81.5, "latency_ms": { "min": 10.432417009724304, "max": 13.792542013106868, "mean": 12.266892079642275, "stddev": 0.871026700472873, "p50": 12.80679099727422, "p95": 13.078375020995736, "p99": 13.141458010068163 } }, { "scenario": "throughput_10ms", "config": { "dirty_workers": 4, "dirty_threads": 1, "task_action": "sleep_task", "task_args": [ 10 ], "concurrency": 100 }, "total_requests": 5000, "successful": 5000, "failed": 0, "errors": [], "duration_sec": 14.95, "requests_per_sec": 334.4, "latency_ms": { "min": 11.470375000499189, "max": 341.3927500077989, "mean": 294.71728502821645, "stddev": 34.9421432011074, "p50": 305.2922079805285, "p95": 326.4670000062324, 
"p99": 334.32295799138956 } }, { "scenario": "cpu_bound_100ms", "config": { "dirty_workers": 4, "dirty_threads": 1, "task_action": "cpu_task", "task_args": [ 100 ], "concurrency": 20 }, "total_requests": 500, "successful": 500, "failed": 0, "errors": [], "duration_sec": 12.55, "requests_per_sec": 39.8, "latency_ms": { "min": 100.59350001392886, "max": 502.4004160077311, "mean": 493.9748328983551, "stddev": 48.57073135808595, "p50": 502.01483300770633, "p95": 502.21283300197683, "p99": 502.2801249870099 } }, { "scenario": "io_bound_500ms", "config": { "dirty_workers": 4, "dirty_threads": 1, "task_action": "sleep_task", "task_args": [ 500 ], "concurrency": 50 }, "total_requests": 200, "successful": 200, "failed": 0, "errors": [], "duration_sec": 25.19, "requests_per_sec": 7.9, "latency_ms": { "min": 501.3219590182416, "max": 6563.243499986129, "mean": 5566.4884116455505, "stddev": 1566.1525736181566, "p50": 6052.653749997262, "p95": 6553.810708021047, "p99": 6559.503666008823 } }, { "scenario": "mixed_50_50", "config": { "dirty_workers": 4, "dirty_threads": 1, "task_action": "mixed_task", "task_args": [ 50, 50 ], "concurrency": 30 }, "total_requests": 500, "successful": 500, "failed": 0, "errors": [], "duration_sec": 12.98, "requests_per_sec": 38.5, "latency_ms": { "min": 102.34933299943805, "max": 839.0888340072706, "mean": 756.4045974735054, "stddev": 103.21897997316475, "p50": 762.6495829899795, "p95": 832.905125018442, "p99": 836.0978330019861 } }, { "scenario": "overload_10ms", "config": { "dirty_workers": 4, "dirty_threads": 1, "task_action": "sleep_task", "task_args": [ 10 ], "concurrency": 200 }, "total_requests": 2000, "successful": 2000, "failed": 0, "errors": [], "duration_sec": 5.99, "requests_per_sec": 334.1, "latency_ms": { "min": 10.763874975964427, "max": 625.4918330232613, "mean": 565.1407622727129, "stddev": 104.98938999734894, "p50": 590.0453749927692, "p95": 617.4105420068372, "p99": 621.7636249784846 } } ] 
}benoitc-gunicorn-f5fb19e/benchmarks/run_benchmark.py000066400000000000000000000164521514360242400230000ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. #!/usr/bin/env python3 """ Benchmark script for gunicorn gthread worker. This script runs various benchmarks against gunicorn and reports performance metrics. Requires: gunicorn, requests (for warmup), and wrk or ab for load testing. """ import argparse import json import os import signal import subprocess import sys import time from pathlib import Path BENCHMARK_DIR = Path(__file__).parent APP_MODULE = "simple_app:application" def check_dependencies(): """Check if required tools are available.""" # Check for wrk (preferred) or ab for tool in ['wrk', 'ab']: try: subprocess.run([tool, '--version'], capture_output=True, check=False) return tool except FileNotFoundError: continue print("Error: Neither 'wrk' nor 'ab' found. Install one of them.") print(" macOS: brew install wrk") print(" Linux: apt-get install wrk (or apache2-utils for ab)") sys.exit(1) def start_gunicorn(worker_class, workers, threads, connections, bind, extra_args=None): """Start gunicorn server and return the process.""" cmd = [ sys.executable, '-m', 'gunicorn', '--worker-class', worker_class, '--workers', str(workers), '--threads', str(threads), '--worker-connections', str(connections), '--bind', bind, '--access-logfile', '-', '--error-logfile', '-', '--log-level', 'warning', APP_MODULE, ] if extra_args: cmd.extend(extra_args) env = os.environ.copy() env['PYTHONPATH'] = str(BENCHMARK_DIR.parent) proc = subprocess.Popen( cmd, cwd=BENCHMARK_DIR, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) # Wait for server to be ready time.sleep(2) return proc def stop_gunicorn(proc): """Stop the gunicorn server.""" proc.send_signal(signal.SIGTERM) try: proc.wait(timeout=5) except subprocess.TimeoutExpired: proc.kill() proc.wait() def run_wrk_benchmark(url, duration, 
threads, connections): """Run wrk benchmark and return results.""" cmd = [ 'wrk', '-t', str(threads), '-c', str(connections), '-d', f'{duration}s', '--latency', url, ] result = subprocess.run(cmd, capture_output=True, text=True, check=False) return parse_wrk_output(result.stdout) def run_ab_benchmark(url, requests, concurrency): """Run Apache Bench benchmark and return results.""" cmd = [ 'ab', '-n', str(requests), '-c', str(concurrency), '-k', # keepalive url, ] result = subprocess.run(cmd, capture_output=True, text=True, check=False) return parse_ab_output(result.stdout) def parse_wrk_output(output): """Parse wrk output to extract metrics.""" metrics = {} for line in output.split('\n'): if 'Requests/sec' in line: metrics['requests_per_sec'] = float(line.split(':')[1].strip()) elif 'Transfer/sec' in line: metrics['transfer_per_sec'] = line.split(':')[1].strip() elif 'Latency' in line and 'Distribution' not in line: parts = line.split() if len(parts) >= 2: metrics['latency_avg'] = parts[1] elif '50%' in line: metrics['latency_p50'] = line.split()[1] elif '99%' in line: metrics['latency_p99'] = line.split()[1] return metrics def parse_ab_output(output): """Parse ab output to extract metrics.""" metrics = {} for line in output.split('\n'): if 'Requests per second' in line: metrics['requests_per_sec'] = float(line.split(':')[1].split()[0]) elif 'Time per request' in line and 'mean' in line: metrics['latency_avg'] = line.split(':')[1].strip() elif 'Transfer rate' in line: metrics['transfer_per_sec'] = line.split(':')[1].strip() return metrics def run_benchmark_suite(tool, bind_addr): """Run a suite of benchmarks.""" results = {} # Test configurations configs = [ {'name': 'simple', 'path': '/', 'connections': 100}, {'name': 'simple_high_concurrency', 'path': '/', 'connections': 500}, {'name': 'slow_io', 'path': '/slow', 'connections': 50}, {'name': 'large_response', 'path': '/large', 'connections': 100}, ] for config in configs: url = 
f'http://{bind_addr}{config["path"]}' print(f" Running {config['name']}...") if tool == 'wrk': metrics = run_wrk_benchmark( url, duration=10, threads=4, connections=config['connections'], ) else: metrics = run_ab_benchmark( url, requests=10000, concurrency=config['connections'], ) results[config['name']] = metrics print(f" Requests/sec: {metrics.get('requests_per_sec', 'N/A')}") return results def main(): parser = argparse.ArgumentParser(description='Benchmark gunicorn gthread worker') parser.add_argument('--workers', type=int, default=2, help='Number of workers') parser.add_argument('--threads', type=int, default=4, help='Threads per worker') parser.add_argument('--connections', type=int, default=1000, help='Worker connections') parser.add_argument('--bind', default='127.0.0.1:8000', help='Bind address') parser.add_argument('--compare', action='store_true', help='Compare sync vs gthread') parser.add_argument('--output', help='Output JSON file for results') args = parser.parse_args() tool = check_dependencies() print(f"Using benchmark tool: {tool}") all_results = {} if args.compare: # Compare sync and gthread workers for worker_class in ['sync', 'gthread']: print(f"\nBenchmarking {worker_class} worker...") proc = start_gunicorn( worker_class=worker_class, workers=args.workers, threads=args.threads, connections=args.connections, bind=args.bind, ) try: all_results[worker_class] = run_benchmark_suite(tool, args.bind) finally: stop_gunicorn(proc) else: # Just benchmark gthread print("\nBenchmarking gthread worker...") proc = start_gunicorn( worker_class='gthread', workers=args.workers, threads=args.threads, connections=args.connections, bind=args.bind, ) try: all_results['gthread'] = run_benchmark_suite(tool, args.bind) finally: stop_gunicorn(proc) # Print summary print("\n" + "=" * 60) print("BENCHMARK SUMMARY") print("=" * 60) for worker, results in all_results.items(): print(f"\n{worker.upper()} Worker:") for test, metrics in results.items(): rps = 
metrics.get('requests_per_sec', 'N/A') print(f" {test}: {rps} req/s") if args.output: with open(args.output, 'w') as f: json.dump(all_results, f, indent=2) print(f"\nResults saved to {args.output}") if __name__ == '__main__': main() benoitc-gunicorn-f5fb19e/benchmarks/simple_app.py000066400000000000000000000010461514360242400223040ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # Simple WSGI app for benchmarking def application(environ, start_response): """Basic hello world response.""" path = environ.get('PATH_INFO', '/') if path == '/large': body = b'X' * 65536 # 64KB else: body = b'Hello, World!' status = '200 OK' headers = [ ('Content-Type', 'text/plain'), ('Content-Length', str(len(body))), ] start_response(status, headers) return [body] benoitc-gunicorn-f5fb19e/docker/000077500000000000000000000000001514360242400167325ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/docker/.dockerignore000066400000000000000000000000751514360242400214100ustar00rootroot00000000000000.git .github __pycache__ *.pyc .pytest_cache .tox docs tests benoitc-gunicorn-f5fb19e/docker/Dockerfile000066400000000000000000000017231514360242400207270ustar00rootroot00000000000000FROM python:3.12-slim LABEL org.opencontainers.image.source=https://github.com/benoitc/gunicorn LABEL org.opencontainers.image.description="Gunicorn Python WSGI HTTP Server" LABEL org.opencontainers.image.licenses=MIT # Create non-root user RUN useradd --create-home --shell /bin/bash gunicorn WORKDIR /app # Install gunicorn from source COPY pyproject.toml README.md LICENSE ./ COPY gunicorn/ ./gunicorn/ RUN pip install --no-cache-dir . 
# Copy entrypoint script COPY docker/docker-entrypoint.sh /usr/local/bin/ RUN chmod +x /usr/local/bin/docker-entrypoint.sh # Configuration via environment: # GUNICORN_BIND - full bind address (default: 0.0.0.0:8000) # GUNICORN_HOST - bind host (default: 0.0.0.0) # GUNICORN_PORT - bind port (default: 8000) # GUNICORN_WORKERS - number of workers (default: 2 * CPU + 1) # GUNICORN_ARGS - additional arguments (e.g., "--timeout 120") USER gunicorn EXPOSE 8000 ENTRYPOINT ["docker-entrypoint.sh"] CMD ["--help"] benoitc-gunicorn-f5fb19e/docker/docker-entrypoint.sh000066400000000000000000000023401514360242400227450ustar00rootroot00000000000000#!/bin/bash set -e # Allow running other commands (e.g., bash for debugging) if [ "${1:0:1}" = '-' ] || [ -z "${1##*:*}" ]; then # First arg is a flag or contains ':' (app:callable), run gunicorn # Build bind address from GUNICORN_HOST and GUNICORN_PORT, or use GUNICORN_BIND PORT="${GUNICORN_PORT:-8000}" BIND="${GUNICORN_BIND:-${GUNICORN_HOST:-0.0.0.0}:${PORT}}" # Add bind if not specified in args or GUNICORN_ARGS if [[ ! " $* $GUNICORN_ARGS " =~ " --bind " ]] && [[ ! " $* $GUNICORN_ARGS " =~ " -b " ]] && [[ ! "$* $GUNICORN_ARGS" =~ --bind= ]] && [[ ! "$* $GUNICORN_ARGS" =~ -b= ]]; then set -- --bind "$BIND" "$@" fi # Add workers if not specified - default to (2 * CPU_COUNT) + 1 if [[ ! " $* $GUNICORN_ARGS " =~ " --workers " ]] && [[ ! " $* $GUNICORN_ARGS " =~ " -w " ]] && [[ ! "$* $GUNICORN_ARGS" =~ --workers= ]] && [[ ! 
"$* $GUNICORN_ARGS" =~ -w= ]]; then WORKERS="${GUNICORN_WORKERS:-$(( 2 * $(nproc) + 1 ))}" set -- --workers "$WORKERS" "$@" fi # Append GUNICORN_ARGS if set if [ -n "$GUNICORN_ARGS" ]; then exec gunicorn $GUNICORN_ARGS "$@" fi exec gunicorn "$@" fi # Otherwise, run the command as-is (e.g., bash, sh, python) exec "$@" benoitc-gunicorn-f5fb19e/docs/000077500000000000000000000000001514360242400164135ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/docs/README.md000066400000000000000000000006621514360242400176760ustar00rootroot00000000000000# Generate Documentation ## Requirements Install the documentation dependencies with: ```bash pip install -r requirements_dev.txt ``` This provides MkDocs with the Material theme and supporting plugins. ## Build static HTML ```bash mkdocs build ``` The rendered site is emitted into the `site/` directory. ## Preview locally ```bash mkdocs serve ``` This serves the documentation at http://127.0.0.1:8000/ with live reload. benoitc-gunicorn-f5fb19e/docs/content/000077500000000000000000000000001514360242400200655ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/docs/content/2010-news.md000066400000000000000000000142531514360242400217500ustar00rootroot00000000000000 # Changelog - 2010 ## 0.12.0 / 2010-12-22 - Add support for logging configuration using a ini file. It uses the standard Python logging's module Configuration file format and allows anyone to use his custom file handler - Add IPV6 support - Add multidomain application example - Improve gunicorn_django command when importing settings module using DJANGO_SETTINGS_MODULE environment variable - Send appropriate error status on http parsing - Fix pidfile, set permissions so other user can read it and use it. 
- Fix temporary file leaking - Fix setpgrp issue, can now be launched via ubuntu upstart - Set the number of workers to zero on WINCH ## 0.11.2 / 2010-10-30 * Add SERVER_SOFTWARE to the os.environ * Add support for django settings environment variable * Add support for logging configuration in Paster ini-files * Improve arbiter notification in asynchronous workers * Display the right error when a worker can't be used * Fix Django support * Fix HUP with Paster applications * Fix readline in wsgi.input ## 0.11.1 / 2010-09-02 * Implement max-requests feature to prevent memory leaks. * Added 'worker_exit' server hook. * Reseed the random number generator after fork(). * Improve Eventlet worker. * Fix Django command `run_gunicorn`. * Fix the default proc name internal setting. * Workaround to prevent Gevent worker to segfault on MacOSX. ## 0.11.0 / 2010-08-12 * Improve dramatically performances of Gevent and Eventlet workers * Optimize HTTP parsing * Drop Server and Date headers in start_response when provided. * Fix latency issue in async workers ## 0.10.1 / 2010-08-06 * Improve gevent's workers. Add "egg:gunicorn#gevent_wsgi" worker using `gevent.wsgi `_ and "egg:gunicorn#gevent_pywsgi" worker using `gevent.pywsgi `_ . **"egg:gunicorn#gevent"** using our own HTTP parser is still here and is **recommended** for normal uses. Use the "gevent.wsgi" parser if you need really fast connections and don't need streaming, keepalive or ssl. * Add pre/post request hooks * Exit more quietly * Fix gevent dns issue ## 0.10.0 / 2010-07-08 * New HTTP parser. * New HUP behaviour. Re-reads the configuration and then reloads all worker processes without changing the master process id. Helpful for code reloading and monitoring applications like supervisord and runit. * Added a preload configuration parameter. By default, application code is now loaded after a worker forks. This couple with the new HUP handling can be used for dev servers to do hot code reloading. 
Using the preload flag can help a bit in small memory VM's. * Allow people to pass command line arguments to WSGI applications. See: `examples/alt_spec.py `_ * Added an example gevent reloader configuration: `examples/example_gevent_reloader.py `_. * New gevent worker "egg:gunicorn#gevent2", working with gevent.wsgi. * Internal refactoring and various bug fixes. * New documentation website. ## 0.9.1 / 2010-05-26 * Support https via X-Forwarded-Protocol or X-Forwarded-Ssl headers * Fix configuration * Remove -d options which was used instead of -D for daemon. * Fix umask in unix socket ## 0.9.0 / 2010-05-24 * Added *when_ready* hook. Called just after the server is started * Added *preload* setting. Load application code before the worker processes are forked. * Refactored Config * Fix pidfile * Fix QUIT/HUP in async workers * Fix reexec * Documentation improvements ## 0.8.1 / 2010-04-29 * Fix builtins import in config * Fix installation with pip * Fix Tornado WSGI support * Delay application loading until after processing all configuration ## 0.8.0 / 2010-04-22 * Refactored Worker management for better async support. Now use the -k option to set the type of request processing to use * Added support for Tornado_ ## 0.7.2 / 2010-04-15 * Added --spew option to help debugging (installs a system trace hook) * Some fixes in async arbiters * Fix a bug in start_response on error ## 0.7.1 / 2010-04-01 * Fix bug when responses have no body. ## 0.7.0 / 2010-03-26 * Added support for Eventlet_ and Gevent_ based workers. * Added Websockets_ support * Fix Chunked Encoding * Fix SIGWINCH on OpenBSD_ * Fix `PEP 333`_ compliance for the write callable. ## 0.6.5 / 2010-03-11 * Fix pidfile handling * Fix Exception Error ## 0.6.4 / 2010-03-08 * Use cStringIO for performance when possible. * Fix worker freeze when a remote connection closes unexpectedly. ## 0.6.3 / 2010-03-07 * Make HTTP parsing faster. * Various bug fixes ## 0.6.2 / 2010-03-01 * Added support for chunked response. 
* Added proc_name option to the config file. * Improved the HTTP parser. It now uses buffers instead of strings to store temporary data. * Improved performance when sending responses. * Workers are now murdered by age (the oldest is killed first). ## 0.6.1 / 2010-02-24 * Added gunicorn config file support for Django admin command * Fix gunicorn config file. -c was broken. * Removed TTIN/TTOU from workers which blocked other signals. ## 0.6.0 / 2010-02-22 * Added setproctitle support * Change privilege switch behavior. We now work like NGINX, master keeps the permissions, new uid/gid permissions are only set for workers. ## 0.5.1 / 2010-02-22 * Fix umask * Added Debian packaging ## 0.5.0 / 2010-02-20 * Added `configuration file `_ handler. * Added support for pre/post fork hooks * Added support for before_exec hook * Added support for unix sockets * Added launch of workers processes under different user/group * Added umask option * Added SCRIPT_NAME support * Better support of some exotic settings for Django projects * Better support of Paste-compatible applications * Some refactoring to make the code easier to hack * Allow multiple keys in request and response headers .. _Tornado: http://www.tornadoweb.org/ .. _`PEP 333`: https://www.python.org/dev/peps/pep-0333/ .. _Eventlet: http://eventlet.net/ .. _Gevent: http://www.gevent.org/ .. _OpenBSD: https://www.openbsd.org/ .. 
_Websockets: https://html.spec.whatwg.org/multipage/web-sockets.html benoitc-gunicorn-f5fb19e/docs/content/2011-news.md000066400000000000000000000037061514360242400217520ustar00rootroot00000000000000 # Changelog - 2011 ## 0.13.4 / 2011-09-23 - fix util.closerange function used to prevent leaking fds on python 2.5 (typo.md) ## 0.13.3 / 2011-09-19 - refactor gevent worker - prevent leaking fds on reexec - fix inverted request_time computation ## 0.13.2 / 2011-09-17 - Add support for Tornado 2.0 in tornado worker - Improve access logs: allows customisation of the log format & add request time - Logger module is now pluggable - Improve graceful shutdown in Python versions >= 2.6 - Fix post_request root arity for compatibility - Fix sendfile support - Fix Django reloading ## 0.13.1 / 2011-08-22 - Fix unix socket. log argument was missing. ## 0.13.0 / 2011-08-22 - Improve logging: allows file-reopening and add access log file compatible with the `apache combined log format `_ - Add the possibility to set custom SSL headers. X-Forwarded-Protocol and X-Forwarded-SSL are still the default - New `on_reload` hook to customize how gunicorn spawn new workers on SIGHUP - Handle projects with relative path in django_gunicorn command - Preserve path parameters in PATH_INFO - post_request hook now accepts the environ as argument. - When stopping the arbiter, close the listener asap. - Fix Django command `run_gunicorn` in settings reloading - Fix Tornado_ worker exiting - Fix the use of sendfile in wsgi.file_wrapper ## 0.12.2 / 2011-05-18 - Add wsgi.file_wrapper optimised for FreeBSD, Linux & MacOSX (use sendfile if available) - Fix django run_gunicorn command. Make sure we reload the application code. - Fix django localisation - Compatible with gevent 0.14dev ## 0.12.1 / 2011-03-23 - Add "on_starting" hook. 
This hook can be used to set anything before the arbiter really start - Support bdist_rpm in setup - Improve content-length handling (pep 3333) - Improve Django support - Fix daemonizing (#142) - Fix ipv6 handling .. _Tornado: http://www.tornadoweb.org/ benoitc-gunicorn-f5fb19e/docs/content/2012-news.md000066400000000000000000000076461514360242400217620ustar00rootroot00000000000000 # Changelog - 2012 ## 0.17.0 / 2012-12-25 - allows gunicorn to bind to multiple address - add SSL support - add syslog support - add nworkers_changed hook - add response arg for post_request hook - parse command line with argparse (replace deprecated optparse) - fix PWD detection in arbiter - miscellaneous PEP8 fixes ## 0.16.1 / 2012-11-19 - Fix packaging ## 0.16.0 / 2012-11-19 - **Added support for Python 3.2 & 3.3** - Expose --pythonpath command to all gunicorn commands - Honor $PORT environment variable, useful for deployment on heroku - Removed support for Python 2.5 - Make sure we reopen the logs on the console - Fix django settings module detection from path - Reverted timeout for client socket. Fix issue on blocking issues. - Fixed gevent worker ## 0.15.0 / 2012-10-18 - new documentation site on https://gunicorn.org - new website on http://gunicorn.org - add `haproxy PROXY protocol `_ support - add ForwardedAllowIPS option: allows to filter Front-end's IPs allowed to handle X-Forwarded-* headers. - add callable hooks for paster config - add x-forwarded-proto as secure scheme default (Heroku is using this) - allows gunicorn to load a pre-compiled application - support file reopening & reexec for all loggers - initialize the logging config file with defaults. - set timeout for client socket (slow client DoS). 
- NoMoreData, ChunkMissingTerminator, InvalidChunkSize are now IOError exceptions - fix graceful shutdown in gevent - fix limit request line check ## 0.14.6 / 2012-07-26 - fix gevent & subproces - fix request line length check - fix keepalive = 0 - fix tornado worker ## 0.14.5 / 2012-06-24 - fix logging during daemonisation ## 0.14.4 / 2012-06-24 - new --graceful-timeout option - fix multiple issues with request limit - more fixes in django settings resolutions - fix gevent.core import - fix keepalive=0 in eventlet worker - fix handle_error display with the unix worker - fix tornado.wsgi.WSGIApplication calling error - **breaking change**: take the control on graceful reload back. graceful can't be overridden anymore using the on_reload function. ## 0.14.3 / 2012-05-15 - improvement: performance of http.body.Body.readline() - improvement: log HTTP errors in access log like Apache - improvement: display traceback when the worker fails to boot - improvement: makes gunicorn work with gevent 1.0 - examples: websocket example now supports hybi13 - fix: reopen log files after initialization - fix: websockets support - fix: django1.4 support - fix: only load the paster application 1 time ## 0.14.2 / 2012-03-16 - add validate_class validator: allows to use a class or a method to initialize the app during in-code configuration - add support for max_requests in tornado worker - add support for disabling x_forwarded_for_header in tornado worker - gevent_wsgi is now an alias of gevent_pywsgi - Fix gevent_pywsgi worker ## 0.14.1 / 2012-03-02 - fixing source archive, reducing its size ## 0.14.0 / 2012-02-27 - check if Request line is too large: You can now pass the parameter ``--limit-request-line`` or set the ``limit_request_line`` in your configuration file to set the max size of the request line in bytes. - limit the number of headers fields and their size. 
Add ``--limit-request-field`` and ``limit-request-field-size`` settings - add ``p`` variable to the log access format to log pidfile - add ``{HeaderName}o`` variable to the logo access format to log the response header HeaderName - request header is now logged with the variable ``{HeaderName}i`` in the access log file - improve error logging - support logging.configFile - support django 1.4 in both gunicorn_django & run_gunicorn command - improve reload in django run_gunicorn command (should just work now) - allows people to set the ``X-Forwarded-For`` header key and disable it by setting an empty string. - fix support of Tornado - many other fixes. benoitc-gunicorn-f5fb19e/docs/content/2013-news.md000066400000000000000000000061411514360242400217500ustar00rootroot00000000000000 # Changelog - 2013 ## 18.0 / 2013-08-26 - new: add ``-e/--env`` command line argument to pass an environment variables to gunicorn - new: add ``--chdir`` command line argument to specified directory before apps loading. - new: add wsgi.file_wrapper support in async workers - new: add ``--paste`` command line argument to set the paster config file - deprecated: the command ``gunicorn_django`` is now deprecated. You should now run your application with the WSGI interface installed with your project (see https://docs.djangoproject.com/en/1.4/howto/deployment/wsgi/gunicorn/) for more infos. - deprecated: the command ``gunicorn_paste`` is deprecated. You now should use the new ``--paste`` argument to set the configuration file of your paster application. 
- fix: Removes unmatched leading quote from the beginning of the default access log format string - fix: null timeout - fix: gevent worker - fix: don't reload the paster app when using pserve - fix: after closing for error do not keep alive the connection - fix: responses 1xx, 204 and 304 should not force the connection to be closed ## 17.5 / 2013-07-03 - new: add signals documentation - new: add post_worker_init hook for workers - new: try to use gunicorn.conf.py in current folder as the default config file. - fix graceful timeout with the Eventlet worker - fix: don't raise an error when closing the socket if already closed - fix: fix --settings parameter for django application and try to find the django settings when using the ``gunicorn`` command. - fix: give the initial global_conf to paster application - fix: fix 'Expect: 100-continue' support on Python 3 ### New versioning: With this release, the versioning of Gunicorn is changing. Gunicorn is stable since a long time and there is no point to release a "1.0" now. It should have been done since a long time. 0.17 really meant it was the 17th stable version. From the beginning we have only 2 kind of releases: major release: releases with major changes or huge features added services releases: fixes and minor features added So from now we will apply the following versioning ``.``. For example ``17.5`` is a service release. 
## 0.17.4 / 2013-04-24 - fix unix socket address parsing ## 0.17.3 / 2013-04-23 - add systemd sockets support - add ``python -m gunicorn.app.wsgiapp`` support - improve logger class inheritance - exit when the config file isn't found - add the -R option to enable stdio inheritance in daemon mode - don't close file descriptors > 3 in daemon mode - improve STDOUT/STDERR logging - fix pythonpath option - fix pidfile creation on Python 3 - fix gevent worker exit - fix ipv6 detection when the platform isn't supporting it ## 0.17.2 / 2013-01-07 - optimize readline - make imports errors more visible when loading an app or a logging class - fix tornado worker: don't pass ssl options if there are none - fix PEP3333: accept only bytetrings in the response body - fix support on CYGWIN platforms ## 0.17.1 / 2013-01-05 - add syslog facility name setting - fix ``--version`` command line argument - fix wsgi url_scheme for https benoitc-gunicorn-f5fb19e/docs/content/2014-news.md000066400000000000000000000166501514360242400217570ustar00rootroot00000000000000 # Changelog - 2014 !!! note Please see [news](news.md) for the latest changes. ## 19.1.1 / 2014-08-16 ### Changes ### Core - fix [Issue #835](https://github.com/benoitc/gunicorn/issues/835): display correct pid of already running instance - fix [PR #833](https://github.com/benoitc/gunicorn/pull/833): fix `PyTest` class in setup.py. 
### Logging - fix [Issue #838](https://github.com/benoitc/gunicorn/issues/838): statsd logger, send statsd timing metrics in milliseconds - fix [Issue #839](https://github.com/benoitc/gunicorn/issues/839): statsd logger, allows for empty log message while pushing metrics and restore worker number in DEBUG logs - fix [Issue #850](https://github.com/benoitc/gunicorn/issues/850): add timezone to logging - fix [Issue #853](https://github.com/benoitc/gunicorn/issues/853): Respect logger_class setting unless statsd is on ### AioHttp worker - fix [Issue #830](https://github.com/benoitc/gunicorn/issues/830) make sure gaiohttp worker is shipped with gunicorn. ## 19.1 / 2014-07-26 ### Changes ### Core - fix [Issue #785](https://github.com/benoitc/gunicorn/issues/785): handle binary type address given to a client socket address - fix graceful shutdown. make sure QUIT and TERMS signals are switched everywhere. - [Issue #799](https://github.com/benoitc/gunicorn/issues/799): fix support loading config from module - [Issue #805](https://github.com/benoitc/gunicorn/issues/805): fix check for file-like objects - fix [Issue #815](https://github.com/benoitc/gunicorn/issues/815): args validation in WSGIApplication.init - fix [Issue #787](https://github.com/benoitc/gunicorn/issues/787): check if we load a pyc file or not. ### Tornado worker - fix [Issue #771](https://github.com/benoitc/gunicorn/issues/771): support tornado 4.0 - fix [Issue #783](https://github.com/benoitc/gunicorn/issues/783): x_headers error. The x-forwarded-headers option has been removed in `c4873681299212d6082cd9902740eef18c2f14f1 `_. The discussion is available on [PR #633](https://github.com/benoitc/gunicorn/pull/633). ### AioHttp worker - fix: fetch all body in input. 
fix [Issue #803](https://github.com/benoitc/gunicorn/issues/803) - fix: don't install the worker if python < 3.3 - fix [Issue #822](https://github.com/benoitc/gunicorn/issues/822): Support UNIX sockets in gaiohttp worker ### Async worker - fix [Issue #790](https://github.com/benoitc/gunicorn/issues/790): StopIteration shouldn't be caught at this level. ### Logging - add statsd logging handler fix [Issue #748](https://github.com/benoitc/gunicorn/issues/748) ### Paster - fix [Issue #809](https://github.com/benoitc/gunicorn/issues/809): Set global logging configuration from a Paste config. ### Extra - fix RuntimeError in gunicorn.reloader ([Issue #807](https://github.com/benoitc/gunicorn/issues/807)) ### Documentation - update faq: put a note on how `watch logs in the console `_ since many people asked for it. ## 19.0 / 2014-06-12 Gunicorn 19.0 is a major release with new features and fixes. This version improve a lot the usage of Gunicorn with python 3 by adding `two new workers `_ to it: `gthread` a fully threaded async worker using futures and `gaiohttp` a worker using asyncio. ### Breaking Changes ### Switch QUIT and TERM signals With this change, when gunicorn receives a QUIT all the workers are killed immediately and exit and TERM is used for the graceful shutdown. Note: the old behaviour was based on the NGINX but the new one is more correct according the following doc: https://www.gnu.org/software/libc/manual/html_node/Termination-Signals.html also it is complying with the way the signals are sent by heroku: https://devcenter.heroku.com/articles/python-faq#what-constraints-exist-when-developing-applications-on-heroku ### Deprecations `run_gunicorn`, `gunicorn_django` and `gunicorn_paster` are now completely deprecated and will be removed in the next release. Use the `gunicorn` command instead. ### Changes ### core - add aiohttp worker named `gaiohttp` using asyncio. Full async worker on python 3. 
- fix HTTP-violating excess whitespace in write_error output - fix: try to log what happened in the worker after a timeout, add a `worker_abort` hook on SIGABRT signal. - fix: save listener socket name in workers so we can handle buffered keep-alive requests after the listener has closed. - add on_exit hook called just before exiting gunicorn. - add support for python 3.4 - fix: do not swallow unexpected errors when reaping - fix: remove incompatible SSL option with python 2.6 - add new async gthread worker and `--threads` options allows to set multiple threads to listen on connection - deprecate `gunicorn_django` and `gunicorn_paster` - switch QUIT and TERM signal - reap workers in SIGCHLD handler - add universal wheel support - use `email.utils.formatdate` in gunicorn.util.http_date - deprecate the `--debug` option - fix: log exceptions that occur after response start … - allows loading of applications from `.pyc` files (#693) - fix: issue #691, raw_env config file parsing - use a dynamic timeout to wait for the optimal time. (Reduce power usage) - fix python3 support when notifying the arbiter - add: honor $WEB_CONCURRENCY environment variable. Useful for heroku setups. - add: include tz offset in access log - add: include access logs in the syslog handler. - add --reload option for code reloading - add the capability to load `gunicorn.base.Application` without the loading of the arguments of the command line. It allows you to [embed gunicorn in your own application](custom.md). - improve: set wsgi.multithread to True for async workers - fix logging: make sure to redirect wsgi.errors when needed - add: syslog logging can now be done to a unix socket - fix logging: don't try to redirect stdout/stderr to the logfile. - fix logging: don't propagate log - improve logging: file option can be overridden by the gunicorn options `--error-logfile` and `--access-logfile` if they are given. 
- fix: don't override SERVER_* by the Host header - fix: handle_error - add more option to configure SSL - fix: sendfile with SSL - add: worker_int callback (to react on SIGTERM) - fix: don't depend on entry point for internal classes, now absolute modules path can be given. - fix: Error messages are now encoded in latin1 - fix: request line length check - improvement: proxy_allow_ips: Allow proxy protocol if "*" specified - fix: run worker's `setup` method before setting num_workers - fix: FileWrapper inherit from `object` now - fix: Error messages are now encoded in latin1 - fix: don't spam the console on SIGWINCH. - fix: logging -don't stringify T and D logging atoms (#621) - add support for the latest django version - deprecate `run_gunicorn` django option - fix: sys imported twice ### gevent worker - fix: make sure to stop all listeners - fix: monkey patching is now done in the worker - fix: "global name 'hub' is not defined" - fix: reinit `hub` on old versions of gevent - support gevent 1.0 - fix: add subprocess in monkey patching - fix: add support for multiple listener ### eventlet worker - fix: merge duplicate EventletWorker.init_process method (fixes #657) - fix: missing errno import for eventlet sendfile patch - fix: add support for multiple listener ### tornado worker - add graceful stop support benoitc-gunicorn-f5fb19e/docs/content/2015-news.md000066400000000000000000000201471514360242400217540ustar00rootroot00000000000000 # Changelog - 2015 !!! note Please see [news](news.md) for the latest changes. 
## 19.4.3 / 2015/12/30 - fix: don't check if a file is writable using os.stat with SELINUX ([Issue #1171](https://github.com/benoitc/gunicorn/issues/1171)) ## 19.4.2 / 2015/12/29 ### Core - improvement: handle HaltServer in manage_workers ([Issue #1095](https://github.com/benoitc/gunicorn/issues/1095)) - fix: Do not rely on sendfile sending requested count ([Issue #1155](https://github.com/benoitc/gunicorn/issues/1155)) - fix: clarify --no-sendfile default ([Issue #1156](https://github.com/benoitc/gunicorn/issues/1156)) - fix: Catch sendfile failure from no file descriptor ([Issue #1160](https://github.com/benoitc/gunicorn/issues/1160)) ### Logging - fix: Always send access log to syslog if syslog is on - fix: check auth before trying to own a file ([Issue #1157](https://github.com/benoitc/gunicorn/issues/1157)) ### Documentation - fix: Fix Slowloris broken link. ([Issue #1142](https://github.com/benoitc/gunicorn/issues/1142)) - Tweak markup in faq.rst ### Testing - fix: gaiohttp test ([Issue #1164](https://github.com/benoitc/gunicorn/issues/1164)) ## 19.4.1 / 2015/11/25 - fix tornado worker ([Issue #1154](https://github.com/benoitc/gunicorn/issues/1154)) ## 19.4.0 / 2015/11/20 ### Core - fix: make sure that a user is able to access to the logs after dropping a privilege ([Issue #1116](https://github.com/benoitc/gunicorn/issues/1116)) - improvement: inherit the `Exception` class where it needs to be ([Issue #997](https://github.com/benoitc/gunicorn/issues/997)) - fix: make sure headers are always encoded as latin1 RFC 2616 ([Issue #1102](https://github.com/benoitc/gunicorn/issues/1102)) - improvement: reduce arbiter noise ([Issue #1078](https://github.com/benoitc/gunicorn/issues/1078)) - fix: don't close the unix socket when the worker exit ([Issue #1088](https://github.com/benoitc/gunicorn/issues/1088)) - improvement: Make last logged worker count an explicit instance var ([Issue #1078](https://github.com/benoitc/gunicorn/issues/1078)) - improvement: prefix 
config file with its type ([Issue #836](https://github.com/benoitc/gunicorn/issues/836)) - improvement: pidfile handing ([Issue #1042](https://github.com/benoitc/gunicorn/issues/1042)) - fix: catch OSError as well as ValueError on race condition ([Issue #1052](https://github.com/benoitc/gunicorn/issues/1052)) - improve support of ipv6 by backporting urlparse.urlsplit from Python 2.7 to Python 2.6. - fix: raise InvalidRequestLine when the line contains malicious data ([Issue #1023](https://github.com/benoitc/gunicorn/issues/1023)) - fix: fix argument to disable sendfile - fix: add gthread to the list of supported workers ([Issue #1011](https://github.com/benoitc/gunicorn/issues/1011)) - improvement: retry socket binding up to five times upon EADDRNOTAVAIL ([Issue #1004](https://github.com/benoitc/gunicorn/issues/1004)) - **breaking change**: only honor headers that can be encoded in ascii to comply to the RFC 7230 (See [Issue #1151](https://github.com/benoitc/gunicorn/issues/1151)). ### Logging - add new parameters to access log ([Issue #1132](https://github.com/benoitc/gunicorn/issues/1132)) - fix: make sure that files handles are correctly reopened on HUP ([Issue #627](https://github.com/benoitc/gunicorn/issues/627)) - include request URL in error message ([Issue #1071](https://github.com/benoitc/gunicorn/issues/1071)) - get username in access logs ([Issue #1069](https://github.com/benoitc/gunicorn/issues/1069)) - fix statsd logging support on Python 3 ([Issue #1010](https://github.com/benoitc/gunicorn/issues/1010)) ### Testing - use last version of mock. 
- many fixes in Travis CI support - miscellaneous improvements in tests ### Thread worker - fix: Fix self.nr usage in ThreadedWorker so that auto restart works as expected ([Issue #1031](https://github.com/benoitc/gunicorn/issues/1031)) ### Gevent worker - fix quit signal handling ([Issue #1128](https://github.com/benoitc/gunicorn/issues/1128)) - add support for Python 3 ([Issue #1066](https://github.com/benoitc/gunicorn/issues/1066)) - fix: make graceful shutdown thread-safe ([Issue #1032](https://github.com/benoitc/gunicorn/issues/1032)) ### Tornado worker - fix ssl options ([Issue #1146](https://github.com/benoitc/gunicorn/issues/1146), [Issue #1135](https://github.com/benoitc/gunicorn/issues/1135)) - don't check timeout when stopping gracefully ([Issue #1106](https://github.com/benoitc/gunicorn/issues/1106)) ### AIOHttp worker - add SSL support ([Issue #1105](https://github.com/benoitc/gunicorn/issues/1105)) ### Documentation - fix link to proc name setting ([Issue #1144](https://github.com/benoitc/gunicorn/issues/1144)) - fix worker class documentation ([Issue #1141](https://github.com/benoitc/gunicorn/issues/1141), [Issue #1104](https://github.com/benoitc/gunicorn/issues/1104)) - clarify graceful timeout documentation ([Issue #1137](https://github.com/benoitc/gunicorn/issues/1137)) - don't duplicate NGINX config files examples ([Issue #1050](https://github.com/benoitc/gunicorn/issues/1050), [Issue #1048](https://github.com/benoitc/gunicorn/issues/1048)) - add `web.py` framework example ([Issue #1117](https://github.com/benoitc/gunicorn/issues/1117)) - update Debian/Ubuntu installations instructions ([Issue #1112](https://github.com/benoitc/gunicorn/issues/1112)) - clarify `pythonpath` setting description ([Issue #1080](https://github.com/benoitc/gunicorn/issues/1080)) - tweak some example for python3 - clarify `sendfile` documentation - miscellaneous typos in source code comments (thanks!) 
- clarify why REMOTE_ADDR may not be the user's IP address ([Issue #1037](https://github.com/benoitc/gunicorn/issues/1037)) ### Misc - fix: reloader should survive SyntaxError ([Issue #994](https://github.com/benoitc/gunicorn/issues/994)) - fix: expose the reloader class to the worker. ## 19.3.0 / 2015/03/06 ### Core - fix: [Issue #978](https://github.com/benoitc/gunicorn/issues/978) make sure a listener is inheritable - add `check_config` class method to workers - fix: [Issue #983](https://github.com/benoitc/gunicorn/issues/983) fix select timeout in sync worker with multiple connections - allows workers to access to the reloader. close [Issue #984](https://github.com/benoitc/gunicorn/issues/984) - raise TypeError instead of AssertionError ### Logging - make Logger.loglevel a class attribute ### Documentation - fix: [Issue #988](https://github.com/benoitc/gunicorn/issues/988) fix syntax errors in examples/gunicorn_rc ## 19.2.1 / 2015/02/4 ### Logging - expose loglevel in the Logger class ### AsyncIO worker (gaiohttp.md) - fix [Issue #977](https://github.com/benoitc/gunicorn/issues/977) fix initial crash ### Documentation - document security mailing-list in the contributing page. ## 19.2 / 2015/01/30 ### Core - optimize the sync workers when listening on a single interface - add `--sendfile` settings to enable/disable sendfile. fix [Issue #856](https://github.com/benoitc/gunicorn/issues/856) . - add the selectors module to the code base. [Issue #886](https://github.com/benoitc/gunicorn/issues/886) - add `--max-requests-jitter` setting to set the maximum jitter to add to the max-requests setting. 
- fix [Issue #899](https://github.com/benoitc/gunicorn/issues/899) propagate proxy_protocol_info to keep-alive requests - fix [Issue #863](https://github.com/benoitc/gunicorn/issues/863) worker timeout: dynamic timeout has been removed - fix: Avoid world writable file ### Logging - fix [Issue #941](https://github.com/benoitc/gunicorn/issues/941) set logconfig default to paster more trivially - add statsd-prefix config setting: set the prefix to use when emitting statsd metrics - [Issue #832](https://github.com/benoitc/gunicorn/issues/832) log to console by default ### Thread Worker - fix [Issue #908](https://github.com/benoitc/gunicorn/issues/908) make sure the worker can continue to accept requests ### Eventlet Worker - fix [Issue #867](https://github.com/benoitc/gunicorn/issues/867) Fix eventlet shutdown to actively shut down the workers. ### Documentation Many improvements and fixes have been done, see the detailed changelog for more information. benoitc-gunicorn-f5fb19e/docs/content/2016-news.md000066400000000000000000000105731514360242400217570ustar00rootroot00000000000000 # Changelog - 2016 !!! 
note Please see [news](news.md) for the latest changes ## 19.6.0 / 2016/05/21 ### Core & Logging - improvement of the binary upgrade behaviour using USR2: remove file locking ([Issue #1270](https://github.com/benoitc/gunicorn/issues/1270)) - add the ``--capture-output`` setting to capture stdout/stderr tot the log file ([Issue #1271](https://github.com/benoitc/gunicorn/issues/1271)) - Allow disabling ``sendfile()`` via the ``SENDFILE`` environment variable ([Issue #1252](https://github.com/benoitc/gunicorn/issues/1252)) - fix reload under pycharm ([Issue #1129](https://github.com/benoitc/gunicorn/issues/1129)) ### Workers - fix: make sure to remove the signal from the worker pipe ([Issue #1269](https://github.com/benoitc/gunicorn/issues/1269)) - fix: **gthread** worker, handle removed socket in the select loop ([Issue #1258](https://github.com/benoitc/gunicorn/issues/1258)) ## 19.5.0 / 2016/05/10 ### Core - fix: Ensure response to HEAD request won't have message body - fix: lock domain socket and remove on last arbiter exit ([Issue #1220](https://github.com/benoitc/gunicorn/issues/1220)) - improvement: use EnvironmentError instead of socket.error ([Issue #939](https://github.com/benoitc/gunicorn/issues/939)) - add: new ``FORWARDED_ALLOW_IPS`` environment variable ([Issue #1205](https://github.com/benoitc/gunicorn/issues/1205)) - fix: infinite recursion when destroying sockets ([Issue #1219](https://github.com/benoitc/gunicorn/issues/1219)) - fix: close sockets on shutdown ([Issue #922](https://github.com/benoitc/gunicorn/issues/922)) - fix: clean up sys.exc_info calls to drop circular refs ([Issue #1228](https://github.com/benoitc/gunicorn/issues/1228)) - fix: do post_worker_init after load_wsgi ([Issue #1248](https://github.com/benoitc/gunicorn/issues/1248)) ### Workers - fix access logging in gaiohttp worker ([Issue #1193](https://github.com/benoitc/gunicorn/issues/1193)) - eventlet: handle QUIT in a new coroutine ([Issue 
#1217](https://github.com/benoitc/gunicorn/issues/1217)) - gevent: remove obsolete exception clauses in run ([Issue #1218](https://github.com/benoitc/gunicorn/issues/1218)) - tornado: fix extra "Server" response header ([Issue #1246](https://github.com/benoitc/gunicorn/issues/1246)) - fix: unblock the wait loop under python 3.5 in sync worker ([Issue #1256](https://github.com/benoitc/gunicorn/issues/1256)) ### Logging - fix: log message for listener reloading ([Issue #1181](https://github.com/benoitc/gunicorn/issues/1181)) - Let logging module handle traceback printing ([Issue #1201](https://github.com/benoitc/gunicorn/issues/1201)) - improvement: Allow configuring logger_class with statsd_host ([Issue #1188](https://github.com/benoitc/gunicorn/issues/1188)) - fix: traceback formatting ([Issue #1235](https://github.com/benoitc/gunicorn/issues/1235)) - fix: print error logs on stderr and access logs on stdout ([Issue #1184](https://github.com/benoitc/gunicorn/issues/1184)) ### Documentation - Simplify installation instructions in gunicorn.org ([Issue #1072](https://github.com/benoitc/gunicorn/issues/1072)) - Fix URL and default worker type in example_config ([Issue #1209](https://github.com/benoitc/gunicorn/issues/1209)) - update django doc url to 1.8 lts ([Issue #1213](https://github.com/benoitc/gunicorn/issues/1213)) - fix: miscellaneous wording corrections ([Issue #1216](https://github.com/benoitc/gunicorn/issues/1216)) - Add PSF License Agreement of selectors.py to NOTICE (:issue: `1226`) - document LOGGING overriding ([Issue #1051](https://github.com/benoitc/gunicorn/issues/1051)) - put a note that error logs are only errors from Gunicorn ([Issue #1124](https://github.com/benoitc/gunicorn/issues/1124)) - add a note about the requirements of the threads workers under python 2.x ([Issue #1200](https://github.com/benoitc/gunicorn/issues/1200)) - add access_log_format to config example ([Issue #1251](https://github.com/benoitc/gunicorn/issues/1251)) ### Tests - Use 
more pytest.raises() in test_http.py ## 19.4.5 / 2016/01/05 - fix: NameError fileno in gunicorn.http.wsgi ([Issue #1178](https://github.com/benoitc/gunicorn/issues/1178)) ## 19.4.4 / 2016/01/04 - fix: check if a fileobject can be used with sendfile(2.md) ([Issue #1174](https://github.com/benoitc/gunicorn/issues/1174)) - doc: be more descriptive in errorlog option ([Issue #1173](https://github.com/benoitc/gunicorn/issues/1173)) benoitc-gunicorn-f5fb19e/docs/content/2017-news.md000066400000000000000000000056751514360242400217670ustar00rootroot00000000000000 # Changelog - 2017 !!! note Please see [news](news.md) for the latest changes ## 19.7.1 / 2017/03/21 - fix: continue if SO_REUSEPORT seems to be available but fails ([Issue #1480](https://github.com/benoitc/gunicorn/issues/1480)) - fix: support non-decimal values for the umask command line option ([Issue #1325](https://github.com/benoitc/gunicorn/issues/1325)) ## 19.7.0 / 2017/03/01 - The previously deprecated ``gunicorn_django`` command has been removed. Use the [gunicorn-cmd](run.md#gunicorn) command-line interface instead. - The previously deprecated ``django_settings`` setting has been removed. Use the [raw-env](reference/settings.md#raw_env) setting instead. - The default value of [ssl-version](reference/settings.md#ssl_version) has been changed from ``ssl.PROTOCOL_TLSv1`` to ``ssl.PROTOCOL_SSLv23``. 
- fix: initialize the group access list when initgroups is set ([Issue #1297](https://github.com/benoitc/gunicorn/issues/1297)) - add environment variables to gunicorn access log format ([Issue #1291](https://github.com/benoitc/gunicorn/issues/1291)) - add --paste-global-conf option ([Issue #1304](https://github.com/benoitc/gunicorn/issues/1304)) - fix: print access logs to STDOUT ([Issue #1184](https://github.com/benoitc/gunicorn/issues/1184)) - remove upper limit on max header size config ([Issue #1313](https://github.com/benoitc/gunicorn/issues/1313)) - fix: print original exception on AppImportError ([Issue #1334](https://github.com/benoitc/gunicorn/issues/1334)) - use SO_REUSEPORT if available ([Issue #1344](https://github.com/benoitc/gunicorn/issues/1344)) - fix leak of duplicate file descriptor for bound sockets. - add --reload-engine option, support inotify and other backends ([Issue #1368](https://github.com/benoitc/gunicorn/issues/1368), [Issue #1459](https://github.com/benoitc/gunicorn/issues/1459)) - fix: reject request with invalid HTTP versions - add ``child_exit`` callback ([Issue #1394](https://github.com/benoitc/gunicorn/issues/1394)) - add support for eventlet's _AlreadyHandled object ([Issue #1406](https://github.com/benoitc/gunicorn/issues/1406)) - format boot tracebacks properly with reloader ([Issue #1408](https://github.com/benoitc/gunicorn/issues/1408)) - refactor socket activation and fd inheritance for better support of SystemD ([Issue #1310](https://github.com/benoitc/gunicorn/issues/1310)) - fix: no fds are given by default in gunicorn ([Issue #1423](https://github.com/benoitc/gunicorn/issues/1423)) - add ability to pass settings to GUNICORN_CMD_ARGS environment variable which helps in container world ([Issue #1385](https://github.com/benoitc/gunicorn/issues/1385)) - fix: catch access denied to pid file ([Issue #1091](https://github.com/benoitc/gunicorn/issues/1091)) - many additions and improvements to the documentation ### Breaking 
Change - **Python 2.6.0** is the last supported version benoitc-gunicorn-f5fb19e/docs/content/2018-news.md000066400000000000000000000123131514360242400217530ustar00rootroot00000000000000 # Changelog - 2018 !!! note Please see [news](news.md) for the latest changes ## 19.9.0 / 2018/07/03 - fix: address a regression that prevented syslog support from working ([Issue #1668](https://github.com/benoitc/gunicorn/issues/1668), [PR #1773](https://github.com/benoitc/gunicorn/pull/1773)) - fix: correctly set `REMOTE_ADDR` on versions of Python 3 affected by `Python Issue 30205 `_ ([Issue #1755](https://github.com/benoitc/gunicorn/issues/1755), [PR #1796](https://github.com/benoitc/gunicorn/pull/1796)) - fix: show zero response length correctly in access log ([PR #1787](https://github.com/benoitc/gunicorn/pull/1787)) - fix: prevent raising `AttributeError` when ``--reload`` is not passed in case of a `SyntaxError` raised from the WSGI application. ([Issue #1805](https://github.com/benoitc/gunicorn/issues/1805), [PR #1806](https://github.com/benoitc/gunicorn/pull/1806)) - The internal module ``gunicorn.workers.async`` was renamed to ``gunicorn.workers.base_async`` since ``async`` is now a reserved word in Python 3.7. 
([PR #1527](https://github.com/benoitc/gunicorn/pull/1527)) ## 19.8.1 / 2018/04/30 - fix: secure scheme headers when bound to a unix socket ([Issue #1766](https://github.com/benoitc/gunicorn/issues/1766), [PR #1767](https://github.com/benoitc/gunicorn/pull/1767)) ## 19.8.0 / 2018/04/28 - Eventlet 0.21.0 support ([Issue #1584](https://github.com/benoitc/gunicorn/issues/1584)) - Tornado 5 support ([Issue #1728](https://github.com/benoitc/gunicorn/issues/1728), [PR #1752](https://github.com/benoitc/gunicorn/pull/1752)) - support watching additional files with ``--reload-extra-file`` ([PR #1527](https://github.com/benoitc/gunicorn/pull/1527)) - support configuring logging with a dictionary with ``--logging-config-dict`` ([Issue #1087](https://github.com/benoitc/gunicorn/issues/1087), [PR #1110](https://github.com/benoitc/gunicorn/pull/1110), [PR #1602](https://github.com/benoitc/gunicorn/pull/1602)) - add support for the ``--config`` flag in the ``GUNICORN_CMD_ARGS`` environment variable ([Issue #1576](https://github.com/benoitc/gunicorn/issues/1576), [PR #1581](https://github.com/benoitc/gunicorn/pull/1581)) - disable ``SO_REUSEPORT`` by default and add the ``--reuse-port`` setting ([Issue #1553](https://github.com/benoitc/gunicorn/issues/1553), [Issue #1603](https://github.com/benoitc/gunicorn/issues/1603), [PR #1669](https://github.com/benoitc/gunicorn/pull/1669)) - fix: installing `inotify` on MacOS no longer breaks the reloader ([Issue #1540](https://github.com/benoitc/gunicorn/issues/1540), [PR #1541](https://github.com/benoitc/gunicorn/pull/1541)) - fix: do not throw ``TypeError`` when ``SO_REUSEPORT`` is not available ([Issue #1501](https://github.com/benoitc/gunicorn/issues/1501), [PR #1491](https://github.com/benoitc/gunicorn/pull/1491)) - fix: properly decode HTTP paths containing certain non-ASCII characters ([Issue #1577](https://github.com/benoitc/gunicorn/issues/1577), [PR #1578](https://github.com/benoitc/gunicorn/pull/1578)) - fix: remove whitespace 
when logging header values under gevent ([PR #1607](https://github.com/benoitc/gunicorn/pull/1607)) - fix: close unlinked temporary files ([Issue #1327](https://github.com/benoitc/gunicorn/issues/1327), [PR #1428](https://github.com/benoitc/gunicorn/pull/1428)) - fix: parse ``--umask=0`` correctly ([Issue #1622](https://github.com/benoitc/gunicorn/issues/1622), [PR #1632](https://github.com/benoitc/gunicorn/pull/1632)) - fix: allow loading applications using relative file paths ([Issue #1349](https://github.com/benoitc/gunicorn/issues/1349), [PR #1481](https://github.com/benoitc/gunicorn/pull/1481)) - fix: force blocking mode on the gevent sockets ([Issue #880](https://github.com/benoitc/gunicorn/issues/880), [PR #1616](https://github.com/benoitc/gunicorn/pull/1616)) - fix: preserve leading `/` in request path ([Issue #1512](https://github.com/benoitc/gunicorn/issues/1512), [PR #1511](https://github.com/benoitc/gunicorn/pull/1511)) - fix: forbid contradictory secure scheme headers - fix: handle malformed basic authentication headers in access log ([Issue #1683](https://github.com/benoitc/gunicorn/issues/1683), [PR #1684](https://github.com/benoitc/gunicorn/pull/1684)) - fix: defer handling of ``USR1`` signal to a new greenlet under gevent ([Issue #1645](https://github.com/benoitc/gunicorn/issues/1645), [PR #1651](https://github.com/benoitc/gunicorn/pull/1651)) - fix: the threaded worker would sometimes close the wrong keep-alive connection under Python 2 ([Issue #1698](https://github.com/benoitc/gunicorn/issues/1698), [PR #1699](https://github.com/benoitc/gunicorn/pull/1699)) - fix: re-open log files on ``USR1`` signal using ``handler._open`` to support subclasses of ``FileHandler`` ([Issue #1739](https://github.com/benoitc/gunicorn/issues/1739), [PR #1742](https://github.com/benoitc/gunicorn/pull/1742)) - deprecation: the ``gaiohttp`` worker is deprecated, see the [worker-class](reference/settings.md#worker_class) documentation for more information ([Issue 
#1338](https://github.com/benoitc/gunicorn/issues/1338), [PR #1418](https://github.com/benoitc/gunicorn/pull/1418), [PR #1569](https://github.com/benoitc/gunicorn/pull/1569)) benoitc-gunicorn-f5fb19e/docs/content/2019-news.md000066400000000000000000000107711514360242400217620ustar00rootroot00000000000000 # Changelog - 2019 !!! note Please see [news](news.md) for the latest changes ## 20.0.4 / 2019/11/26 - fix binding a socket using the file descriptor - remove support for the `bdist_rpm` build ## 20.0.3 / 2019/11/24 - fixed load of a config file without a Python extension - fixed `socketfromfd.fromfd` when defaults are not set !!! note ``` ## 20.0.2 / 2019/11/23 - fix changelog ## 20.0.1 / 2019/11/23 - fixed the way the config module is loaded. `__file__` is now available - fixed `wsgi.input_terminated`. It is always true. - use the highest protocol version of openssl by default - only support Python >= 3.5 - added `__repr__` method to `Config` instance - fixed support of AIX platform and musl libc in `socketfromfd.fromfd` function - fixed support of applications loaded from a factory function - fixed chunked encoding support to prevent any `request smuggling `_ - Capture os.sendfile before patching in gevent and eventlet workers. fix `RecursionError`. - removed locking in reloader when adding new files - load the WSGI application before the loader to pick up all files {note} as documented in Flask and other places. 
``` ## 19.10.0 / 2019/11/23 - unblock select loop during reload of a sync worker - security fix: http desync attack - handle `wsgi.input_terminated` - added support for str and bytes in unix socket addresses - fixed `max_requests` setting - header values are now encoded as LATIN1, not ASCII - fixed `InotifyReloader`: handle `module.__file__` is None - fixed compatibility with tornado 6 - fixed root logging - Prevent removal of unix sockets from `reuse_port` - Clear tornado ioloop before os.fork - Miscellaneous fixes and improvement for linting using Pylint ## 20.0 / 2019/10/30 - Fixed `fdopen` `RuntimeWarning` in Python 3.8 - Added check and exception for str type on value in Response process_headers method. - Ensure WSGI header value is string before conducting regex search on it. - Added pypy3 to list of tested environments - Grouped `StopIteration` and `KeyboardInterrupt` exceptions with same body together in Arbiter.run() - Added `setproctitle` module to `extras_require` in setup.py - Avoid unnecessary chown of temporary files - Logging: Handle auth type case insensitively - Removed `util.import_module` - Removed fallback for `types.SimpleNamespace` in tests utils - Use `SourceFileLoader` instead of `execfile_` - Use `importlib` instead of `__import__` and `eval` - Fixed eventlet patching - Added optional datadog tags for statsd metrics - Header values now are encoded using latin-1, not ascii. - Rewritten `parse_address` util added test - Removed redundant super() arguments - Simplify `futures` import in gthread module - Fixed `worker_connections` setting to also affect the Gthread worker type - Fixed setting max_requests - Bump minimum Eventlet and Gevent versions to 0.24 and 1.4 - Use Python default SSL cipher list by default - handle `wsgi.input_terminated` extension - Simplify Paste Deployment documentation - Fix root logging: root and logger are same level. 
- Fixed typo in ssl_version documentation - Documented systemd deployment unit examples - Added systemd sd_notify support - Fixed typo in gthread.py - Added `tornado `_ 5 and 6 support - Declare our setuptools dependency - Added support to `--bind` to open file descriptors - Document how to serve WSGI app modules from Gunicorn - Provide guidance on X-Forwarded-For access log in documentation - Add support for named constants in the `--ssl-version` flag - Clarify log format usage of header & environment in documentation - Fixed systemd documentation to properly setup gunicorn unix socket - Prevent removal unix socket for reuse_port - Fix `ResourceWarning` when reading a Python config module - Remove unnecessary call to dict keys method - Support str and bytes for UNIX socket addresses - fixed `InotifyReloadeder`: handle `module.__file__` is None - `/dev/shm` as a convenient alternative to making your own tmpfs mount in fchmod FAQ - fix examples to work on python3 - Fix typo in `--max-requests` documentation - Clear tornado ioloop before os.fork - Miscellaneous fixes and improvement for linting using Pylint ### Breaking Change - Removed gaiohttp worker - Drop support for Python 2.x - Drop support for EOL Python 3.2 and 3.3 - Drop support for Paste Deploy server blocks benoitc-gunicorn-f5fb19e/docs/content/2020-news.md000066400000000000000000000001621514360242400217430ustar00rootroot00000000000000 # Changelog - 2020 !!! note Please see [news](news.md) for the latest changes benoitc-gunicorn-f5fb19e/docs/content/2021-news.md000066400000000000000000000035351514360242400217530ustar00rootroot00000000000000 # Changelog - 2021 !!! 
note Please see [news](news.md) for the latest changes ## 20.1.0 - 2021-02-12 - document WEB_CONCURRENCY is set by, at least, Heroku - capture peername from accept: Avoid calls to getpeername by capturing the peer name returned by accept - log a warning when a worker was terminated due to a signal - fix tornado usage with latest versions of Django - add support for python -m gunicorn - fix systemd socket activation example - allows to set wsgi application in config file using `wsgi_app` - document `--timeout = 0` - always close a connection when the number of requests exceeds the max requests - Disable keepalive during graceful shutdown - kill tasks in the gthread workers during upgrade - fix latency in gevent worker when accepting new requests - fix file watcher: handle errors when new worker reboot and ensure the list of files is kept - document the default name and path of the configuration file - document how variables impact configuration - document the `$PORT` environment variable - added milliseconds option to request_time in access_log - added PIP requirements to be used for example - remove version from the Server header - fix sendfile: use `socket.sendfile` instead of `os.sendfile` - reloader: use absolute path to prevent empty `InotifyError` when a file is added to the working directory - Add --print-config option to print the resolved settings at startup. 
- remove the `--log-dict-config` CLI flag because it never had a working format (the `logconfig_dict` setting in configuration files continues to work) ### Breaking changes - minimum version is Python 3.5 - remove version from the Server header ** Documentation ** ** Others ** - miscellaneous changes in the code base to be a better citizen with Python 3 - remove dead code - fix documentation generation benoitc-gunicorn-f5fb19e/docs/content/2023-news.md000066400000000000000000000014721514360242400217530ustar00rootroot00000000000000 # Changelog - 2023 ## 21.2.0 - 2023-07-19 - fix thread worker: revert change considering connection as idle . !!! note This is fixing the bad file description error. 21.1.0 - 2023-07-18 =================== - fix thread worker: fix socket removal from the queue ## 21.0.1 - 2023-07-17 - fix documentation build ## 21.0.0 - 2023-07-17 - support python 3.11 - fix gevent and eventlet workers - fix threads support (gththread.md): improve performance and unblock requests - SSL: now use SSLContext object - HTTP parser: miscellaneous fixes - remove unnecessary setuid calls - fix testing - improve logging - miscellaneous fixes to core engine *** RELEASE NOTE *** We made this release major to start our new release cycle. More info will be provided on our discussion forum. 
benoitc-gunicorn-f5fb19e/docs/content/2024-news.md000066400000000000000000000102641514360242400217530ustar00rootroot00000000000000 # Changelog - 2024 ## 23.0.0 - 2024-08-10 - minor docs fixes ([PR #3217](https://github.com/benoitc/gunicorn/pull/3217), [PR #3089](https://github.com/benoitc/gunicorn/pull/3089), [PR #3167](https://github.com/benoitc/gunicorn/pull/3167)) - worker_class parameter accepts a class ([PR #3079](https://github.com/benoitc/gunicorn/pull/3079)) - fix deadlock if request terminated during chunked parsing ([PR #2688](https://github.com/benoitc/gunicorn/pull/2688)) - permit receiving Transfer-Encodings: compress, deflate, gzip ([PR #3261](https://github.com/benoitc/gunicorn/pull/3261)) - permit Transfer-Encoding headers specifying multiple encodings. note: no parameters, still ([PR #3261](https://github.com/benoitc/gunicorn/pull/3261)) - sdist generation now explicitly excludes sphinx build folder ([PR #3257](https://github.com/benoitc/gunicorn/pull/3257)) - decode bytes-typed status (as can be passed by gevent) as utf-8 instead of raising `TypeError` ([PR #2336](https://github.com/benoitc/gunicorn/pull/2336)) - raise correct Exception when encounting invalid chunked requests ([PR #3258](https://github.com/benoitc/gunicorn/pull/3258)) - the SCRIPT_NAME and PATH_INFO headers, when received from allowed forwarders, are no longer restricted for containing an underscore ([PR #3192](https://github.com/benoitc/gunicorn/pull/3192)) - include IPv6 loopback address ``[::1]`` in default for [forwarded-allow-ips](reference/settings.md#forwarded_allow_ips) and [proxy-allow-ips](reference/settings.md#proxy_allow_ips) ([PR #3192](https://github.com/benoitc/gunicorn/pull/3192)) !!! 
note - The SCRIPT_NAME change mitigates a regression that appeared first in the 22.0.0 release - Review your [forwarded-allow-ips](reference/settings.md#forwarded_allow_ips) setting if you are still not seeing the SCRIPT_NAME transmitted - Review your [forwarder-headers](reference/settings.md#forwarder_headers) setting if you are missing headers after upgrading from a version prior to 22.0.0 ### Breaking changes - refuse requests where the uri field is empty ([PR #3255](https://github.com/benoitc/gunicorn/pull/3255)) - refuse requests with invalid CR/LF/NUL in header field values ([PR #3253](https://github.com/benoitc/gunicorn/pull/3253)) - remove temporary ``--tolerate-dangerous-framing`` switch from 22.0 ([PR #3260](https://github.com/benoitc/gunicorn/pull/3260)) - If any of the breaking changes affect you, be aware that now refused requests can pose a security problem, especially so in setups involving request pipe-lining and/or proxies. ## 22.0.0 - 2024-04-17 - use `utime` to notify workers liveness - migrate setup to pyproject.toml - fix numerous security vulnerabilities in HTTP parser (closing some request smuggling vectors) - parsing additional requests is no longer attempted past unsupported request framing - on HTTP versions < 1.1 support for chunked transfer is refused (only used in exploits) - requests conflicting configured or passed SCRIPT_NAME now produce a verbose error - Trailer fields are no longer inspected for headers indicating secure scheme - support Python 3.12 ### Breaking changes - minimum version is Python 3.7 - the limitations on valid characters in the HTTP method have been bounded to Internet Standards - requests specifying unsupported transfer coding (order.md) are refused by default (rare.md) - HTTP methods are no longer casefolded by default (IANA method registry contains none affected) - HTTP methods containing the number sign (#) are no longer accepted by default (rare.md) - HTTP versions < 1.0 or >= 2.0 are no longer accepted by 
default (rare, only HTTP/1.1 is supported) - HTTP versions consisting of multiple digits or containing a prefix/suffix are no longer accepted - HTTP header field names Gunicorn cannot safely map to variables are silently dropped, as in other software - HTTP headers with empty field name are refused by default (no legitimate use cases, used in exploits) - requests with both Transfer-Encoding and Content-Length are refused by default (such a message might indicate an attempt to perform request smuggling) - empty transfer codings are no longer permitted (reportedly seen with really old & broken proxies) ### Security - fix CVE-2024-1135 benoitc-gunicorn-f5fb19e/docs/content/2026-news.md000066400000000000000000000303761514360242400217630ustar00rootroot00000000000000 # Changelog - 2026 ## 25.1.0 - 2026-02-13 ### New Features - **Control Interface (gunicornc)**: Add interactive control interface for managing running Gunicorn instances, similar to birdc for BIRD routing daemon ([PR #3505](https://github.com/benoitc/gunicorn/pull/3505)) - Unix socket-based communication with JSON protocol - Interactive mode with readline support and command history - Commands: `show all/workers/dirty/config/stats/listeners` - Worker management: `worker add/remove/kill`, `dirty add/remove` - Server control: `reload`, `reopen`, `shutdown` - New settings: `--control-socket`, `--control-socket-mode`, `--no-control-socket` - New CLI tool: `gunicornc` for connecting to control socket - See [Control Interface Guide](guides/gunicornc.md) for details - **Dirty Stash**: Add global shared state between workers via `dirty.stash` ([PR #3503](https://github.com/benoitc/gunicorn/pull/3503)) - In-memory key-value store accessible by all workers - Supports get, set, delete, clear, keys, and has operations - Useful for sharing state like feature flags, rate limits, or cached data - **Dirty Binary Protocol**: Implement efficient binary protocol for dirty arbiter IPC using TLV (Type-Length-Value) encoding ([PR 
#3500](https://github.com/benoitc/gunicorn/pull/3500)) - More efficient than JSON for binary data - Supports all Python types: str, bytes, int, float, bool, None, list, dict - Better performance for large payloads - **Dirty TTIN/TTOU Signals**: Add dynamic worker scaling for dirty arbiters ([PR #3504](https://github.com/benoitc/gunicorn/pull/3504)) - Send SIGTTIN to increase dirty workers - Send SIGTTOU to decrease dirty workers - Respects minimum worker constraints from app configurations ### Changes - **ASGI Worker**: Promoted from beta to stable - **Dirty Arbiters**: Now marked as beta feature ### Documentation - Fix Markdown formatting in /configure documentation --- ## 25.0.3 - 2026-02-07 ### Bug Fixes - Fix RuntimeError when StopIteration is raised inside ASGI response body coroutine (PEP 479 compliance) - Fix deprecation warning for passing maxsplit as positional argument in `re.split()` (Python 3.13+) --- ## 25.0.2 - 2026-02-06 ### Bug Fixes - Fix ASGI concurrent request failures through nginx proxy by normalizing sockaddr tuples to handle both 2-tuple (IPv4) and 4-tuple (IPv6) formats ([PR #3485](https://github.com/benoitc/gunicorn/pull/3485)) - Fix graceful disconnect handling for ASGI worker to properly handle client disconnects without raising exceptions ([PR #3485](https://github.com/benoitc/gunicorn/pull/3485)) - Fix lazy import of dirty module for gevent compatibility - prevents import errors when concurrent.futures is imported before gevent monkey-patching ([PR #3483](https://github.com/benoitc/gunicorn/pull/3483)) ### Changes - Refactor: Extract `_normalize_sockaddr` utility function for consistent socket address handling across workers - Add license headers to all Python source files - Update copyright year to 2026 in LICENSE and NOTICE files --- ## 25.0.1 - 2026-02-02 ### Bug Fixes - Fix ASGI streaming responses (SSE) hanging: add chunked transfer encoding for HTTP/1.1 responses without Content-Length header. 
Without chunked encoding, clients wait for connection close to determine end-of-response. ### Changes - Update celery_alternative example to use FastAPI with native ASGI worker and uvloop for async task execution ### Testing - Add ASGI compliance test suite with Docker-based integration tests covering HTTP, WebSocket, streaming, lifespan, framework integration (Starlette, FastAPI), HTTP/2, and concurrency scenarios --- ## 25.0.0 - 2026-02-01 ### New Features - **Dirty Arbiters**: Separate process pool for executing long-running, blocking operations (AI model loading, heavy computation) without blocking HTTP workers ([PR #3460](https://github.com/benoitc/gunicorn/pull/3460)) - Inspired by Erlang's dirty schedulers - Asyncio-based with Unix socket IPC - Stateful workers that persist loaded resources - New settings: `--dirty-app`, `--dirty-workers`, `--dirty-timeout`, `--dirty-threads`, `--dirty-graceful-timeout` - Lifecycle hooks: `on_dirty_starting`, `dirty_post_fork`, `dirty_worker_init`, `dirty_worker_exit` - **Per-App Worker Allocation for Dirty Arbiters**: Control how many dirty workers load each app for memory optimization with heavy models ([PR #3473](https://github.com/benoitc/gunicorn/pull/3473)) - Set `workers` class attribute on DirtyApp (e.g., `workers = 2`) - Or use config format `module:class:N` (e.g., `myapp:HeavyModel:2`) - Requests automatically routed to workers with the target app - New exception `DirtyNoWorkersAvailableError` for graceful error handling - Example: 8 workers × 10GB model = 80GB → with `workers=2`: 20GB (75% savings) - **HTTP/2 Support (Beta)**: Native HTTP/2 (RFC 7540) support for improved performance with modern clients ([PR #3468](https://github.com/benoitc/gunicorn/pull/3468)) - Multiplexed streams over a single connection - Header compression (HPACK) - Flow control and stream prioritization - Works with gthread, gevent, and ASGI workers - New settings: `--http-protocols`, `--http2-max-concurrent-streams`, 
`--http2-initial-window-size`, `--http2-max-frame-size`, `--http2-max-header-list-size` - Requires SSL/TLS and h2 library: `pip install gunicorn[http2]` - See [HTTP/2 Guide](guides/http2.md) for details - New example: `examples/http2_gevent/` with Docker and tests - **HTTP 103 Early Hints**: Support for RFC 8297 Early Hints to enable browsers to preload resources before the final response ([PR #3468](https://github.com/benoitc/gunicorn/pull/3468)) - WSGI: `environ['wsgi.early_hints'](headers)` callback - ASGI: `http.response.informational` message type - Works with both HTTP/1.1 and HTTP/2 - **uWSGI Protocol for ASGI Worker**: The ASGI worker now supports receiving requests via the uWSGI binary protocol from nginx ([PR #3467](https://github.com/benoitc/gunicorn/pull/3467)) ### Bug Fixes - Fix HTTP/2 ALPN negotiation for gevent and eventlet workers when `do_handshake_on_connect` is False (the default). The TLS handshake is now explicitly performed before checking `selected_alpn_protocol()`. - Fix setproctitle initialization with systemd socket activation ([#3465](https://github.com/benoitc/gunicorn/issues/3465)) - Fix `Expect: 100-continue` handling: ignore the header for HTTP/1.0 requests since 100-continue is only valid for HTTP/1.1+ ([PR #3463](https://github.com/benoitc/gunicorn/pull/3463)) - Fix missing `_expected_100_continue` attribute in UWSGIRequest - Disable setproctitle on macOS to prevent segfaults during process title updates - Publish full exception traceback when the application fails to load ([#3462](https://github.com/benoitc/gunicorn/issues/3462)) ### Deprecations - **Eventlet Worker**: The `eventlet` worker is deprecated and will be removed in Gunicorn 26.0. Eventlet itself is [no longer actively maintained](https://eventlet.readthedocs.io/en/latest/asyncio/migration.html). Please migrate to `gevent`, `gthread`, or another supported worker type. 
### Changes - Remove obsolete Makefile targets ([PR #3471](https://github.com/benoitc/gunicorn/pull/3471)) --- ## 24.1.1 - 2026-01-24 ### Bug Fixes - Fix `forwarded_allow_ips` and `proxy_allow_ips` to remain as strings for backward compatibility with external tools like uvicorn. Network validation now uses strict mode to detect invalid CIDR notation (e.g., `192.168.1.1/24` where host bits are set) ([#3458](https://github.com/benoitc/gunicorn/issues/3458), [PR #3459](https://github.com/benoitc/gunicorn/pull/3459)) --- ## 24.1.0 - 2026-01-23 ### New Features - **Official Docker Image**: Gunicorn now publishes official Docker images to GitHub Container Registry at `ghcr.io/benoitc/gunicorn` - Based on Python 3.12 slim image - Uses recommended worker formula (2 × CPU + 1) - Configurable via environment variables - **PROXY Protocol v2 Support**: Extended PROXY protocol implementation to support the binary v2 format in addition to the existing text-based v1 format ([PR #3451](https://github.com/benoitc/gunicorn/pull/3451)) - New `--proxy-protocol` modes: `off`, `v1`, `v2`, `auto` - `auto` mode (default when enabled) detects v1 or v2 automatically - v2 binary format is more efficient and supports additional metadata - Works with HAProxy, AWS NLB/ALB, and other PROXY protocol v2 sources - **CIDR Network Support**: `--forwarded-allow-ips` and `--proxy-allow-from` now accept CIDR notation (e.g., `192.168.0.0/16`) for specifying trusted networks ([PR #3449](https://github.com/benoitc/gunicorn/pull/3449)) - **Socket Backlog Metric**: New `gunicorn.socket.backlog` gauge metric reports the current socket backlog size on Linux systems ([PR #3450](https://github.com/benoitc/gunicorn/pull/3450)) - **InotifyReloader Enhancement**: The inotify-based reloader now watches newly imported modules, not just those loaded at startup ([PR #3447](https://github.com/benoitc/gunicorn/pull/3447)) ### Bug Fixes - Fix signal handling regression where SIGCLD alias caused "Unhandled signal: cld" 
errors on Linux when workers fail during boot ([#3453](https://github.com/benoitc/gunicorn/discussions/3453)) - Fix socket blocking mode on keepalive connections preventing SSL handshake failures with async workers ([PR #3452](https://github.com/benoitc/gunicorn/pull/3452)) - Use smaller buffer size in `finish_body()` for faster timeout detection on slow or abandoned connections ([PR #3453](https://github.com/benoitc/gunicorn/pull/3453)) - Handle `SSLWantReadError` in `finish_body()` to prevent worker hangs during SSL renegotiation ([PR #3448](https://github.com/benoitc/gunicorn/pull/3448)) - Log SIGTERM as info level instead of warning to reduce noise in orchestrated environments ([PR #3446](https://github.com/benoitc/gunicorn/pull/3446)) - Print exception details to stderr when worker fails to boot ([PR #3443](https://github.com/benoitc/gunicorn/pull/3443)) - Fix `unreader.unread()` to prepend data to buffer instead of appending ([PR #3442](https://github.com/benoitc/gunicorn/pull/3442)) - Prevent `RecursionError` when pickling Config objects ([PR #3441](https://github.com/benoitc/gunicorn/pull/3441)) - Use proper exception chaining with `raise from` in glogging.py ([PR #3440](https://github.com/benoitc/gunicorn/pull/3440)) --- ## 24.0.0 - 2026-01-23 ### New Features - **ASGI Worker (Beta)**: Native asyncio-based ASGI support for running async Python frameworks like FastAPI, Starlette, and Quart without external dependencies ([PR #3444](https://github.com/benoitc/gunicorn/pull/3444)) - HTTP/1.1 with keepalive connections - WebSocket support - Lifespan protocol for startup/shutdown hooks - Optional uvloop for improved performance - New settings: `--asgi-loop`, `--asgi-lifespan`, `--root-path` - **uWSGI Binary Protocol**: Support for receiving requests from nginx via `uwsgi_pass` directive, enabling efficient binary protocol communication ([PR #3444](https://github.com/benoitc/gunicorn/pull/3444)) - New settings: `--protocol uwsgi`, `--uwsgi-allow-from` - 
**Documentation Migration**: Migrated documentation from Sphinx to MkDocs with Material theme for improved navigation and mobile experience ([PR #3426](https://github.com/benoitc/gunicorn/pull/3426)) ### Security - **eventlet**: Require eventlet >= 0.40.3 to address CVE-2021-21419 (websocket memory exhaustion) and CVE-2025-58068 (HTTP request smuggling) ([PR #3445](https://github.com/benoitc/gunicorn/pull/3445)) - **gevent**: Require gevent >= 24.10.1 to address CVE-2023-41419 (HTTP request smuggling) and CVE-2024-3219 (socket.socketpair vulnerability) ([PR #3445](https://github.com/benoitc/gunicorn/pull/3445)) - **tornado**: Require tornado >= 6.5.0 to address CVE-2025-47287 (HTTP request smuggling) and other security fixes ([PR #3445](https://github.com/benoitc/gunicorn/pull/3445)) ### Changes - Documentation now hosted at https://gunicorn.org - Updated license configuration to PEP 639 format for uv compatibility !!! warning "ASGI Worker Beta" The ASGI worker is a beta feature. While tested, the API and behavior may change in future releases. Please report any issues on GitHub. benoitc-gunicorn-f5fb19e/docs/content/404.md000066400000000000000000000012271514360242400207200ustar00rootroot00000000000000# Page Not Found The page you're looking for doesn't exist or has moved. benoitc-gunicorn-f5fb19e/docs/content/CNAME000066400000000000000000000000141514360242400206260ustar00rootroot00000000000000gunicorn.orgbenoitc-gunicorn-f5fb19e/docs/content/asgi.md000066400000000000000000000144251514360242400213400ustar00rootroot00000000000000# ASGI Worker Gunicorn includes a native ASGI worker that enables running async Python web frameworks like FastAPI, Starlette, and Quart without external dependencies like Uvicorn. 
## Quick Start ```bash # Install gunicorn pip install gunicorn # Run an ASGI application gunicorn myapp:app --worker-class asgi --workers 4 ``` For FastAPI applications: ```bash gunicorn main:app --worker-class asgi --bind 0.0.0.0:8000 ``` ## Features The ASGI worker provides: - **HTTP/1.1** with keepalive connections - **WebSocket** support for real-time applications - **Lifespan protocol** for startup/shutdown hooks - **Optional uvloop** for improved performance - **SSL/TLS** support - **uWSGI protocol** for nginx `uwsgi_pass` integration ## Configuration ### Worker Class Set the worker class to `asgi`: ```bash gunicorn myapp:app --worker-class asgi ``` Or in a configuration file: ```python # gunicorn.conf.py worker_class = "asgi" ``` ### Event Loop Control which asyncio event loop implementation to use: | Value | Description | |----------|-------------| | `auto` | Use uvloop if available, otherwise asyncio (default) | | `asyncio`| Use Python's built-in asyncio event loop | | `uvloop` | Use uvloop (must be installed separately) | ```bash gunicorn myapp:app --worker-class asgi --asgi-loop uvloop ``` To use uvloop, install it first: ```bash pip install uvloop ``` ### Lifespan Protocol The lifespan protocol lets your application run code at startup and shutdown. This is essential for frameworks that need to initialize database connections, caches, or background tasks. 
| Value | Description | |--------|-------------| | `auto` | Detect if app supports lifespan, enable if so (default) | | `on` | Always run lifespan protocol (fail if unsupported) | | `off` | Never run lifespan protocol | ```bash gunicorn myapp:app --worker-class asgi --asgi-lifespan on ``` Example FastAPI application using lifespan: ```python from contextlib import asynccontextmanager from fastapi import FastAPI @asynccontextmanager async def lifespan(app: FastAPI): # Startup: initialize resources print("Starting up...") yield # Shutdown: cleanup resources print("Shutting down...") app = FastAPI(lifespan=lifespan) ``` ### Root Path When running behind a reverse proxy that mounts your application at a subpath, set `root_path` so your application knows its mount point: ```bash gunicorn myapp:app --worker-class asgi --root-path /api ``` This is equivalent to the `SCRIPT_NAME` in WSGI applications. ### Worker Connections Control the maximum number of concurrent connections per worker: ```bash gunicorn myapp:app --worker-class asgi --worker-connections 1000 ``` !!! note Unlike sync workers, the `--threads` option has no effect on ASGI workers. Use `--worker-connections` to control concurrency. ## WebSocket Support The ASGI worker supports WebSocket connections out of the box. No additional configuration is required. 
Example with Starlette: ```python from starlette.applications import Starlette from starlette.routing import WebSocketRoute async def websocket_endpoint(websocket): await websocket.accept() while True: data = await websocket.receive_text() await websocket.send_text(f"Echo: {data}") app = Starlette(routes=[ WebSocketRoute("/ws", websocket_endpoint), ]) ``` ## Production Deployment ### With Nginx (HTTP Proxy) ```nginx upstream gunicorn { server 127.0.0.1:8000; } server { listen 80; server_name example.com; location / { proxy_pass http://gunicorn; proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; } # WebSocket support location /ws { proxy_pass http://gunicorn; proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "upgrade"; proxy_set_header Host $host; } } ``` ### With Nginx (uWSGI Protocol) For better performance, you can use nginx's native uWSGI protocol support: ```bash gunicorn myapp:app --worker-class asgi --protocol uwsgi --bind 127.0.0.1:8000 ``` ```nginx upstream gunicorn { server 127.0.0.1:8000; } server { listen 80; server_name example.com; location / { uwsgi_pass gunicorn; include uwsgi_params; } } ``` !!! note WebSocket connections are not supported when using the uWSGI protocol. Use HTTP proxy for WebSocket endpoints. See [uWSGI Protocol](uwsgi.md) for more details on uWSGI protocol configuration. 
### Recommended Settings For production ASGI deployments: ```python # gunicorn.conf.py worker_class = "asgi" workers = 4 # Number of worker processes worker_connections = 1000 # Max connections per worker keepalive = 5 # Keepalive timeout timeout = 120 # Worker timeout graceful_timeout = 30 # Graceful shutdown timeout # Performance tuning asgi_loop = "auto" # Use uvloop if available asgi_lifespan = "auto" # Auto-detect lifespan support ``` ## Comparison with Other ASGI Servers | Feature | Gunicorn ASGI | Uvicorn | Hypercorn | |---------|---------------|---------|-----------| | Process management | Built-in | External | Built-in | | HTTP/2 | Yes | No | Yes | | WebSocket | Yes | Yes | Yes | | Lifespan | Yes | Yes | Yes | | uvloop support | Yes | Yes | Yes | !!! note HTTP/2 requires SSL/TLS and the h2 library. See [HTTP/2 Support](guides/http2.md) for details. Gunicorn's ASGI worker provides the same process management, logging, and configuration capabilities you're familiar with from WSGI deployments. ## Troubleshooting ### Lifespan startup failed If you see "ASGI lifespan startup failed", your application may not properly implement the lifespan protocol. Either fix the application or set `--asgi-lifespan off`. ### Connection limits If you're hitting connection limits, increase `--worker-connections` or add more workers with `--workers`. 
### Slow responses under load Try using uvloop for better performance: ```bash pip install uvloop gunicorn myapp:app --worker-class asgi --asgi-loop uvloop ``` ## See Also - [Settings Reference](reference/settings.md#asgi_loop) - All ASGI-related settings - [Deploy](deploy.md) - General deployment guidance - [Design](design.md) - Worker architecture overview benoitc-gunicorn-f5fb19e/docs/content/assets/000077500000000000000000000000001514360242400213675ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/docs/content/assets/gunicorn.svg000066400000000000000000000307761514360242400237510ustar00rootroot00000000000000 image/svg+xml g benoitc-gunicorn-f5fb19e/docs/content/assets/javascripts/000077500000000000000000000000001514360242400237205ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/docs/content/assets/javascripts/toc-collapse.js000066400000000000000000000044621514360242400266510ustar00rootroot00000000000000// Collapsible TOC for settings page (function() { function initCollapsibleTOC() { // Only apply to pages with many TOC items (like settings) var tocNav = document.querySelector('.md-nav--secondary'); if (!tocNav) return; // Skip if already initialized if (tocNav.dataset.tocCollapse === 'true') return; tocNav.dataset.tocCollapse = 'true'; var tocItems = tocNav.querySelectorAll('.md-nav__item'); if (tocItems.length < 20) return; // Find all top-level TOC items that have nested lists var topList = tocNav.querySelector('.md-nav__list'); if (!topList) return; var sections = topList.children; for (var i = 0; i < sections.length; i++) { (function(section) { var nestedNav = section.querySelector('.md-nav'); if (!nestedNav) return; var link = section.querySelector('.md-nav__link'); if (!link) return; // Skip if already has toggle if (link.querySelector('.toc-toggle')) return; // Collapse by default nestedNav.style.display = 'none'; // Create toggle button var toggle = document.createElement('span'); toggle.className = 'toc-toggle'; toggle.innerHTML = '+'; 
toggle.style.float = 'right'; toggle.style.marginRight = '0.5rem'; toggle.style.fontWeight = 'bold'; toggle.style.cursor = 'pointer'; toggle.style.userSelect = 'none'; link.appendChild(toggle); // Toggle function for this specific section function toggleSection(e) { if (e) { e.preventDefault(); e.stopPropagation(); } if (nestedNav.style.display === 'none') { nestedNav.style.display = 'block'; toggle.innerHTML = '−'; } else { nestedNav.style.display = 'none'; toggle.innerHTML = '+'; } } // Click on toggle button toggle.onclick = toggleSection; })(sections[i]); } } // Run on DOM ready if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', initCollapsibleTOC); } else { initCollapsibleTOC(); } // Re-run on instant navigation (MkDocs Material) if (typeof document$ !== 'undefined') { document$.subscribe(initCollapsibleTOC); } })(); benoitc-gunicorn-f5fb19e/docs/content/assets/stylesheets/000077500000000000000000000000001514360242400237435ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/docs/content/assets/stylesheets/home.css000066400000000000000000000210241514360242400254040ustar00rootroot00000000000000/* ============================================ Gunicorn Landing Page Inspired by Caddy: minimal, spacious, clean ============================================ */ .home { --accent: #00a650; --accent-hover: #00c853; --accent-dark: #008542; --teal: #00bfa5; --text: #1a1a2e; --text-muted: #555; --bg: #fff; --bg-alt: #f8faf8; --border: #e0e6e0; --code-bg: #0d1117; --max-width: 900px; width: 100%; max-width: none; margin: 0; padding: 0; font-size: 1.0625rem; line-height: 1.7; color: var(--text); } [data-md-color-scheme="slate"] .home { --text: #e6e6e6; --text-muted: #aaa; --bg: #0d1117; --bg-alt: #161b22; --border: #30363d; } /* Remove MkDocs constraints */ .md-main__inner { margin: 0; max-width: none; } .md-content { max-width: none; } .md-content__inner { margin: 0; padding: 0; } /* ============================================ Sections - 
Caddy-style vertical flow ============================================ */ .home section { padding: 5rem 2rem; } .home section:nth-child(even) { background: var(--bg-alt); } .home .container { max-width: var(--max-width); margin: 0 auto; } /* ============================================ Hero ============================================ */ .hero { text-align: center; padding: 6rem 2rem 5rem; } .hero .container { max-width: 700px; } .hero__logo { width: 350px !important; max-width: 350px !important; min-width: 350px; height: auto; margin-bottom: 2rem; } .hero h1 { font-size: 3rem; font-weight: 700; line-height: 1.15; margin: 0 0 1.5rem 0; letter-spacing: -0.02em; white-space: nowrap; } .hero__tagline { font-size: 1.25rem; color: var(--text-muted); margin: 0 0 2.5rem 0; max-width: 550px; margin-left: auto; margin-right: auto; } .hero__buttons { display: flex; gap: 1rem; justify-content: center; flex-wrap: wrap; margin-bottom: 3rem; } .btn { display: inline-flex; align-items: center; gap: 0.5rem; padding: 0.875rem 1.75rem; font-size: 1rem; font-weight: 500; text-decoration: none; border-radius: 6px; transition: all 0.15s ease; } .btn--primary { background: linear-gradient(135deg, var(--accent) 0%, var(--accent-hover) 100%); color: #fff; box-shadow: 0 4px 12px rgba(0, 166, 80, 0.3); } .btn--primary:hover { box-shadow: 0 6px 20px rgba(0, 166, 80, 0.4); transform: translateY(-2px); } .btn--secondary { background: transparent; color: var(--text); border: 1px solid var(--border); } .btn--secondary:hover { border-color: var(--accent); color: var(--accent); } /* Terminal */ .terminal { background: var(--code-bg); border-radius: 8px; overflow: hidden; text-align: left; max-width: 500px; margin: 0 auto; box-shadow: 0 8px 30px rgba(0,0,0,0.12); } .terminal__header { background: #161b22; padding: 0.75rem 1rem; display: flex; gap: 6px; } .terminal__dot { width: 12px; height: 12px; border-radius: 50%; } .terminal__dot--red { background: #ff5f56; } .terminal__dot--yellow { 
background: #ffbd2e; } .terminal__dot--green { background: #27c93f; } .terminal__body { padding: 1.25rem 1.5rem; font-family: 'SF Mono', Monaco, Consolas, monospace; font-size: 0.9rem; line-height: 1.8; color: #c9d1d9; } .terminal__line { display: block; } .terminal__prompt { color: var(--accent-hover); user-select: none; } .terminal__comment { color: #6e7681; } /* ============================================ Why Gunicorn - 3 pillars ============================================ */ .why h2 { text-align: center; font-size: 2rem; margin: 0 0 3rem 0; } .pillars { display: grid; grid-template-columns: repeat(3, 1fr); gap: 2rem; } .pillar h3 { font-size: 1.125rem; margin: 0 0 0.5rem 0; } .pillar p { color: var(--text-muted); margin: 0; font-size: 0.9375rem; } /* ============================================ Frameworks ============================================ */ .frameworks h2 { text-align: center; font-size: 1.75rem; margin: 0 0 0.5rem 0; } .frameworks__subtitle { text-align: center; color: var(--text-muted); margin: 0 0 2rem 0; } .frameworks__list { display: flex; flex-wrap: wrap; justify-content: center; gap: 0.75rem; } .framework-tag { padding: 0.5rem 1rem; background: var(--bg); border: 1px solid var(--border); border-radius: 100px; font-size: 0.875rem; font-weight: 500; transition: all 0.15s ease; } [data-md-color-scheme="slate"] .framework-tag { background: var(--bg-alt); } .framework-tag:hover { border-color: var(--accent); color: var(--accent); } .framework-tag--new { background: var(--accent); color: #fff; border-color: var(--accent); } /* ============================================ Workers ============================================ */ .workers h2 { font-size: 1.75rem; margin: 0 0 2rem 0; } .workers__grid { display: grid; grid-template-columns: 1fr 1fr; gap: 1rem; } .worker { padding: 1.5rem; background: var(--bg); border: 1px solid var(--border); border-radius: 8px; text-decoration: none; color: inherit; transition: border-color 0.15s ease; } 
[data-md-color-scheme="slate"] .worker { background: var(--bg-alt); } .worker:hover { border-color: var(--accent); } .worker h3 { font-size: 1rem; margin: 0 0 0.25rem 0; display: flex; align-items: center; gap: 0.5rem; } .worker p { color: var(--text-muted); font-size: 0.875rem; margin: 0; } .badge { font-size: 0.625rem; font-weight: 700; padding: 0.125rem 0.375rem; background: var(--accent); color: #fff; border-radius: 3px; text-transform: uppercase; letter-spacing: 0.05em; } /* ============================================ Quick Links ============================================ */ .quick-links { text-align: center; } .quick-links h2 { font-size: 1.75rem; margin: 0 0 2rem 0; } .quick-links__grid { display: grid; grid-template-columns: repeat(4, 1fr); gap: 1rem; text-align: left; } .quick-link { padding: 1.25rem; background: var(--bg); border: 1px solid var(--border); border-radius: 8px; text-decoration: none; color: inherit; transition: border-color 0.15s ease; } [data-md-color-scheme="slate"] .quick-link { background: var(--bg-alt); } .quick-link:hover { border-color: var(--accent); } .quick-link strong { display: block; margin-bottom: 0.25rem; } .quick-link span { font-size: 0.875rem; color: var(--text-muted); } /* ============================================ Sponsors ============================================ */ .sponsors { text-align: center; } .sponsors h2 { font-size: 1.75rem; margin: 0 0 0.5rem 0; } .sponsors p { color: var(--text-muted); margin: 0 0 2rem 0; } .sponsors__logos { display: flex; flex-wrap: wrap; justify-content: center; align-items: center; gap: 2rem; margin-bottom: 2rem; min-height: 60px; } .sponsors__logos img { max-height: 50px; max-width: 150px; filter: grayscale(100%); opacity: 0.7; transition: all 0.15s ease; } .sponsors__logos img:hover { filter: grayscale(0%); opacity: 1; } [data-md-color-scheme="slate"] .sponsors__logos img { filter: grayscale(100%) brightness(1.5); } [data-md-color-scheme="slate"] .sponsors__logos img:hover { 
filter: grayscale(0%) brightness(1); } .sponsors__placeholder { color: var(--text-muted); font-size: 0.875rem; padding: 1rem 2rem; border: 2px dashed var(--border); border-radius: 8px; } /* ============================================ Footer CTA ============================================ */ .home-footer { text-align: center; } .home-footer h2 { font-size: 1.75rem; margin: 0 0 1rem 0; } .home-footer p { color: var(--text-muted); margin: 0 0 2rem 0; } .home-footer__links { display: flex; justify-content: center; gap: 2rem; } .home-footer__links a { color: var(--text-muted); text-decoration: none; font-size: 0.9375rem; } .home-footer__links a:hover { color: var(--accent); } /* ============================================ Responsive ============================================ */ @media (max-width: 768px) { .home section { padding: 3.5rem 1.5rem; } .hero h1 { font-size: 2.25rem; } .pillars { grid-template-columns: 1fr; gap: 1.5rem; } .workers__grid { grid-template-columns: 1fr; } .quick-links__grid { grid-template-columns: 1fr 1fr; } } @media (max-width: 480px) { .hero h1 { font-size: 1.875rem; } .hero__buttons { flex-direction: column; } .btn { width: 100%; justify-content: center; } .quick-links__grid { grid-template-columns: 1fr; } } benoitc-gunicorn-f5fb19e/docs/content/community.md000066400000000000000000000023301514360242400224310ustar00rootroot00000000000000# Community Connect with the project through these channels. ## Project management & discussions Project maintenance guidelines live on the [wiki](https://github.com/benoitc/gunicorn/wiki/Project-management). GitHub is used for: - [Bug reports](https://github.com/benoitc/gunicorn/issues) — search before opening a new issue. - [Discussions](https://github.com/benoitc/gunicorn/discussions) — Q&A and usage tips. - [Feature planning](https://github.com/benoitc/gunicorn/issues) — development and project management topics. 
## IRC Join the Gunicorn channel on [Libera Chat](https://libera.chat/) at [`#gunicorn`](https://web.libera.chat/?channels=#gunicorn). ## Issue tracking File bugs, enhancements, and tasks in the [GitHub issue tracker](https://github.com/benoitc/gunicorn/issues). ## Security issues Report security vulnerabilities privately to [`security@gunicorn.org`](mailto:security@gunicorn.org); only core developers subscribe to this list. ## Contributing Start with the [contributing guide](https://github.com/benoitc/gunicorn/blob/master/CONTRIBUTING.md) for development workflow, code style, and review expectations. New contributors are welcome—open a draft pull request early to gather feedback. benoitc-gunicorn-f5fb19e/docs/content/configure.md000066400000000000000000000041131514360242400223670ustar00rootroot00000000000000 # Configuration Overview Gunicorn reads configuration from five places, in increasing order of priority: 1. Environment variables, for settings that support them. 2. Framework-specific configuration (currently Paste Deploy only). 3. A Python configuration file `gunicorn.conf.py` (default in the working directory). 4. The `GUNICORN_CMD_ARGS` environment variable. 5. Command-line arguments. If a configuration file is provided both via `GUNICORN_CMD_ARGS` and the CLI, only the file specified on the command line is used. !!! note Print the fully resolved configuration: ```bash gunicorn --print-config APP_MODULE ``` Validate configuration and exit: ```bash gunicorn --check-config APP_MODULE ``` This is also a quick way to confirm that your application can start. ## Command line Options set on the command line override framework settings and values from the configuration file. Not every setting has a command-line flag; run ```bash gunicorn -h ``` for the complete list. The CLI also exposes `--version`, which is not part of the main [settings reference](reference/settings.md). ## Configuration file Provide a Python file (for example `gunicorn.conf.py`). 
Gunicorn executes the file on every start or reload, so any valid Python is allowed: ```python import multiprocessing bind = "127.0.0.1:8000" workers = multiprocessing.cpu_count() * 2 + 1 ``` Every configuration key is documented in the [settings reference](reference/settings.md). ## Framework settings At present only Paste Deploy applications expose framework-specific settings. If you have ideas for Django or other frameworks, open an [issue](https://github.com/benoitc/gunicorn/issues). ### Paste applications Reference Gunicorn as the server in your INI file: ```ini [server:main] use = egg:gunicorn#main host = 192.168.0.1 port = 80 workers = 2 proc_name = brim ``` Gunicorn merges any recognised parameters into the base configuration. Values from the configuration file and command line still override these defaults. benoitc-gunicorn-f5fb19e/docs/content/custom.md000066400000000000000000000030441514360242400217220ustar00rootroot00000000000000 # Custom Application !!! info "Added in 19.0" Use Gunicorn as part of your own WSGI application by subclassing `gunicorn.app.base.BaseApplication`. Example: create a tiny WSGI app and load it with a custom application: ```text --8<-- "examples/standalone_app.py" ``` ## Using server hooks Provide hooks through configuration, just like a standard Gunicorn deployment. For example, a `pre_fork` hook: ```python def pre_fork(server, worker): print(f"pre-fork server {server} worker {worker}", file=sys.stderr) if __name__ == "__main__": options = { "bind": "127.0.0.1:8080", "workers": number_of_workers(), "pre_fork": pre_fork, } ``` ## Direct usage of existing WSGI apps Run Gunicorn from Python to serve a WSGI application instance at runtime—useful for rolling deploys or packaging with PEX. Gunicorn exposes `gunicorn.app.wsgiapp`, which accepts any WSGI app (for example a Flask or Django instance). 
Assuming your package is `exampleapi` and the application is `app`: ```bash python -m gunicorn.app.wsgiapp exampleapi:app ``` All CLI flags and configuration files still apply: ```bash # Custom parameters python -m gunicorn.app.wsgiapp exampleapi:app --bind=0.0.0.0:8081 --workers=4 # Using a config file python -m gunicorn.app.wsgiapp exampleapi:app -c config.py ``` For PEX builds use `-c gunicorn` at build time so the packaged app accepts the entry point at runtime: ```bash pex . -v -c gunicorn -o compiledapp.pex ./compiledapp.pex exampleapi:app -c gunicorn_config.py ``` benoitc-gunicorn-f5fb19e/docs/content/deploy.md000066400000000000000000000232251514360242400217070ustar00rootroot00000000000000# Deploying Gunicorn We strongly recommend running Gunicorn behind a proxy server. ## Nginx configuration Although many HTTP proxies exist, we recommend [Nginx](https://nginx.org/). When using the default synchronous workers you must ensure the proxy buffers slow clients; otherwise Gunicorn becomes vulnerable to denial-of-service attacks. Use [Hey](https://github.com/rakyll/hey) to verify proxy behaviour. An example configuration for fast clients with Nginx ([source](https://github.com/benoitc/gunicorn/blob/master/examples/nginx.conf)): ```nginx title="nginx.conf" --8<-- "examples/nginx.conf" ``` To support streaming requests/responses or patterns such as Comet, long polling, or WebSockets, disable proxy buffering and run Gunicorn with an async worker class: ```nginx location @proxy_to_app { proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header Host $http_host; proxy_redirect off; proxy_buffering off; proxy_pass http://app_server; } ``` To ignore aborted requests (for example, health checks that close connections prematurely) enable [`proxy_ignore_client_abort`](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_client_abort): ```nginx proxy_ignore_client_abort on; ``` !!! 
note
    The default value for `proxy_ignore_client_abort` is `off`. If it remains off
    Nginx logs will report error 499 and Gunicorn may log `Ignoring EPIPE` when
    the log level is `debug`.

Pass protocol information to Gunicorn so applications can generate correct URLs.
Add this header to your `location` block:

```nginx
proxy_set_header X-Forwarded-Proto $scheme;
```

If Nginx runs on a different host, tell Gunicorn which proxies are trusted so it
accepts the `X-Forwarded-*` headers:

```bash
gunicorn -w 3 --forwarded-allow-ips="10.170.3.217,10.170.3.220" test:app
```

When all traffic comes from trusted proxies (for example Heroku) you can set
`--forwarded-allow-ips='*'`. This is **dangerous** if untrusted clients can reach
Gunicorn directly, because forged headers could make your application serve
secure content over plain HTTP.

Gunicorn 19 changed the handling of `REMOTE_ADDR` to conform to
[RFC 3875](https://www.rfc-editor.org/rfc/rfc3875), meaning it now records the
proxy IP rather than the upstream client. To log the real client address, set
[`access_log_format`](reference/settings.md#access_log_format) to include
`X-Forwarded-For`:

```text
%({x-forwarded-for}i)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"
```

When binding Gunicorn to a UNIX socket `REMOTE_ADDR` will be empty.

## PROXY Protocol

The [PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt)
allows load balancers and reverse proxies to pass original client connection
information (IP address, port) to backend servers. This is especially useful
when TLS termination happens at the proxy layer.

Gunicorn supports both PROXY protocol v1 (text format) and v2 (binary format). 
### Configuration Enable PROXY protocol with the `--proxy-protocol` option: ```bash # Auto-detect v1 or v2 (recommended) gunicorn --proxy-protocol auto app:app # Force v1 only (text format) gunicorn --proxy-protocol v1 app:app # Force v2 only (binary format, more efficient) gunicorn --proxy-protocol v2 app:app ``` Using `--proxy-protocol` without a value is equivalent to `auto`. !!! warning "Security" Only enable PROXY protocol when Gunicorn is behind a trusted proxy that sends PROXY headers. Configure [`--proxy-allow-from`](reference/settings.md#proxy_allow_ips) to restrict which IPs can send PROXY protocol headers. ### HAProxy HAProxy can send PROXY protocol headers to backends. Example configuration: ```haproxy frontend https_front bind *:443 ssl crt /etc/ssl/certs/site.pem default_backend gunicorn_back backend gunicorn_back # Send PROXY protocol v2 (binary, more efficient) server gunicorn 127.0.0.1:8000 send-proxy-v2 # Or use v1 (text format) # server gunicorn 127.0.0.1:8000 send-proxy ``` Start Gunicorn to accept PROXY protocol: ```bash gunicorn -b 127.0.0.1:8000 --proxy-protocol v2 --proxy-allow-from 127.0.0.1 app:app ``` ### stunnel [stunnel](https://www.stunnel.org/) can terminate TLS and forward connections with PROXY protocol headers: ```ini # /etc/stunnel/stunnel.conf [https] accept = 443 connect = 127.0.0.1:8000 cert = /etc/ssl/certs/stunnel.pem key = /etc/ssl/certs/stunnel.key protocol = proxy ``` The `protocol = proxy` directive tells stunnel to prepend PROXY protocol v1 headers to forwarded connections. ### AWS/ELB AWS Network Load Balancers (NLB) and Application Load Balancers (ALB) support PROXY protocol v2. Enable it in the target group settings, then configure Gunicorn: ```bash gunicorn --proxy-protocol v2 --proxy-allow-from '*' app:app ``` !!! note When using `--proxy-allow-from '*'` ensure Gunicorn is not directly accessible from the internet—only through the load balancer. 
## Using virtual environments

Install Gunicorn inside your project
[virtual environment](https://pypi.python.org/pypi/virtualenv) to keep versions
isolated:

```bash
mkdir ~/venvs/
virtualenv ~/venvs/webapp
source ~/venvs/webapp/bin/activate
pip install gunicorn
deactivate
```

Force installation into the active virtual environment with `--ignore-installed`:

```bash
source ~/venvs/webapp/bin/activate
pip install -I gunicorn
```

## Monitoring

!!! note
    Do not enable Gunicorn's daemon mode when using process monitors. These
    supervisors expect to manage the direct child process.

### Gaffer

Use [Gaffer](https://gaffer.readthedocs.io/) with *gafferd* to manage Gunicorn:

```ini
[process:gunicorn]
cmd = gunicorn -w 3 test:app
cwd = /path/to/project
```

Create a `Procfile` if you prefer:

```procfile
gunicorn = gunicorn -w 3 test:app
```

Start Gunicorn via Gaffer:

```bash
gaffer start
```

Or load it into a running *gafferd* instance:

```bash
gaffer load
```

### runit

[runit](http://smarden.org/runit/) is a popular supervisor. A sample service
script (see the
[full example](https://github.com/benoitc/gunicorn/blob/master/examples/gunicorn_rc)):

```bash
#!/bin/sh
GUNICORN=/usr/local/bin/gunicorn
ROOT=/path/to/project
PID=/var/run/gunicorn.pid
APP=main:application
if [ -f $PID ]; then rm $PID; fi
cd $ROOT
exec $GUNICORN -c $ROOT/gunicorn.conf.py --pid=$PID $APP
```

Save as `/etc/sv/<service_name>/run`, make it executable, and symlink into
`/etc/service/`. runit will then supervise Gunicorn. 
### Supervisor

[Supervisor](http://supervisord.org/) configuration example (adapted from
[examples/supervisor.conf](https://github.com/benoitc/gunicorn/blob/master/examples/supervisor.conf)):

```ini
[program:gunicorn]
command=/path/to/gunicorn main:application -c /path/to/gunicorn.conf.py
directory=/path/to/project
user=nobody
autostart=true
autorestart=true
redirect_stderr=true
```

### Upstart

Sample Upstart config (logs go to `/var/log/upstart/myapp.log`):

```upstart
# /etc/init/myapp.conf
description "myapp"

start on (filesystem)
stop on runlevel [016]

respawn
setuid nobody
setgid nogroup
chdir /path/to/app/directory

exec /path/to/virtualenv/bin/gunicorn myapp:app
```

### systemd

[systemd](https://www.freedesktop.org/wiki/Software/systemd/) can create a UNIX
socket and launch Gunicorn on demand. Service file:

```ini
# /etc/systemd/system/gunicorn.service
[Unit]
Description=gunicorn daemon
Requires=gunicorn.socket
After=network.target

[Service]
Type=notify
NotifyAccess=main
User=someuser
Group=someuser
WorkingDirectory=/home/someuser/applicationroot
ExecStart=/usr/bin/gunicorn applicationname.wsgi
ExecReload=/bin/kill -s HUP $MAINPID
KillMode=mixed
TimeoutStopSec=5
PrivateTmp=true

[Install]
WantedBy=multi-user.target
```

`Type=notify` lets Gunicorn report readiness to systemd. If the service should
run under a transient user consider adding `DynamicUser=true`. Tighten
permissions further with `ProtectSystem=strict` if the app permits.

Socket activation file:

```ini
# /etc/systemd/system/gunicorn.socket
[Unit]
Description=gunicorn socket

[Socket]
ListenStream=/run/gunicorn.sock
SocketUser=www-data
SocketGroup=www-data
SocketMode=0660

[Install]
WantedBy=sockets.target
```

Enable and start the socket so it begins listening immediately and on reboot:

```bash
systemctl enable --now gunicorn.socket
```

Test connectivity from the nginx user (Debian defaults to `www-data`):

```bash
sudo -u www-data curl --unix-socket /run/gunicorn.sock http
```

!!! 
note Use `systemctl show --value -p MainPID gunicorn.service` to retrieve the main process ID or `systemctl kill -s HUP gunicorn.service` to send signals. Configure Nginx to proxy to the new socket: ```nginx user www-data; ... http { server { listen 8000; server_name 127.0.0.1; location / { proxy_pass http://unix:/run/gunicorn.sock; } } } ... ``` !!! note Adjust `listen` and `server_name` for production (typically port 80 and your site's domain). Ensure nginx starts automatically: ```bash systemctl enable nginx.service systemctl start nginx ``` Browse to to verify Gunicorn + Nginx + systemd. ## Logging Configure logging through the CLI flags described in the [settings documentation](reference/settings.md#logging) or via a [logging configuration file](https://github.com/benoitc/gunicorn/blob/master/examples/logging.conf). Rotate logs with `logrotate` by sending `SIGUSR1`: ```bash kill -USR1 $(cat /var/run/gunicorn.pid) ``` !!! note If you override the `LOGGING` dictionary, set `disable_existing_loggers` to `False` so Gunicorn's loggers remain active. !!! warning Gunicorn's error log should capture Gunicorn-related messages only. Route your application logs separately. benoitc-gunicorn-f5fb19e/docs/content/design.md000066400000000000000000000143721514360242400216670ustar00rootroot00000000000000 # Design A brief look at Gunicorn's architecture. ## Server Model Gunicorn uses a **pre-fork worker model**: an arbiter process manages worker processes, while the workers handle requests and responses. The arbiter never touches individual client sockets.
⚖️
### Arbiter Orchestrates the worker pool. Listens for signals (`TTIN`, `TTOU`, `CHLD`, `HUP`) to adjust workers, restart them on failure, or reload configuration.
⚙️
### Worker Pool Each worker handles requests independently. Worker types determine concurrency model: sync, threaded, or async via greenlets/asyncio.
📡
### Signal Communication `TTIN`/`TTOU` adjust worker count. `CHLD` triggers restart of crashed workers. `HUP` reloads configuration. See [Signals](signals.md).
## Worker Types Choose a worker type based on your application's needs. === "Sync" The **default** worker. Handles one request at a time per worker. - Simple and predictable - Errors affect only the current request - No keep-alive support (connections close after response) - Requires a buffering proxy (nginx, HAProxy) for production ```bash gunicorn myapp:app ``` === "Gthread" Threaded worker with a **thread pool** per worker process. - Supports keep-alive connections - Good balance of concurrency and simplicity - Threads share memory (lower footprint than workers) - Idle connections close after keepalive timeout ```bash gunicorn myapp:app -k gthread --threads 4 ``` === "ASGI" Native **asyncio** support for modern async frameworks. - For FastAPI, Starlette, Quart, and other ASGI apps - Full async/await support - See the [ASGI Guide](asgi.md) for details ```bash gunicorn myapp:app -k uvicorn.workers.UvicornWorker ``` === "Gevent" **Greenlet-based** async worker using [Gevent](http://www.gevent.org/). - Handles thousands of concurrent connections - Supports keep-alive, WebSockets, long-polling - May require patches for some libraries (e.g., `psycogreen` for Psycopg) - Not compatible with code that relies on blocking behavior ```bash gunicorn myapp:app -k gevent --worker-connections 1000 ``` === "Eventlet (Deprecated)" !!! warning "Deprecated" The eventlet worker is **deprecated** and will be removed in Gunicorn 26.0. Eventlet itself is [no longer actively maintained](https://eventlet.readthedocs.io/en/latest/asyncio/migration.html). Please migrate to `gevent`, `gthread`, or another supported worker type. **Greenlet-based** async worker using [Eventlet](http://eventlet.net/). - Similar capabilities to Gevent - Handles high concurrency for I/O-bound apps - Some libraries may need compatibility patches ```bash gunicorn myapp:app -k eventlet --worker-connections 1000 ``` === "Tornado" Worker for [Tornado](https://www.tornadoweb.org/) applications. 
- Designed for Tornado's async framework - Can serve WSGI apps, but not recommended for that use case - Use when running native Tornado applications ```bash gunicorn myapp:app -k tornado ``` ## Comparison | Worker | Concurrency Model | Keep-Alive | Best For | |--------|-------------------|------------|----------| | `sync` | 1 request/worker | ❌ | CPU-bound apps behind a proxy | | `gthread` | Thread pool | ✅ | Mixed workloads, moderate concurrency | | ASGI workers | AsyncIO | ✅ | Modern async frameworks (FastAPI, etc.) | | `gevent` | Greenlets | ✅ | I/O-bound, WebSockets, streaming | | `eventlet` | Greenlets | ✅ | **Deprecated** - use `gevent` instead | | `tornado` | Tornado IOLoop | ✅ | Native Tornado applications | !!! tip "Quick Decision Guide" - **Simple app behind nginx?** → `sync` (default) - **Need keep-alive or moderate concurrency?** → `gthread` - **WebSockets, streaming, long-polling?** → `gevent` or ASGI worker - **FastAPI, Starlette, or async framework?** → ASGI worker ## When to Use Async Workers Synchronous workers assume your app is CPU or network bound and avoids indefinite blocking operations. Use async workers when you have: - Long blocking calls (external APIs, slow databases) - Direct internet traffic without a buffering proxy - Streaming request/response bodies - Long polling or Comet patterns - WebSockets !!! info "Testing Slow Clients" Tools like [Hey](https://github.com/rakyll/hey) can simulate slow responses to test how your configuration handles them. ## Scaling ### How Many Workers? !!! warning "Don't Over-Scale" Workers ≠ clients. Gunicorn typically needs only **4–12 workers** to handle heavy traffic. Too many workers waste resources and can reduce throughput. Start with this formula and adjust under load: ``` workers = (2 × CPU cores) + 1 ``` Use `TTIN`/`TTOU` signals to adjust the worker count at runtime. ### How Many Threads? 
With the `gthread` worker, you can combine workers and threads: ```bash gunicorn myapp:app -k gthread --workers 4 --threads 2 ``` !!! info "Threads vs Workers" - **Threads** share memory → lower footprint - **Workers** isolate failures → better fault tolerance - Combine both for the best of both worlds Threads can extend request time beyond the worker timeout while still notifying the arbiter. The optimal mix depends on your runtime (CPython vs PyPy) and workload. ## Configuration Examples ```bash # Sync (default) - simple apps behind nginx gunicorn myapp:app # Gthread - keep-alive and thread concurrency gunicorn myapp:app -k gthread --workers 4 --threads 4 # Gevent - high concurrency for I/O-bound apps gunicorn myapp:app -k gevent --workers 4 --worker-connections 1000 # ASGI - FastAPI/Starlette with Uvicorn worker gunicorn myapp:app -k uvicorn.workers.UvicornWorker --workers 4 ``` !!! note "Third-Party AsyncIO Workers" For asyncio frameworks, you can also use third-party workers. See the [aiohttp deployment guide](https://docs.aiohttp.org/en/stable/deployment.html#nginx-gunicorn) for examples. benoitc-gunicorn-f5fb19e/docs/content/dirty.md000066400000000000000000001133351514360242400215500ustar00rootroot00000000000000--- title: Dirty Arbiters menu: guides: weight: 10 --- # Dirty Arbiters !!! warning "Beta Feature" Dirty Arbiters is a beta feature introduced in Gunicorn 25.0.0. While it has been tested, the API and behavior may change in future releases. Please report any issues on [GitHub](https://github.com/benoitc/gunicorn/issues). Dirty Arbiters provide a separate process pool for executing long-running, blocking operations (AI model loading, heavy computation) without blocking HTTP workers. This feature is inspired by Erlang's dirty schedulers. ## Overview Traditional Gunicorn workers are designed to handle HTTP requests quickly. 
Long-running operations like loading ML models or performing heavy computation can block these workers, reducing the server's ability to handle concurrent requests. Dirty Arbiters solve this by providing: - **Separate worker pool** - Completely separate from HTTP workers, can be killed/restarted independently - **Stateful workers** - Loaded resources persist in dirty worker memory - **Message-passing IPC** - Communication via Unix sockets with binary TLV protocol - **Explicit API** - Clear `execute()` calls (no hidden IPC) - **Asyncio-based** - Clean concurrent handling with streaming support ## Design Philosophy Dirty Arbiters follow several key design principles: ### Separate Process Hierarchy Unlike threads or in-process pools, Dirty Arbiters use a fully separate process tree: - **Isolation** - A crash or memory leak in a dirty worker cannot affect HTTP workers - **Independent lifecycle** - Dirty workers can be killed/restarted without affecting request handling - **Resource accounting** - OS-level memory limits can be applied per-process - **Clean shutdown** - Each process tree can be signaled and terminated independently ### Erlang Inspiration The name and concept come from Erlang's "dirty schedulers" - special schedulers that handle operations that would block normal schedulers. In Erlang, dirty schedulers run NIFs (Native Implemented Functions) that can't yield. Similarly, Gunicorn's Dirty Arbiters handle Python operations that would block HTTP workers. 
### Why Asyncio The Dirty Arbiter uses asyncio for its core loop rather than the main arbiter's select-based approach: - **Non-blocking IPC** - Can handle many concurrent client connections efficiently - **Concurrent request routing** - Multiple requests can be dispatched to workers simultaneously - **Streaming support** - Native async generators for streaming responses - **Clean signal handling** - Signals integrate cleanly via `loop.add_signal_handler()` ### Stateful Applications Traditional WSGI apps are request-scoped - they're invoked per-request and don't maintain state between requests. Dirty apps are different: - **Long-lived** - Apps persist in worker memory for the worker's lifetime - **Pre-loaded resources** - Models, connections, and caches stay loaded - **Explicit state management** - Apps control their own lifecycle via `init()` and `close()` This makes dirty apps ideal for ML inference, where loading a model once and reusing it for many requests is essential. ## Architecture ``` +-------------------+ | Main Arbiter | | (manages both) | +--------+----------+ | SIGTERM/SIGHUP/SIGUSR1 (forwarded) | +----------------------+----------------------+ | | +-----v-----+ +------v------+ | HTTP | | Dirty | | Workers | | Arbiter | +-----------+ +------+------+ | | | Unix Socket IPC SIGTERM/SIGHUP | /tmp/gunicorn_dirty_.sock | +------------------>---------------------->---+ +-----------+-----------+ | | | +-----v---+ +-----v---+ +-----v---+ | Dirty | | Dirty | | Dirty | | Worker | | Worker | | Worker | +---------+ +---------+ +---------+ ^ | ^ | ^ | | | | | | | Heartbeat (mtime every dirty_timeout/2) | | | | | | +---+--------+---+-------+---+ | Workers load apps based on allocation Worker 1: [MLApp, ImageApp, HeavyApp] Worker 2: [MLApp, ImageApp, HeavyApp] Worker 3: [MLApp, ImageApp] (HeavyApp workers=2) ``` ### Process Relationships | Component | Parent | Communication | |-----------|--------|---------------| | Main Arbiter | init/systemd | Signals from OS | | 
HTTP Workers | Main Arbiter | Pipes, signals | | Dirty Arbiter | Main Arbiter | Signals, exit status | | Dirty Workers | Dirty Arbiter | Unix socket, signals, WorkerTmp | ## Configuration Add these settings to your Gunicorn configuration file or command line: ```python # gunicorn.conf.py dirty_apps = [ "myapp.ml:MLApp", "myapp.images:ImageApp", ] dirty_workers = 2 # Number of dirty workers dirty_timeout = 300 # Task timeout in seconds dirty_threads = 1 # Threads per worker dirty_graceful_timeout = 30 # Shutdown timeout ``` Or via command line: ```bash gunicorn myapp:app \ --dirty-app myapp.ml:MLApp \ --dirty-app myapp.images:ImageApp \ --dirty-workers 2 \ --dirty-timeout 300 ``` ### Configuration Options | Setting | Default | Description | |---------|---------|-------------| | `dirty_apps` | `[]` | List of dirty app import paths | | `dirty_workers` | `0` | Number of dirty workers (0 = disabled) | | `dirty_timeout` | `300` | Task timeout in seconds | | `dirty_threads` | `1` | Threads per dirty worker | | `dirty_graceful_timeout` | `30` | Graceful shutdown timeout | ## Per-App Worker Allocation By default, all dirty workers load all configured apps. For apps that consume significant memory (like large ML models), you can limit how many workers load a specific app. ### Why Per-App Allocation? Consider a scenario with a 10GB ML model and 8 dirty workers: - **Default behavior**: 8 workers × 10GB = 80GB RAM - **With `workers=2`**: 2 workers × 10GB = 20GB RAM (75% savings) Requests for the limited app are routed only to workers that have it loaded. 
### Configuration Methods **Method 1: Class Attribute** Set the `workers` attribute on your DirtyApp class: ```python from gunicorn.dirty import DirtyApp class HeavyModelApp(DirtyApp): workers = 2 # Only 2 workers will load this app def init(self): self.model = load_10gb_model() def predict(self, data): return self.model.predict(data) def close(self): pass ``` **Method 2: Config Override** Use the `module:class:N` format in your config: ```python # gunicorn.conf.py dirty_apps = [ "myapp.light:LightApp", # All workers (default) "myapp.heavy:HeavyModelApp:2", # Only 2 workers "myapp.single:SingletonApp:1", # Only 1 worker ] dirty_workers = 4 ``` Config overrides take precedence over class attributes. ### Worker Distribution When workers spawn, apps are assigned based on their limits: ``` Example with dirty_workers=4: - LightApp (workers=None): Loaded on workers 1, 2, 3, 4 - HeavyModelApp (workers=2): Loaded on workers 1, 2 - SingletonApp (workers=1): Loaded on worker 1 Worker 1: [LightApp, HeavyModelApp, SingletonApp] Worker 2: [LightApp, HeavyModelApp] Worker 3: [LightApp] Worker 4: [LightApp] ``` ### Request Routing Requests are automatically routed to workers that have the target app: ```python client = get_dirty_client() # Goes to any of 4 workers (round-robin) client.execute("myapp.light:LightApp", "action") # Goes to worker 1 or 2 only (round-robin between those) client.execute("myapp.heavy:HeavyModelApp", "predict", data) # Always goes to worker 1 client.execute("myapp.single:SingletonApp", "process") ``` ### Error Handling If no workers have the requested app loaded, a `DirtyNoWorkersAvailableError` is raised: ```python from gunicorn.dirty import get_dirty_client from gunicorn.dirty.errors import DirtyNoWorkersAvailableError def my_view(request): client = get_dirty_client() try: result = client.execute("myapp.heavy:HeavyModelApp", "predict", data) except DirtyNoWorkersAvailableError as e: # All workers with this app are down or app not configured return 
{"error": "Service temporarily unavailable", "app": e.app_path} ``` ### Worker Crash Recovery When a worker crashes, its replacement gets the **same apps** as the dead worker: ``` Timeline: t=0: Worker 1 crashes (had HeavyModelApp) t=1: Arbiter detects crash, queues respawn t=2: New Worker 5 spawns with same apps as Worker 1 t=3: HeavyModelApp still available on Worker 2 during gap ``` This ensures: - No memory redistribution on existing workers - Predictable replacement behavior - The heavy model is only loaded on the new worker ### Best Practices 1. **Set realistic limits** - Don't set `workers=1` unless truly necessary (single point of failure) 2. **Monitor memory** - Track per-worker memory to tune allocation 3. **Handle unavailability** - Catch `DirtyNoWorkersAvailableError` gracefully 4. **Use class attributes for app-specific limits** - Makes the limit part of the app definition 5. **Use config for deployment-specific overrides** - Different limits for dev vs prod ## Creating a Dirty App Dirty apps inherit from `DirtyApp` and implement three methods: ```python # myapp/dirty.py from gunicorn.dirty import DirtyApp class MLApp(DirtyApp): """Dirty application for ML workloads.""" def __init__(self): self.models = {} def init(self): """Called once at dirty worker startup.""" # Pre-load commonly used models self.models['default'] = self._load_model('base-model') def __call__(self, action, *args, **kwargs): """Dispatch to action methods.""" method = getattr(self, action, None) if method is None: raise ValueError(f"Unknown action: {action}") return method(*args, **kwargs) def load_model(self, name): """Load a model into memory.""" if name not in self.models: self.models[name] = self._load_model(name) return {"loaded": True, "name": name} def inference(self, model_name, input_text): """Run inference on loaded model.""" model = self.models.get(model_name) if not model: raise ValueError(f"Model not loaded: {model_name}") return model.predict(input_text) def 
_load_model(self, name): import torch model = torch.load(f"models/{name}.pt") return model def close(self): """Cleanup on shutdown.""" for model in self.models.values(): del model ``` ### DirtyApp Interface | Method/Attribute | Description | |------------------|-------------| | `workers` | Class attribute. Number of workers to load this app (`None` = all workers). | | `init()` | Called once when dirty worker starts, after instantiation. Load resources here. | | `__call__(action, *args, **kwargs)` | Handle requests from HTTP workers. | | `close()` | Called when dirty worker shuts down. Cleanup resources. | ### Initialization Sequence When a dirty worker starts, initialization happens in this order: 1. **Fork** - Worker process is forked from dirty arbiter 2. **`dirty_post_fork(arbiter, worker)`** - Hook called immediately after fork 3. **App instantiation** - Each dirty app class is instantiated (`__init__`) 4. **`app.init()`** - Called for each app after instantiation (load models, resources) 5. **`dirty_worker_init(worker)`** - Hook called after ALL apps are initialized 6. 
**Run loop** - Worker starts accepting requests from HTTP workers This means: - Use `__init__` for basic setup (initialize empty containers, store config) - Use `init()` for heavy loading (ML models, database connections, large files) - The `dirty_worker_init` hook fires only after all apps have completed their `init()` calls ## Using from HTTP Workers ### Sync Workers (sync, gthread) ```python from gunicorn.dirty import get_dirty_client def my_view(request): client = get_dirty_client() # Load a model client.execute("myapp.ml:MLApp", "load_model", "gpt-4") # Run inference result = client.execute( "myapp.ml:MLApp", "inference", "gpt-4", input_text=request.data ) return result ``` ### Async Workers (ASGI) ```python from gunicorn.dirty import get_dirty_client_async async def my_view(request): client = await get_dirty_client_async() # Non-blocking execution await client.execute_async("myapp.ml:MLApp", "load_model", "gpt-4") result = await client.execute_async( "myapp.ml:MLApp", "inference", "gpt-4", input_text=request.data ) return result ``` ## Streaming Dirty Arbiters support streaming responses for use cases like LLM token generation, where data is produced incrementally. This enables real-time delivery of results without waiting for complete execution. 
### Streaming with Generators Any dirty app action that returns a generator (sync or async) automatically streams chunks to the client: ```python # myapp/llm.py from gunicorn.dirty import DirtyApp class LLMApp(DirtyApp): def init(self): from transformers import pipeline self.generator = pipeline("text-generation", model="gpt2") def generate(self, prompt): """Sync streaming - yields tokens.""" for token in self.generator(prompt, stream=True): yield token["generated_text"] async def generate_async(self, prompt): """Async streaming - yields tokens.""" import openai client = openai.AsyncOpenAI() stream = await client.chat.completions.create( model="gpt-4", messages=[{"role": "user", "content": prompt}], stream=True ) async for chunk in stream: if chunk.choices[0].delta.content: yield chunk.choices[0].delta.content def close(self): pass ``` ### Client Streaming API Use `stream()` for sync workers and `stream_async()` for async workers: **Sync Workers (sync, gthread):** ```python from gunicorn.dirty import get_dirty_client def generate_view(request): client = get_dirty_client() def generate_response(): for chunk in client.stream("myapp.llm:LLMApp", "generate", request.prompt): yield chunk return StreamingResponse(generate_response()) ``` **Async Workers (ASGI):** ```python from gunicorn.dirty import get_dirty_client_async async def generate_view(request): client = await get_dirty_client_async() async def generate_response(): async for chunk in client.stream_async("myapp.llm:LLMApp", "generate", request.prompt): yield chunk return StreamingResponse(generate_response()) ``` ### Streaming Protocol Streaming uses a simple protocol with three message types: 1. **Chunk** (`type: "chunk"`) - Contains partial data 2. **End** (`type: "end"`) - Signals stream completion 3. 
**Error** (`type: "error"`) - Signals error during streaming Example message flow: ``` Client -> Arbiter -> Worker: request Worker -> Arbiter -> Client: chunk (data: "Hello") Worker -> Arbiter -> Client: chunk (data: " ") Worker -> Arbiter -> Client: chunk (data: "World") Worker -> Arbiter -> Client: end ``` ## Binary Protocol The dirty worker IPC uses a binary protocol inspired by OpenBSD msgctl/msgsnd for efficient data transfer. This eliminates base64 encoding overhead for binary data like images, audio, or model weights. ### Header Format (16 bytes) ``` +--------+--------+--------+--------+--------+--------+--------+--------+ | Magic (2B) | Ver(1) | MType | Payload Length (4B) | +--------+--------+--------+--------+--------+--------+--------+--------+ | Request ID (8 bytes) | +--------+--------+--------+--------+--------+--------+--------+--------+ ``` - **Magic**: `0x47 0x44` ("GD" for Gunicorn Dirty) - **Version**: `0x01` - **MType**: Message type (`0x01`=REQUEST, `0x02`=RESPONSE, `0x03`=ERROR, `0x04`=CHUNK, `0x05`=END) - **Length**: Payload size (big-endian uint32, max 64MB) - **Request ID**: uint64 identifier ### TLV Payload Encoding Payloads use Type-Length-Value encoding: | Type | Code | Description | |------|------|-------------| | None | `0x00` | No value bytes | | Bool | `0x01` | 1 byte (0x00/0x01) | | Int64 | `0x05` | 8 bytes big-endian signed | | Float64 | `0x06` | 8 bytes IEEE 754 | | Bytes | `0x10` | 4-byte length + raw bytes | | String | `0x11` | 4-byte length + UTF-8 | | List | `0x20` | 4-byte count + elements | | Dict | `0x21` | 4-byte count + key-value pairs | ### Binary Data Benefits The binary protocol allows passing raw bytes directly without encoding: ```python # Image processing with binary data def resize(self, image_data, width, height): """Resize an image - image_data is raw bytes.""" img = Image.open(io.BytesIO(image_data)) resized = img.resize((width, height)) buffer = io.BytesIO() resized.save(buffer, format='PNG') return 
buffer.getvalue() # Returns raw bytes # Called from HTTP worker thumbnail = client.execute( "myapp.images:ImageApp", "thumbnail", raw_image_bytes, # No base64 encoding needed size=256 ) ``` ### Error Handling in Streams Errors during streaming are delivered as error messages: ```python def generate_view(request): client = get_dirty_client() try: for chunk in client.stream("myapp.llm:LLMApp", "generate", prompt): yield chunk except DirtyError as e: # Error occurred mid-stream yield f"\n[Error: {e.message}]" ``` ### Best Practices for Streaming 1. **Use async generators for I/O-bound streaming** - e.g., API calls to external services 2. **Use sync generators for CPU-bound streaming** - e.g., local model inference 3. **Yield frequently** - Heartbeats are sent during streaming to keep workers alive 4. **Keep chunks small** - Smaller chunks provide better perceived latency 5. **Handle client disconnection** - Streams continue even if client disconnects; design accordingly ## Stash (Shared State via Message Passing) Stash provides shared state between dirty workers, similar to Erlang's ETS (Erlang Term Storage). Workers remain fully isolated - all state access goes through message passing to the arbiter. ### Architecture ``` +------------------+ | Dirty Arbiter | | | | stash_tables: | | sessions: {} | | cache: {} | +--------+---------+ | Unix Socket IPC (message passing) | +-------------------+-------------------+ | | | +-----v-----+ +-----v-----+ +-----v-----+ | Worker 1 | | Worker 2 | | Worker 3 | | | | | | | | (isolated)| | (isolated)| | (isolated)| +-----------+ +-----------+ +-----------+ Workers have NO shared memory. All stash operations are IPC messages to arbiter. ``` ### How It Works 1. Worker calls `stash.put("sessions", "user:1", data)` 2. Worker sends message to arbiter via Unix socket 3. Arbiter stores data in its memory (`self.stash_tables`) 4. Arbiter sends response back to worker 5. 
Worker receives confirmation This is **not** shared memory - workers remain fully isolated. The arbiter acts as a centralized store that workers communicate with via message passing. This matches Erlang's model where ETS tables are owned by a process. ### Basic Usage ```python from gunicorn.dirty import stash # Store a value (table auto-created) # This sends a message to arbiter, which stores it stash.put("sessions", "user:123", {"name": "Alice", "role": "admin"}) # Retrieve a value # This sends a request to arbiter, which returns the value user = stash.get("sessions", "user:123") # Delete a key stash.delete("sessions", "user:123") # Check existence if stash.exists("sessions", "user:123"): print("Session exists") # List keys with pattern matching keys = stash.keys("sessions", pattern="user:*") ``` ### Dict-like Interface For more Pythonic access, use the table interface: ```python from gunicorn.dirty import stash # Get a table reference sessions = stash.table("sessions") # Dict-like operations (each is an IPC message) sessions["user:123"] = {"name": "Alice"} user = sessions["user:123"] del sessions["user:123"] # Iteration for key in sessions: print(key, sessions[key]) # Length count = len(sessions) ``` ### Table Management ```python from gunicorn.dirty import stash # Explicit table creation (idempotent) stash.ensure("cache") # Get table info info = stash.info("sessions") print(f"Table has {info['size']} entries") # Clear all entries in a table stash.clear("sessions") # Delete entire table stash.delete_table("sessions") # List all tables tables = stash.tables() ``` ### Using Stash in DirtyApp Declare tables your app uses with the `stashes` class attribute: ```python from gunicorn.dirty import DirtyApp, stash class SessionApp(DirtyApp): # Tables declared here are auto-created on startup stashes = ["sessions", "counters"] def init(self): # Initialize counter if needed if not stash.exists("counters", "requests"): stash.put("counters", "requests", 0) def login(self, 
user_id, user_data): """Store session - any worker can read it via arbiter.""" stash.put("sessions", f"user:{user_id}", { "data": user_data, "logged_in_at": time.time(), }) self._increment_counter() return {"status": "ok"} def get_session(self, user_id): """Get session - request goes to arbiter.""" return stash.get("sessions", f"user:{user_id}") def _increment_counter(self): """Increment global counter via arbiter.""" current = stash.get("counters", "requests", 0) stash.put("counters", "requests", current + 1) def close(self): pass ``` ### API Reference | Function | Description | |----------|-------------| | `stash.put(table, key, value)` | Store a value (table auto-created) | | `stash.get(table, key, default=None)` | Retrieve a value | | `stash.delete(table, key)` | Delete a key, returns True if deleted | | `stash.exists(table, key=None)` | Check if table/key exists | | `stash.keys(table, pattern=None)` | List keys, optional glob pattern | | `stash.clear(table)` | Delete all entries in table | | `stash.info(table)` | Get table info (size, etc.) 
| | `stash.ensure(table)` | Create table if not exists | | `stash.delete_table(table)` | Delete entire table | | `stash.tables()` | List all table names | | `stash.table(name)` | Get dict-like interface | ### Patterns and Use Cases **Session Storage:** ```python # Store session on login (worker 1) stash.put("sessions", f"user:{user_id}", session_data) # Check session on request (may be worker 2) session = stash.get("sessions", f"user:{user_id}") if session is None: raise AuthError("Not logged in") ``` **Shared Cache:** ```python def get_expensive_result(key): # Check cache first (via arbiter) cached = stash.get("cache", key) if cached is not None: return cached # Compute and cache result = expensive_computation() stash.put("cache", key, result) return result ``` **Global Counters:** ```python def increment_counter(name): # Note: not atomic - two workers could read same value current = stash.get("counters", name, 0) stash.put("counters", name, current + 1) return current + 1 ``` **Feature Flags:** ```python # Set flag (from admin endpoint) stash.put("flags", "new_feature", True) # Check flag (from any worker) if stash.get("flags", "new_feature", False): enable_new_feature() ``` ### Error Handling ```python from gunicorn.dirty.stash import ( StashError, StashTableNotFoundError, StashKeyNotFoundError, ) try: info = stash.info("nonexistent") except StashTableNotFoundError as e: print(f"Table not found: {e.table_name}") # Using get() with default avoids KeyNotFoundError value = stash.get("table", "key", default="fallback") ``` ### Best Practices 1. **Use descriptive table names** - `user_sessions`, `ml_cache`, not `data` 2. **Use key prefixes** - `user:123`, `cache:model:v1` for organization 3. **Handle missing data** - Always provide defaults or check existence 4. **Don't store large data** - Each access is an IPC round-trip 5. 
**Remember it's ephemeral** - Data is lost on arbiter restart ### Advantages - **Worker isolation** - Workers remain fully isolated; no shared memory bugs - **Simple API** - Dict-like interface, no locking required - **Binary support** - Efficiently stores bytes (images, model weights) - **Pattern matching** - `keys(pattern="user:*")` for querying - **Zero setup** - Works automatically with dirty workers - **Table-based** - Organize data into logical namespaces ### Limitations - **No persistence** - Data lives only in arbiter memory - **No transactions** - No atomic read-modify-write operations - **No TTL** - Entries don't expire automatically - **IPC overhead** - Each operation is a network round-trip - **Single arbiter** - Not distributed across multiple machines For persistent or distributed state, use Redis, PostgreSQL, or similar external systems. ### Flask Example ```python from flask import Flask, Response from gunicorn.dirty import get_dirty_client app = Flask(__name__) @app.route("/chat", methods=["POST"]) def chat(): prompt = request.json.get("prompt") client = get_dirty_client() def stream(): for token in client.stream("myapp.llm:LLMApp", "generate", prompt): yield f"data: {token}\n\n" return Response(stream(), content_type="text/event-stream") ``` ### FastAPI Example ```python from fastapi import FastAPI from fastapi.responses import StreamingResponse from gunicorn.dirty import get_dirty_client_async app = FastAPI() @app.post("/chat") async def chat(prompt: str): client = await get_dirty_client_async() async def stream(): async for token in client.stream_async("myapp.llm:LLMApp", "generate", prompt): yield f"data: {token}\n\n" return StreamingResponse(stream(), media_type="text/event-stream") ``` ## Lifecycle Hooks Dirty Arbiters provide hooks for customization: ```python # gunicorn.conf.py def on_dirty_starting(arbiter): """Called just before the dirty arbiter starts.""" print("Dirty arbiter starting...") def dirty_post_fork(arbiter, worker): """Called 
just after a dirty worker is forked.""" print(f"Dirty worker {worker.pid} forked") def dirty_worker_init(worker): """Called after a dirty worker initializes all apps.""" print(f"Dirty worker {worker.pid} initialized") def dirty_worker_exit(arbiter, worker): """Called when a dirty worker exits.""" print(f"Dirty worker {worker.pid} exiting") on_dirty_starting = on_dirty_starting dirty_post_fork = dirty_post_fork dirty_worker_init = dirty_worker_init dirty_worker_exit = dirty_worker_exit ``` ## Signal Handling Dirty Arbiters integrate with the main arbiter's signal handling. Signals are forwarded from the main arbiter to the dirty arbiter, which then propagates them to workers. ### Signal Flow ``` Main Arbiter Dirty Arbiter Dirty Workers | | | SIGTERM/SIGHUP/SIGUSR1 ------> signal_handler() | | | | | call_soon_threadsafe() | | | | | handle_signal() | | | | | +------> os.kill(worker, sig) | | | ``` ### Signal Reference | Signal | At Dirty Arbiter | At Dirty Workers | Notes | |--------|-----------------|------------------|-------| | `SIGTERM` | Sets `self.alive = False`, waits for graceful shutdown | Exits after completing current request | Graceful shutdown with timeout | | `SIGQUIT` | Immediate exit via `sys.exit(0)` | Killed immediately | Fast shutdown, no cleanup | | `SIGHUP` | Kills all workers, spawns new ones | Exits immediately | Hot reload of workers | | `SIGUSR1` | Reopens log files, forwards to workers | Reopens log files | Log rotation support | | `SIGTTIN` | Increases worker count by 1 | N/A | Dynamic scaling up | | `SIGTTOU` | Decreases worker count by 1 | N/A | Dynamic scaling down | | `SIGCHLD` | Handled by event loop, triggers reap | N/A | Worker death detection | | `SIGINT` | Same as SIGTERM | Same as SIGTERM | Ctrl-C handling | ### Dynamic Scaling with TTIN/TTOU You can dynamically scale the number of dirty workers at runtime using signals, without restarting gunicorn: ```bash # Find the dirty arbiter process ps aux | grep dirty-arbiter # Or use the 
PID file (location depends on your app name) cat /tmp/gunicorn-dirty-myapp.pid # Increase dirty workers by 1 kill -TTIN # Decrease dirty workers by 1 kill -TTOU ``` **Minimum Worker Constraint:** The dirty arbiter will not decrease below the minimum number of workers required by your app configurations. For example, if you have an app with `workers = 3`, you cannot scale below 3 dirty workers. When this limit is reached, a warning is logged: ``` WARNING: SIGTTOU: Cannot decrease below 3 workers (required by app specs) ``` **Use Cases:** - **Burst handling** - Scale up when you anticipate heavy load - **Cost optimization** - Scale down during low-traffic periods - **Recovery** - Scale up if workers are busy with long-running tasks ### Forwarded Signals The main arbiter forwards these signals to the dirty arbiter process: - **SIGTERM** - Graceful shutdown of entire process tree - **SIGHUP** - Worker reload (main arbiter reloads HTTP workers, dirty arbiter reloads dirty workers) - **SIGUSR1** - Log rotation across all processes ### Async Signal Handling The dirty arbiter uses asyncio's signal integration for safe handling in the event loop: ```python # Signals are registered with the event loop loop.add_signal_handler(signal.SIGTERM, self.signal_handler, signal.SIGTERM) def signal_handler(self, sig): # Use call_soon_threadsafe for thread-safe event loop integration self.loop.call_soon_threadsafe(self.handle_signal, sig) ``` This pattern ensures signals don't interrupt asyncio operations mid-execution, preventing race conditions and partial state updates. ## Liveness and Health Monitoring Dirty Arbiters implement multiple layers of health monitoring to ensure workers remain responsive and orphaned processes are cleaned up. ### Heartbeat Mechanism Each dirty worker maintains a "worker tmp" file whose mtime serves as a heartbeat: ``` Worker Lifecycle: 1. Worker spawns, creates WorkerTmp file 2. Worker touches file every (dirty_timeout / 2) seconds 3. 
Arbiter checks all worker mtimes every 1 second 4. If mtime > dirty_timeout seconds old, worker is killed ``` This file-based heartbeat has several advantages: - **OS-level tracking** - No IPC required, works even if worker is stuck in C code - **Crash detection** - Arbiter notices immediately when worker stops updating - **Graceful recovery** - Worker killed with SIGKILL, arbiter spawns replacement ### Timeout Detection The arbiter's monitoring loop checks worker health every second: ```python # Pseudocode for worker monitoring for worker in self.workers: mtime = worker.tmp.last_update() if time.time() - mtime > self.dirty_timeout: log.warning(f"Worker {worker.pid} timed out, killing") os.kill(worker.pid, signal.SIGKILL) ``` When a worker is killed: 1. `SIGCHLD` is delivered to the arbiter 2. Arbiter reaps the worker process 3. `dirty_worker_exit` hook is called 4. A new worker is spawned to maintain `dirty_workers` count ### Parent Death Detection Dirty arbiters monitor their parent process (the main arbiter) to detect orphaning: ```python # In the dirty arbiter's main loop if os.getppid() != self.parent_pid: log.info("Parent died, shutting down") self.alive = False ``` This check runs every iteration of the event loop (typically sub-millisecond). When parent death is detected: 1. Arbiter sets `self.alive = False` 2. All workers are sent SIGTERM 3. Arbiter waits for graceful shutdown (up to `dirty_graceful_timeout`) 4. Remaining workers are sent SIGKILL 5. Arbiter exits ### Orphan Cleanup To handle edge cases where the dirty arbiter itself crashes, a well-known PID file is used: **PID file location**: `/tmp/gunicorn_dirty_.pid` On startup, the dirty arbiter: 1. Checks if PID file exists 2. If yes, reads the old PID and attempts to kill it (`SIGTERM`) 3. Waits briefly for cleanup 4. Writes its own PID to the file 5. On exit, removes the PID file This ensures that if a dirty arbiter crashes and the main arbiter restarts it, the old orphaned process is terminated. 
### Respawn Behavior | Component | Respawn Trigger | Respawn Behavior | |-----------|-----------------|------------------| | Dirty Worker | Exit, timeout, or crash | Immediate respawn to maintain `dirty_workers` count | | Dirty Arbiter | Exit or crash | Main arbiter respawns if not shutting down | The dirty arbiter maintains a target worker count and continuously spawns workers until the target is reached: ```python while len(self.workers) < self.num_workers: self.spawn_worker() ``` ### Monitoring Recommendations For production deployments, consider: 1. **Log monitoring** - Watch for "Worker timed out" messages indicating hung workers 2. **Process monitoring** - Use systemd or supervisord to monitor the main arbiter 3. **Metrics** - Track respawn frequency to detect unstable workers ```bash # Check for recent worker timeouts grep "Worker.*timed out" /var/log/gunicorn.log | tail -20 # Monitor process tree watch -n 1 'pstree -p $(cat gunicorn.pid)' ``` ## Error Handling The dirty client raises specific exceptions: ```python from gunicorn.dirty.errors import ( DirtyError, DirtyTimeoutError, DirtyConnectionError, DirtyAppError, DirtyAppNotFoundError, DirtyNoWorkersAvailableError, ) try: result = client.execute("myapp.ml:MLApp", "inference", "model", data) except DirtyTimeoutError: # Operation timed out pass except DirtyAppNotFoundError: # App not loaded in dirty workers pass except DirtyNoWorkersAvailableError as e: # No workers have this app (all crashed or app limited to 0 workers) print(f"No workers for app: {e.app_path}") except DirtyAppError as e: # Error during app execution print(f"App error: {e.message}, traceback: {e.traceback}") except DirtyConnectionError: # Connection to dirty arbiter failed pass ``` ## Best Practices 1. **Pre-load commonly used resources** in `init()` to avoid cold starts 2. **Set appropriate timeouts** based on your workload 3. **Handle errors gracefully** - dirty workers may restart 4. 
**Use meaningful action names** for easier debugging 5. **Keep responses serializable** - results are passed via binary IPC (supports bytes directly) ## Monitoring Monitor dirty workers using standard process monitoring: ```bash # Check dirty arbiter and workers ps aux | grep "dirty" # View logs tail -f gunicorn.log | grep dirty ``` ## Example: Image Processing ```python # myapp/images.py from gunicorn.dirty import DirtyApp from PIL import Image import io class ImageApp(DirtyApp): def init(self): # Pre-import heavy libraries import cv2 self.cv2 = cv2 def resize(self, image_data, width, height): """Resize an image.""" img = Image.open(io.BytesIO(image_data)) resized = img.resize((width, height)) buffer = io.BytesIO() resized.save(buffer, format='PNG') return buffer.getvalue() def thumbnail(self, image_data, size=128): """Create a thumbnail.""" img = Image.open(io.BytesIO(image_data)) img.thumbnail((size, size)) buffer = io.BytesIO() img.save(buffer, format='JPEG') return buffer.getvalue() def close(self): pass ``` Usage: ```python from gunicorn.dirty import get_dirty_client def upload_image(request): client = get_dirty_client() # Create thumbnail in dirty worker thumbnail = client.execute( "myapp.images:ImageApp", "thumbnail", request.files['image'].read(), size=256 ) return save_thumbnail(thumbnail) ``` ## Complete Examples For full working examples with Docker deployment, see: - [Embedding Service Example](https://github.com/benoitc/gunicorn/tree/master/examples/embedding_service) - FastAPI-based text embedding API using sentence-transformers with dirty workers for ML model management. - [Streaming Chat Example](https://github.com/benoitc/gunicorn/tree/master/examples/streaming_chat) - Simulated LLM chat with token-by-token SSE streaming, demonstrating dirty worker generators and real-time response delivery. 
benoitc-gunicorn-f5fb19e/docs/content/faq.md000066400000000000000000000107241514360242400211620ustar00rootroot00000000000000 # FAQ ## WSGI bits ### How do I set `SCRIPT_NAME`? By default `SCRIPT_NAME` is an empty string. Set it via an environment variable or HTTP header. Because the header contains an underscore it is only accepted from trusted forwarders listed in [`forwarded_allow_ips`](reference/settings.md#forwarded_allow_ips). !!! note If your application should appear under a subfolder, `SCRIPT_NAME` typically starts with a single leading slash and no trailing slash. ## Server stuff ### How do I reload my application in Gunicorn? Send `HUP` to the master process for a graceful reload: ```bash kill -HUP masterpid ``` ### How might I test a proxy configuration? Use [Hey](https://github.com/rakyll/hey) to confirm that your proxy buffers responses correctly for synchronous workers: ```bash hey -n 10000 -c 100 http://127.0.0.1:5000/ ``` That benchmark issues 10,000 requests with a concurrency of 100. ### How can I name processes? Install [setproctitle](https://pypi.python.org/pypi/setproctitle) to give Gunicorn processes meaningful names in tools such as `ps` and `top`. This helps when running multiple Gunicorn instances. See the [`proc_name`](reference/settings.md#proc_name) setting for details. ### Why is there no HTTP keep-alive? The default sync workers target Nginx, which uses HTTP/1.0 for upstream connections. If you need to serve unbuffered internet traffic directly, pick an async worker instead. ## Worker processes ### How do I know which type of worker to use? Read the [design guide](design.md) for guidance on worker types. ### What types of workers are available? See the [`worker_class`](reference/settings.md#worker_class) configuration reference. ### How can I figure out the best number of worker processes? Follow the recommendations for tuning the [`number of workers`](design.md#how-many-workers). ### How can I change the number of workers dynamically? 
Send `TTIN` or `TTOU` to the master process: ```bash kill -TTIN $masterpid # increment workers kill -TTOU $masterpid # decrement workers ``` ### Does Gunicorn suffer from the thundering herd problem? Potentially, when many sleeping handlers wake simultaneously but only one takes the request. There is ongoing work to mitigate this ([issue #792](https://github.com/benoitc/gunicorn/issues/792)). Monitor load if you use large numbers of workers or threads. ### Why don't I see logs in the console? Gunicorn 19.0 disabled console logging by default. Use `--log-file=-` to stream logs to stdout. Console logging returned in 19.2. ## Kernel parameters High-concurrency deployments may need kernel tuning. These Linux-oriented tips apply to any network service. ### How can I increase the maximum number of file descriptors? Raise the per-process limit (remember sockets count as files). Running `sudo ulimit` is ineffective—switch to root, adjust the limit, then launch Gunicorn. Consider managing limits via systemd service units or init scripts. ### How can I increase the maximum socket backlog? Increase the queue of pending connections: ```bash sudo sysctl -w net.core.somaxconn="2048" ``` ### How can I disable the use of `sendfile()`? Pass `--no-sendfile` or set the `SENDFILE=0` environment variable. ## Troubleshooting ### Django reports `ImproperlyConfigured` Asynchronous workers may break `django.core.urlresolvers.reverse`. Use `reverse_lazy` instead. ### How do I avoid blocking in `os.fchmod`? Gunicorn's heartbeat touches temporary files. On disk-backed filesystems (for example `/tmp` on some distributions) `os.fchmod` can block if I/O stalls or the filesystem fills up. Mount a `tmpfs` and point `--worker-tmp-dir` to it. 
Check whether `/tmp` is RAM-backed: ```bash df /tmp ``` If not, create a new `tmpfs` mount: ```bash sudo cp /etc/fstab /etc/fstab.orig sudo mkdir /mem echo 'tmpfs /mem tmpfs defaults,size=64m,mode=1777,noatime,comment=for-gunicorn 0 0' | sudo tee -a /etc/fstab sudo mount /mem ``` Verify the result: ```bash df /mem ``` Then start Gunicorn with `--worker-tmp-dir /mem`. ### Why are workers silently killed? If a worker vanishes without logs, check for `SIGKILL`. Reverse proxies may show `502` responses while Gunicorn logs only new worker startups (for example, `[INFO] Booting worker`). A common culprit is the OOM killer in cgroups-limited environments. Inspect kernel logs: ```bash dmesg | grep gunicorn ``` If you see messages similar to `Memory cgroup out of memory ... Killed process (gunicorn.md)`, raise memory limits or adjust OOM behaviour. benoitc-gunicorn-f5fb19e/docs/content/guides/000077500000000000000000000000001514360242400213455ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/docs/content/guides/docker.md000066400000000000000000000202551514360242400231420ustar00rootroot00000000000000# Docker Deployment Running Gunicorn in Docker containers is the most common deployment pattern for modern Python applications. This guide covers best practices for containerizing Gunicorn applications. 
## Official Docker Image Gunicorn provides an official Docker image on GitHub Container Registry: ```bash docker pull ghcr.io/benoitc/gunicorn:latest ``` ### Quick Start Mount your application directory and run: ```bash docker run -p 8000:8000 -v $(pwd):/app ghcr.io/benoitc/gunicorn app:app ``` ### Running in Background Use `-d` (detached mode) to run the container in the background: ```bash # Start in background docker run -d --name myapp -p 8000:8000 -v $(pwd):/app ghcr.io/benoitc/gunicorn app:app # View logs docker logs myapp # Follow logs in real-time docker logs -f myapp # Stop the container docker stop myapp # Start it again docker start myapp # Remove the container docker rm myapp ``` ### Environment Variables | Variable | Description | Default | |----------|-------------|---------| | `GUNICORN_BIND` | Full bind address | `0.0.0.0:8000` | | `GUNICORN_HOST` | Bind host | `0.0.0.0` | | `GUNICORN_PORT` | Bind port | `8000` | | `GUNICORN_WORKERS` | Number of workers | `(2 * CPU) + 1` | | `GUNICORN_ARGS` | Additional arguments | (none) | ### With Configuration ```bash docker run -p 9000:9000 -v $(pwd):/app \ -e GUNICORN_PORT=9000 \ -e GUNICORN_WORKERS=4 \ -e GUNICORN_ARGS="--timeout 120 --access-logfile -" \ ghcr.io/benoitc/gunicorn app:app ``` ### As Base Image (Recommended for Production) ```dockerfile FROM ghcr.io/benoitc/gunicorn:24.1.0 # Install app dependencies COPY requirements.txt . RUN pip install --no-cache-dir -r requirements.txt # Copy application COPY --chown=gunicorn:gunicorn . . 
CMD ["myapp:app", "--workers", "4"] ``` ### With Docker Compose ```yaml services: web: image: ghcr.io/benoitc/gunicorn:latest ports: - "8000:8000" volumes: - ./app:/app command: ["myapp:app", "--workers", "4"] ``` ### Available Tags - `ghcr.io/benoitc/gunicorn:latest` - Latest release - `ghcr.io/benoitc/gunicorn:24.1.0` - Specific version - `ghcr.io/benoitc/gunicorn:24.1` - Minor version - `ghcr.io/benoitc/gunicorn:24` - Major version ## Building Your Own Image For more control, build a custom image using the patterns below. ## Basic Dockerfile ```dockerfile FROM python:3.12-slim WORKDIR /app # Install dependencies COPY requirements.txt . RUN pip install --no-cache-dir -r requirements.txt # Copy application COPY . . # Run gunicorn CMD ["gunicorn", "app:app", "--bind", "0.0.0.0:8000"] ``` Build and run: ```bash docker build -t myapp . docker run -p 8000:8000 myapp ``` ## Production Configuration ### Environment Variables Use environment variables for configuration: ```dockerfile FROM python:3.12-slim WORKDIR /app COPY requirements.txt . RUN pip install --no-cache-dir -r requirements.txt COPY . . # Configuration via environment ENV GUNICORN_WORKERS=4 ENV GUNICORN_BIND=0.0.0.0:8000 CMD gunicorn app:app \ --workers ${GUNICORN_WORKERS} \ --bind ${GUNICORN_BIND} ``` Or use `GUNICORN_CMD_ARGS`: ```dockerfile ENV GUNICORN_CMD_ARGS="--workers=4 --bind=0.0.0.0:8000" CMD ["gunicorn", "app:app"] ``` ### Worker Count In containers, determine workers based on available CPU: ```python # gunicorn.conf.py import multiprocessing workers = multiprocessing.cpu_count() * 2 + 1 bind = "0.0.0.0:8000" ``` Or let Kubernetes/Docker limit CPU and calculate accordingly: ```bash # At runtime gunicorn app:app --workers $(( 2 * $(nproc) + 1 )) ``` ### Non-Root User Run as a non-root user for security: ```dockerfile FROM python:3.12-slim # Create non-root user RUN useradd --create-home appuser WORKDIR /home/appuser/app COPY requirements.txt . 
RUN pip install --no-cache-dir -r requirements.txt COPY --chown=appuser:appuser . . USER appuser CMD ["gunicorn", "app:app", "--bind", "0.0.0.0:8000"] ``` ### Health Checks Add a health check endpoint and Docker health check: ```dockerfile HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ CMD curl -f http://localhost:8000/health || exit 1 ``` ## Multi-Stage Build Reduce image size with multi-stage builds: ```dockerfile # Build stage FROM python:3.12 AS builder WORKDIR /app COPY requirements.txt . RUN pip wheel --no-cache-dir --wheel-dir /wheels -r requirements.txt # Runtime stage FROM python:3.12-slim WORKDIR /app # Copy wheels and install COPY --from=builder /wheels /wheels RUN pip install --no-cache-dir /wheels/* && rm -rf /wheels COPY . . CMD ["gunicorn", "app:app", "--bind", "0.0.0.0:8000", "--workers", "4"] ``` ## Docker Compose Example `docker-compose.yml`: ```yaml services: web: build: . ports: - "8000:8000" environment: - DATABASE_URL=postgres://db:5432/myapp depends_on: - db deploy: resources: limits: cpus: '2' memory: 512M db: image: postgres:15 environment: - POSTGRES_DB=myapp - POSTGRES_PASSWORD=secret volumes: - postgres_data:/var/lib/postgresql/data nginx: image: nginx:alpine ports: - "80:80" volumes: - ./nginx.conf:/etc/nginx/nginx.conf:ro depends_on: - web volumes: postgres_data: ``` ## Kubernetes Deployment Example Kubernetes deployment: ```yaml apiVersion: apps/v1 kind: Deployment metadata: name: myapp spec: replicas: 3 selector: matchLabels: app: myapp template: metadata: labels: app: myapp spec: containers: - name: myapp image: myapp:latest ports: - containerPort: 8000 env: - name: GUNICORN_WORKERS value: "4" resources: limits: cpu: "1" memory: "512Mi" requests: cpu: "500m" memory: "256Mi" livenessProbe: httpGet: path: /health port: 8000 initialDelaySeconds: 10 periodSeconds: 10 readinessProbe: httpGet: path: /health port: 8000 initialDelaySeconds: 5 periodSeconds: 5 --- apiVersion: v1 kind: Service metadata: name: myapp 
spec: selector: app: myapp ports: - port: 80 targetPort: 8000 ``` ## Graceful Shutdown Gunicorn handles `SIGTERM` gracefully by default. Configure the timeout: ```dockerfile CMD ["gunicorn", "app:app", \ "--bind", "0.0.0.0:8000", \ "--graceful-timeout", "30", \ "--timeout", "120"] ``` Match Docker's stop timeout: ```yaml # docker-compose.yml services: web: stop_grace_period: 30s ``` ## Logging Log to stdout/stderr for Docker log collection: ```python # gunicorn.conf.py accesslog = "-" errorlog = "-" loglevel = "info" ``` Use JSON logging for log aggregation: ```python # gunicorn.conf.py import json import datetime class JsonFormatter: def format(self, record): return json.dumps({ "timestamp": datetime.datetime.utcnow().isoformat(), "level": record.levelname, "message": record.getMessage(), }) logconfig_dict = { "version": 1, "formatters": { "json": {"()": JsonFormatter} }, "handlers": { "console": { "class": "logging.StreamHandler", "formatter": "json", "stream": "ext://sys.stdout" } }, "root": { "handlers": ["console"], "level": "INFO" } } ``` ## Troubleshooting ### Worker Timeout If workers are killed with `[CRITICAL] WORKER TIMEOUT`, increase the timeout: ```bash gunicorn app:app --timeout 120 ``` Or investigate slow requests in your application. ### Out of Memory If containers are OOM-killed: 1. Reduce worker count 2. Use `--max-requests` to restart workers periodically 3. Increase container memory limits ```bash gunicorn app:app --workers 2 --max-requests 1000 --max-requests-jitter 100 ``` ### Connection Reset If you see connection resets, ensure: 1. Load balancer health checks match your `/health` endpoint 2. Graceful timeout is sufficient for in-flight requests 3. 
Keepalive settings match between Gunicorn and upstream proxy ## See Also - [Deploy](../deploy.md) - General deployment patterns - [Settings](../reference/settings.md) - All configuration options benoitc-gunicorn-f5fb19e/docs/content/guides/gunicornc.md000066400000000000000000000145421514360242400236640ustar00rootroot00000000000000--- title: Control Interface (gunicornc) menu: guides: weight: 15 --- # Control Interface (gunicornc) Gunicorn provides a control interface similar to [birdc](https://bird.network.cz/?get_doc&v=20&f=bird-3.html) for the BIRD routing daemon. This allows you to inspect and manage a running Gunicorn instance via a Unix socket. ## Overview The control interface consists of two parts: 1. **Control Socket Server** - Runs in the arbiter process, accepts commands via Unix socket 2. **gunicornc CLI** - Interactive client that connects to the control socket ## Quick Start ### Start Gunicorn with Control Socket By default, Gunicorn creates a control socket at `gunicorn.ctl` in the current directory: ```bash gunicorn -w 4 myapp:app ``` Or specify a custom path: ```bash gunicorn --control-socket /tmp/myapp.ctl -w 4 myapp:app ``` ### Connect with gunicornc ```bash # Connect to default socket (./gunicorn.ctl) gunicornc # Connect to custom socket gunicornc -s /tmp/myapp.ctl # Run a single command gunicornc -c "show workers" # Output as JSON (for scripting) gunicornc -c "show stats" -j ``` ## Interactive Mode When run without the `-c` flag, gunicornc enters interactive mode with readline support: ``` $ gunicornc Connected to gunicorn.ctl Type 'help' for available commands, 'quit' to exit. 
gunicorn> show workers PID AGE BOOTED LAST_BEAT ---------------------------------------- 12345 1 yes 0.2s ago 12346 2 yes 0.1s ago 12347 3 yes 0.3s ago Total: 3 workers gunicorn> worker add 2 { "added": 2, "previous": 3, "total": 5 } gunicorn> quit ``` ## Commands ### Show Commands | Command | Description | |---------|-------------| | `show all` | Overview of all processes (arbiter, web workers, dirty workers) | | `show workers` | List HTTP workers with status | | `show dirty` | List dirty workers and apps | | `show config` | Show current effective configuration | | `show stats` | Show server statistics | | `show listeners` | Show bound sockets | | `help` | Show available commands | ### Worker Management | Command | Description | |---------|-------------| | `worker add [N]` | Spawn N workers (default 1) | | `worker remove [N]` | Remove N workers (default 1) | | `worker kill ` | Gracefully terminate specific worker | ### Dirty Worker Management | Command | Description | |---------|-------------| | `dirty add [N]` | Spawn N dirty workers (default 1) | | `dirty remove [N]` | Remove N dirty workers (default 1) | !!! note "Per-App Worker Limits" When using `dirty add`, workers only load apps that haven't reached their worker limits. If all apps are at their limits, no new workers will be spawned. The response will include a `reason` field explaining this. ### Server Control | Command | Description | |---------|-------------| | `reload` | Graceful reload (equivalent to SIGHUP) | | `reopen` | Reopen log files (equivalent to SIGUSR1) | | `shutdown [graceful\|quick]` | Shutdown server (SIGTERM or SIGINT) | ## Example Session ``` $ gunicornc Connected to gunicorn.ctl Type 'help' for available commands, 'quit' to exit. 
gunicorn> show all ARBITER (master) PID: 12345 WEB WORKERS (4) PID AGE BOOTED LAST_BEAT -------------------------------------- 12346 1 yes 0.05s ago 12347 2 yes 0.04s ago 12348 3 yes 0.03s ago 12349 4 yes 0.02s ago DIRTY ARBITER PID: 12350 DIRTY WORKERS (2) PID AGE APPS -------------------------------------------------- 12351 1 MLModel ImageProcessor 12352 2 MLModel gunicorn> show stats Uptime: 2h 15m 30s PID: 12345 Workers current: 4 Workers target: 4 Workers spawned: 6 Workers killed: 2 Reloads: 1 gunicorn> worker add { "added": 1, "previous": 4, "total": 5 } gunicorn> dirty add 1 { "success": true, "operation": "add", "requested": 1, "spawned": 1, "total_workers": 3, "target_workers": 3 } gunicorn> quit ``` ## Configuration ### Settings | Setting | CLI Flag | Default | Description | |---------|----------|---------|-------------| | `control_socket` | `--control-socket` | `gunicorn.ctl` | Unix socket path | | `control_socket_mode` | `--control-socket-mode` | `0o600` | Socket file permissions | | `control_socket_disable` | `--no-control-socket` | `False` | Disable control socket | ### Example Configuration ```python # gunicorn.conf.py bind = "0.0.0.0:8000" workers = 4 # Control socket settings control_socket = "/var/run/gunicorn/myapp.ctl" control_socket_mode = 0o660 # Allow group access ``` ## Scripting Use the `-j` flag for JSON output when scripting: ```bash #!/bin/bash # Get current worker count workers=$(gunicornc -c "show stats" -j | jq -r '.workers_current') echo "Current workers: $workers" # Scale up if needed if [ "$workers" -lt 8 ]; then gunicornc -c "worker add $((8 - workers))" fi ``` ## Security The control socket uses filesystem permissions for access control: - **Default mode**: `0o600` (owner only) - **No authentication**: Relies on filesystem permissions - **Unix socket only**: No TCP/remote access To allow group access: ```python control_socket_mode = 0o660 ``` To disable the control socket entirely: ```bash gunicorn --no-control-socket myapp:app 
``` ## Protocol The control interface uses a JSON-based protocol with length-prefixed framing: ``` +----------------+------------------+ | Length (4B BE) | JSON Payload | +----------------+------------------+ ``` ### Request Format ```json { "id": 1, "command": "show workers" } ``` ### Response Format ```json { "id": 1, "status": "ok", "data": { ... } } ``` ### Error Response ```json { "id": 1, "status": "error", "error": "Unknown command: foo" } ``` ## Troubleshooting ### Cannot connect to socket ``` Error: Connection refused ``` - Check that Gunicorn is running - Verify the socket path is correct - Check socket file permissions ### Permission denied ``` Error: Permission denied ``` - Check that you have read/write access to the socket file - The socket is created with mode `0o600` by default (owner only) ### Socket not found ``` Error: No such file or directory ``` - Gunicorn creates the socket relative to the working directory by default - Use an absolute path with `--control-socket /path/to/socket.ctl` - Check if `--no-control-socket` was specified benoitc-gunicorn-f5fb19e/docs/content/guides/http2.md000066400000000000000000000563501514360242400227410ustar00rootroot00000000000000# HTTP/2 Support !!! warning "Beta Feature" HTTP/2 support is a beta feature introduced in Gunicorn 25.0.0. While it has been tested, the API and behavior may change in future releases. Please report any issues on [GitHub](https://github.com/benoitc/gunicorn/issues). Gunicorn supports HTTP/2 (RFC 7540) for improved performance with modern clients. HTTP/2 provides multiplexed streams, header compression, and other optimizations over HTTP/1.1. 
## Quick Start ```bash # Install gunicorn with HTTP/2 support pip install gunicorn[http2] # Run with HTTP/2 enabled (requires SSL) gunicorn myapp:app \ --worker-class gthread \ --threads 4 \ --certfile server.crt \ --keyfile server.key \ --http-protocols h2,h1 ``` ## Requirements HTTP/2 support requires: - **SSL/TLS**: HTTP/2 uses ALPN (Application-Layer Protocol Negotiation) which requires an encrypted connection - **h2 library**: Install with `pip install gunicorn[http2]` or `pip install h2` - **Compatible worker**: gthread, gevent, or ASGI workers ## Configuration ### Enable HTTP/2 Enable HTTP/2 by setting the `--http-protocols` option: ```bash gunicorn myapp:app --http-protocols h2,h1 ``` Or in a configuration file: ```python # gunicorn.conf.py http_protocols = ["h2", "h1"] ``` The order matters for ALPN negotiation - protocols are tried in order of preference. | Protocol | Description | |----------|-------------| | `h2` | HTTP/2 over TLS | | `h1` | HTTP/1.1 (fallback) | !!! note Always include `h1` as a fallback for clients that don't support HTTP/2. ### SSL/TLS Configuration HTTP/2 requires SSL/TLS. 
Configure certificates: ```bash gunicorn myapp:app \ --certfile /path/to/server.crt \ --keyfile /path/to/server.key \ --http-protocols h2,h1 ``` Or in a configuration file: ```python # gunicorn.conf.py certfile = "/path/to/server.crt" keyfile = "/path/to/server.key" http_protocols = ["h2", "h1"] ``` ### HTTP/2 Settings Fine-tune HTTP/2 behavior with these settings: | Setting | Default | Description | |---------|---------|-------------| | `http2_max_concurrent_streams` | 100 | Maximum concurrent streams per connection | | `http2_initial_window_size` | 65535 | Initial flow control window size (bytes) | | `http2_max_frame_size` | 16384 | Maximum frame size (bytes) | | `http2_max_header_list_size` | 65536 | Maximum header list size (bytes) | Example configuration: ```python # gunicorn.conf.py http_protocols = ["h2", "h1"] http2_max_concurrent_streams = 200 http2_initial_window_size = 1048576 # 1MB ``` ## Worker Compatibility Not all workers support HTTP/2: | Worker | HTTP/2 Support | Notes | |--------|----------------|-------| | `sync` | No | Single-threaded, cannot multiplex streams | | `gthread` | Yes | Recommended for HTTP/2 | | `gevent` | Yes | Requires gevent | | `eventlet` | Yes | **Deprecated** - will be removed in 26.0 | | `asgi` | Yes | For async frameworks | | `tornado` | No | Tornado handles its own protocol | If you use the sync or tornado worker with HTTP/2 enabled, Gunicorn will log a warning and fall back to HTTP/1.1. ### Recommended: gthread Worker For HTTP/2, the gthread worker is recommended: ```bash gunicorn myapp:app \ --worker-class gthread \ --threads 4 \ --workers 2 \ --http-protocols h2,h1 \ --certfile server.crt \ --keyfile server.key ``` ## HTTP 103 Early Hints Gunicorn supports HTTP 103 Early Hints (RFC 8297), allowing servers to send resource hints before the final response. This enables browsers to preload CSS, JavaScript, and other assets in parallel. 
### WSGI Applications Use the `wsgi.early_hints` callback in your WSGI application: ```python def app(environ, start_response): # Send early hints if available if 'wsgi.early_hints' in environ: environ['wsgi.early_hints']([ ('Link', '; rel=preload; as=style'), ('Link', '; rel=preload; as=script'), ]) # Continue with the actual response start_response('200 OK', [('Content-Type', 'text/html')]) return [b'...'] ``` ### ASGI Applications Use the `http.response.informational` message type: ```python async def app(scope, receive, send): # Send early hints await send({ "type": "http.response.informational", "status": 103, "headers": [ (b"link", b"; rel=preload; as=style"), (b"link", b"; rel=preload; as=script"), ], }) # Send the actual response await send({ "type": "http.response.start", "status": 200, "headers": [(b"content-type", b"text/html")], }) await send({ "type": "http.response.body", "body": b"...", }) ``` !!! note Early hints are only sent to HTTP/1.1+ clients. HTTP/1.0 clients silently ignore the callback since they don't support 1xx responses. ## Stream Priority HTTP/2 allows clients to indicate the relative priority of streams using PRIORITY frames (RFC 7540 Section 5.3). Gunicorn tracks stream priorities and exposes them to both WSGI and ASGI applications. 
### Accessing Priority in WSGI Priority information is available in the WSGI environ for HTTP/2 requests: ```python def app(environ, start_response): # Access stream priority (HTTP/2 only) weight = environ.get('gunicorn.http2.priority_weight') depends_on = environ.get('gunicorn.http2.priority_depends_on') if weight is not None: # This is an HTTP/2 request with priority info # Higher weight = client considers this more important print(f"Request priority: weight={weight}, depends_on={depends_on}") start_response('200 OK', [('Content-Type', 'text/plain')]) return [b'OK'] ``` | Environ Key | Range | Default | Description | |-------------|-------|---------|-------------| | `gunicorn.http2.priority_weight` | 1-256 | 16 | Higher weight = more resources | | `gunicorn.http2.priority_depends_on` | Stream ID | 0 | Parent stream (0 = root) | ### Accessing Priority in ASGI For ASGI applications, priority is available in the scope's `extensions` dict: ```python async def app(scope, receive, send): if scope["type"] == "http": # Check for HTTP/2 priority extension extensions = scope.get("extensions", {}) priority = extensions.get("http.response.priority") if priority: weight = priority["weight"] # 1-256 depends_on = priority["depends_on"] # Parent stream ID print(f"Request priority: weight={weight}, depends_on={depends_on}") await send({ "type": "http.response.start", "status": 200, "headers": [(b"content-type", b"text/plain")], }) await send({ "type": "http.response.body", "body": b"OK", }) ``` | Extension Key | Field | Range | Default | Description | |---------------|-------|-------|---------|-------------| | `http.response.priority` | `weight` | 1-256 | 16 | Higher weight = more resources | | `http.response.priority` | `depends_on` | Stream ID | 0 | Parent stream (0 = root) | !!! note Stream priority is advisory. Applications can use it for scheduling decisions, but Gunicorn does not enforce priority-based request ordering. 
Priority information is only present for HTTP/2 requests. ## Response Trailers HTTP/2 supports trailing headers (trailers) sent after the response body. This is commonly used for gRPC status codes, checksums, and timing information. ### WSGI Applications For WSGI applications, use the `gunicorn.http2.send_trailers` callback in the environ: ```python def app(environ, start_response): # Get trailer callback (HTTP/2 only) send_trailers = environ.get('gunicorn.http2.send_trailers') # Announce trailers in response headers headers = [ ('Content-Type', 'application/grpc'), ('Trailer', 'grpc-status, grpc-message'), ] start_response('200 OK', headers) # Yield response body yield b'response data' # Send trailers after body (if available) if send_trailers: send_trailers([ ('grpc-status', '0'), ('grpc-message', 'OK'), ]) ``` ### ASGI Applications For ASGI applications, use the `http.response.trailers` extension: ```python async def app(scope, receive, send): # Send response with trailers flag await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"application/grpc"), (b"trailer", b"grpc-status, grpc-message"), ], }) # Send body await send({ "type": "http.response.body", "body": b"response data", "more_body": False, }) # Send trailers (HTTP/2 only) if "http.response.trailers" in scope.get("extensions", {}): await send({ "type": "http.response.trailers", "headers": [ (b"grpc-status", b"0"), (b"grpc-message", b"OK"), ], }) ``` ### Trailer Rules (RFC 7540) - Trailers MUST NOT include pseudo-headers (`:status`, `:path`, etc.) 
- Announce trailers using the `Trailer` response header - Trailers are only available in HTTP/2 (HTTP/1.1 chunked encoding not supported) ### Common Use Cases | Use Case | Trailer Headers | |----------|-----------------| | gRPC | `grpc-status`, `grpc-message` | | Checksums | `Content-MD5`, `Digest` | | Timing | `Server-Timing` | | Signatures | `Signature` | ## Production Deployment ### With Nginx Configure nginx to proxy HTTP/2 connections to Gunicorn: ```nginx upstream gunicorn { server 127.0.0.1:8443; keepalive 32; } server { listen 443 ssl; http2 on; server_name example.com; ssl_certificate /path/to/server.crt; ssl_certificate_key /path/to/server.key; ssl_protocols TLSv1.2 TLSv1.3; # Forward 103 Early Hints (requires nginx 1.29+) location / { proxy_pass https://gunicorn; proxy_http_version 1.1; proxy_ssl_verify off; early_hints $http2; proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; } } ``` !!! note For nginx to forward 103 Early Hints from upstream, you need nginx 1.29+ and the [`early_hints`](https://nginx.org/en/docs/http/ngx_http_core_module.html#early_hints) directive. 
### Direct TLS Termination For simpler deployments, Gunicorn can terminate TLS directly: ```python # gunicorn.conf.py bind = "0.0.0.0:443" worker_class = "gthread" threads = 4 workers = 4 # SSL certfile = "/etc/letsencrypt/live/example.com/fullchain.pem" keyfile = "/etc/letsencrypt/live/example.com/privkey.pem" # HTTP/2 http_protocols = ["h2", "h1"] http2_max_concurrent_streams = 100 ``` ### Recommended Settings For production HTTP/2 deployments: ```python # gunicorn.conf.py worker_class = "gthread" workers = 4 threads = 4 keepalive = 120 # HTTP/2 connections are long-lived # SSL/TLS certfile = "/path/to/server.crt" keyfile = "/path/to/server.key" ssl_version = "TLSv1_2" # Minimum TLS 1.2 for HTTP/2 # HTTP/2 http_protocols = ["h2", "h1"] http2_max_concurrent_streams = 100 http2_initial_window_size = 65535 ``` ## Troubleshooting ### HTTP/2 not negotiated If clients fall back to HTTP/1.1: 1. Verify SSL is configured correctly 2. Check that `h2` is in `--http-protocols` 3. Ensure the h2 library is installed: `pip install h2` 4. Verify ALPN support: `openssl s_client -alpn h2 -connect host:port` ### Worker doesn't support HTTP/2 If you see "HTTP/2 is not supported by the sync worker": ```bash # Switch to gthread worker gunicorn myapp:app --worker-class gthread --threads 4 ``` ### Connection errors with large requests Increase flow control window sizes: ```python http2_initial_window_size = 1048576 # 1MB http2_max_frame_size = 32768 # 32KB ``` ### Too many concurrent streams If clients report stream limit errors: ```python http2_max_concurrent_streams = 200 # Increase from default 100 ``` ## Performance Tuning HTTP/2 performance depends on proper tuning of both Gunicorn and system settings. This section covers different tuning profiles and their trade-offs. ### Tuning Profiles #### Conservative (Default) Best for: Low to moderate traffic, memory-constrained environments. 
```python # gunicorn.conf.py - Conservative profile workers = 2 worker_class = "gthread" threads = 4 http2_max_concurrent_streams = 100 http2_initial_window_size = 65535 # 64KB http2_max_frame_size = 16384 # 16KB ``` | Pros | Cons | |------|------| | Low memory footprint | Limited throughput at high concurrency | | Safe defaults per RFC | More round-trips for large transfers | | Works on constrained systems | May bottleneck at ~10K req/s | #### Balanced Best for: Moderate traffic, general production use. ```python # gunicorn.conf.py - Balanced profile workers = 4 worker_class = "gthread" threads = 4 backlog = 2048 http2_max_concurrent_streams = 128 http2_initial_window_size = 262144 # 256KB http2_max_frame_size = 16384 # 16KB ``` | Pros | Cons | |------|------| | Good throughput (15K+ req/s) | More memory per connection | | Handles traffic spikes | Requires more CPU | | Good balance of resources | | #### High Concurrency Best for: High traffic APIs, microservices, load testing. ```python # gunicorn.conf.py - High concurrency profile workers = 4 worker_class = "gthread" threads = 8 backlog = 2048 worker_connections = 10000 http2_max_concurrent_streams = 256 http2_initial_window_size = 1048576 # 1MB http2_max_frame_size = 32768 # 32KB ``` | Pros | Cons | |------|------| | High throughput (20K+ req/s) | Higher memory usage (~4x conservative) | | Handles 1000s of clients | Requires system tuning | | Better large transfer performance | May overwhelm downstream services | ### Setting Trade-offs #### `http2_max_concurrent_streams` Controls how many simultaneous streams a client can open per connection. | Value | Memory | Throughput | Use Case | |-------|--------|------------|----------| | 50-100 | Low | Moderate | APIs with small payloads | | 128-256 | Medium | High | General web applications | | 500+ | High | Very High | Streaming, real-time apps | !!! warning Very high values (500+) can lead to resource exhaustion under attack. Use with rate limiting. 
#### `http2_initial_window_size` Flow control window size determines how much data can be sent before waiting for acknowledgment. | Value | Memory | Latency | Use Case | |-------|--------|---------|----------| | 65535 (64KB) | Low | Higher for large transfers | Default, memory-constrained | | 262144 (256KB) | Medium | Balanced | General use | | 1048576 (1MB) | High | Lower for large transfers | Large file transfers, streaming | !!! note Larger windows improve throughput for large responses but increase memory usage per stream. Calculate: `max_streams × window_size × connections`. #### `http2_max_frame_size` Maximum size of individual HTTP/2 frames. | Value | Memory | Efficiency | Use Case | |-------|--------|------------|----------| | 16384 (16KB) | Low | More frames for large data | Default, RFC minimum | | 32768 (32KB) | Medium | Balanced | General use | | 65536 (64KB) | Higher | Fewer frames | Large payloads | ### System Tuning (Linux) For high concurrency (1000+ clients), tune these kernel parameters: ```bash # /etc/sysctl.conf or /etc/sysctl.d/99-gunicorn.conf # Increase socket backlog for burst connections net.core.somaxconn = 65535 net.ipv4.tcp_max_syn_backlog = 65535 # Increase network queue size net.core.netdev_max_backlog = 65535 # Expand ephemeral port range net.ipv4.ip_local_port_range = 1024 65535 # Allow reuse of TIME_WAIT sockets net.ipv4.tcp_tw_reuse = 1 # Increase max open files system-wide fs.file-max = 2097152 ``` Apply with: `sudo sysctl -p` Also increase file descriptor limits: ```bash # /etc/security/limits.conf * soft nofile 65535 * hard nofile 65535 ``` ### Docker Tuning For Docker deployments, add these to your container or compose file: ```yaml # docker-compose.yml services: gunicorn: ulimits: nofile: soft: 65535 hard: 65535 sysctls: net.core.somaxconn: 65535 ``` Or in Dockerfile: ```dockerfile # Increase file descriptor limit RUN ulimit -n 65535 ``` ### Benchmark Results Reference benchmarks using h2load with 4 Gunicorn workers in Docker 
(Apple M4 Pro): | Profile | Clients | Streams | Requests/sec | Latency (mean) | |---------|---------|---------|--------------|----------------| | Conservative | 100 | 10 | 11,700 | 69ms | | Conservative | 1000 | 10 | 12,750 | 441ms | | High Concurrency | 100 | 10 | 15,000+ | 50ms | | High Concurrency | 1000 | 10 | 21,700 | 253ms | | High Concurrency | 2000 | 10 | 12,300 | 243ms | !!! note Actual performance varies based on hardware, network, and application complexity. Always benchmark your specific workload. ## Testing HTTP/2 ### Using curl ```bash # Check HTTP/2 support curl -v --http2 https://localhost:443/ # Force HTTP/2 curl --http2-prior-knowledge https://localhost:443/ ``` ### Using Python ```python import httpx with httpx.Client(http2=True, verify=False) as client: response = client.get("https://localhost:8443/") print(f"HTTP Version: {response.http_version}") ``` ## Complete Example A complete HTTP/2 example demonstrating priority and trailers is available in the `examples/http2_features/` directory. This includes: - **http2_app.py**: ASGI application showing priority access and trailer sending - **test_http2.py**: Test script verifying HTTP/2 features - **Dockerfile** and **docker-compose.yml**: Docker setup for testing To run the example: ```bash cd examples/http2_features docker compose up --build # In another terminal: docker compose exec http2-features python /app/http2_features/test_http2.py ``` The example demonstrates: 1. **Priority access**: Reading `http.response.priority` extension in ASGI scope 2. **Response trailers**: Sending `http.response.trailers` messages 3. 
**Combined features**: Using both priority and trailers in one response ## RFC Compliance Gunicorn's HTTP/2 implementation is built on the [h2 library](https://github.com/python-hyper/h2) and complies with the following specifications: | Feature | RFC | Status | Notes | |---------|-----|--------|-------| | HTTP/2 Protocol | [RFC 7540](https://tools.ietf.org/html/rfc7540) | Compliant | Core protocol support | | HTTP/2 Semantics | [RFC 9113](https://tools.ietf.org/html/rfc9113) | Compliant | Updated HTTP/2 spec | | HPACK Compression | [RFC 7541](https://tools.ietf.org/html/rfc7541) | Compliant | Via h2 library | | Stream State Machine | RFC 7540 Section 5.1 | Compliant | Full state transitions | | Flow Control | RFC 7540 Section 6.9 | Compliant | Stream and connection level | | Stream Priority | RFC 7540 Section 5.3 | Compliant | Weight and dependency tracking | | Frame Size Limits | RFC 7540 Section 6.2 | Compliant | Validated 16384-16777215 bytes | | Pseudo-Headers | RFC 9113 Section 8.3 | Compliant | All required headers supported | | `:authority` Handling | RFC 9113 Section 8.3.1 | Compliant | Takes precedence over Host | | Response Trailers | RFC 9110 Section 6.5 | Compliant | Pseudo-headers forbidden | | GOAWAY Handling | RFC 7540 Section 6.8 | Compliant | Graceful shutdown | | RST_STREAM Handling | RFC 7540 Section 6.4 | Compliant | Stream reset | | Early Hints | [RFC 8297](https://tools.ietf.org/html/rfc8297) | Compliant | 103 informational responses | | Server Push | RFC 7540 Section 6.6 | Not Implemented | Optional feature, rarely used | !!! note Server Push (PUSH_PROMISE) is not implemented. This is an optional HTTP/2 feature that is being deprecated in HTTP/3 and is rarely used in practice. ## Security Considerations HTTP/2 introduces new attack vectors compared to HTTP/1.1. Gunicorn includes several protections against known vulnerabilities. 
### Built-in Protections | Attack | Protection | Setting | |--------|------------|---------| | Stream Multiplexing Abuse | Limit concurrent streams | `http2_max_concurrent_streams` (default: 100) | | HPACK Bomb | Header size limits | `http2_max_header_list_size` (default: 65536) | | Large Frame Attack | Frame size limits | `http2_max_frame_size` (validated: 16384-16777215) | | Resource Exhaustion | Flow control windows | `http2_initial_window_size` (default: 65535) | | Slow Read (Slowloris) | Connection timeouts | `timeout` and `keepalive` settings | ### Recommended Security Settings ```python # gunicorn.conf.py - Security-hardened HTTP/2 configuration # Limit concurrent streams to prevent resource exhaustion http2_max_concurrent_streams = 100 # Limit header size to prevent HPACK bomb attacks http2_max_header_list_size = 65536 # 64KB # Standard frame size (RFC minimum) http2_max_frame_size = 16384 # Reasonable flow control window http2_initial_window_size = 65535 # 64KB # Connection timeouts to prevent slow attacks timeout = 30 keepalive = 120 graceful_timeout = 30 # Limit request sizes limit_request_line = 4094 limit_request_fields = 100 limit_request_field_size = 8190 ``` ### Additional Recommendations 1. **Use a reverse proxy**: Deploy behind nginx, HAProxy, or a cloud load balancer for additional DDoS protection and rate limiting. 2. **Enable rate limiting**: Use your reverse proxy to limit requests per client. 3. **Monitor connections**: Watch for clients opening many streams or holding connections open without sending data. 4. **Keep dependencies updated**: Regularly update the `h2` library for security fixes. 
For more information on HTTP/2 security vulnerabilities, see: - [Imperva HTTP/2 Vulnerability Report](https://www.imperva.com/docs/Imperva_HII_HTTP2.pdf) - [NGINX HTTP/2 Security Advisory](https://www.nginx.com/blog/the-imperva-http2-vulnerability-report-and-nginx/) ## Compliance Testing ### h2spec [h2spec](https://github.com/summerwind/h2spec) is the standard conformance testing tool for HTTP/2 implementations. It tests compliance with RFC 7540 and RFC 7541. ```bash # Install h2spec # macOS brew install h2spec # Linux (download from releases) curl -L https://github.com/summerwind/h2spec/releases/download/v2.6.0/h2spec_linux_amd64.tar.gz | tar xz # Run against your server h2spec -h localhost -p 8443 -t -k # Options: # -t Use TLS # -k Skip certificate verification # -S Strict mode (test SHOULD requirements) # -v Verbose output # -j Generate JUnit report ``` Example output: ``` Generic tests for HTTP/2 server 1. Starting HTTP/2 ✓ Sends a client connection preface ... Hypertext Transfer Protocol Version 2 (HTTP/2) 3. Starting HTTP/2 3.5. HTTP/2 Connection Preface ✓ Sends invalid connection preface ... 
94 tests, 94 passed, 0 skipped, 0 failed ``` ### nghttp2 Tools [nghttp2](https://nghttp2.org/) provides useful debugging tools: ```bash # Install nghttp2 # macOS brew install nghttp2 # Linux apt-get install nghttp2-client # Test HTTP/2 connection nghttp -v https://localhost:8443/ # Benchmark with h2load h2load -n 1000 -c 10 https://localhost:8443/ ``` ### Online Testing For public servers, you can use online tools: - [KeyCDN HTTP/2 Test](https://tools.keycdn.com/http2-test) - [HTTP/2 Check](https://http.dev/2/test) ## See Also - [Settings Reference](../reference/settings.md#http2_max_concurrent_streams) - All HTTP/2 settings - [ASGI Worker](../asgi.md) - ASGI worker with HTTP/2 support - [Deploy](../deploy.md) - General deployment guidance benoitc-gunicorn-f5fb19e/docs/content/index.md000066400000000000000000000111661514360242400215230ustar00rootroot00000000000000--- template: home.html title: Gunicorn - Python WSGI HTTP Server ---

Serve Python on the Web

Battle-tested. Production-ready. One command to serve your Python apps.

$ pip install gunicorn $ gunicorn myapp:app # Listening at http://127.0.0.1:8000

Why Gunicorn?

Production-Proven

Trusted by thousands of companies. The pre-fork worker model handles traffic spikes gracefully.

Lightweight

Minimal dependencies, simple configuration. Efficient from containers to bare metal.

Compatible

Works with any WSGI or ASGI framework. Django, Flask, FastAPI—it just runs.

Works With Your Stack

WSGI and ASGI frameworks, no changes needed

Django Flask FastAPI Pyramid Starlette Falcon Bottle Quart

Support

Powering Python apps since 2010. Support continued development.

Become a Sponsor
benoitc-gunicorn-f5fb19e/docs/content/install.md000066400000000000000000000064271514360242400220660ustar00rootroot00000000000000# Installation !!! note Gunicorn requires **Python 3.12 or newer**. ## Quick Install === "pip" ```bash pip install gunicorn ``` === "pipx" ```bash pipx install gunicorn ``` === "Docker" ```bash docker pull ghcr.io/benoitc/gunicorn:latest docker run -p 8000:8000 -v $(pwd):/app ghcr.io/benoitc/gunicorn app:app ``` See the [Docker guide](guides/docker.md) for production configurations. === "System Packages" **Debian/Ubuntu:** ```bash sudo apt-get update sudo apt-get install gunicorn ``` **Fedora:** ```bash sudo dnf install python3-gunicorn ``` **Arch Linux:** ```bash sudo pacman -S gunicorn ``` !!! warning System packages may lag behind the latest release. For production, prefer pip installation in a virtual environment. ## Virtual Environment (Recommended) Always install Gunicorn inside a virtual environment to isolate dependencies: ```bash # Create virtual environment python -m venv venv # Activate it source venv/bin/activate # Linux/macOS # or: venv\Scripts\activate # Windows # Install gunicorn pip install gunicorn ``` ## From Source Install the latest development version from GitHub: ```bash pip install git+https://github.com/benoitc/gunicorn.git ``` Upgrade to the latest commit: ```bash pip install -U git+https://github.com/benoitc/gunicorn.git ``` ## Extra Packages Gunicorn provides optional extras for additional worker types and features. Install them with pip's bracket syntax: ```bash pip install gunicorn[gevent,setproctitle] ``` ### Worker Types | Extra | Description | |-------|-------------| | `gunicorn[gevent]` | Gevent-based greenlet workers | | `gunicorn[gthread]` | Threaded workers | | `gunicorn[tornado]` | Tornado-based workers (not recommended) | | `gunicorn[eventlet]` | **Deprecated** - will be removed in 26.0 | See the [design docs](design.md) for guidance on choosing worker types. 
### Utilities | Extra | Description | |-------|-------------| | `gunicorn[setproctitle]` | Set process name in `ps`/`top` output | !!! tip If running multiple Gunicorn instances, use `setproctitle` with the [`proc_name`](reference/settings.md#proc_name) setting to distinguish them. ## Async Workers For applications using async I/O patterns, install the appropriate greenlet library: === "Gevent" ```bash pip install gunicorn[gevent] ``` Run with: ```bash gunicorn app:app --worker-class gevent ``` === "ASGI (asyncio)" No extra installation required: ```bash gunicorn app:app --worker-class asgi ``` For better performance, install uvloop: ```bash pip install uvloop gunicorn app:app --worker-class asgi --asgi-loop uvloop ``` !!! note Greenlet-based workers require the Python development headers. On Ubuntu: `sudo apt-get install python3-dev` ## Verify Installation Check the installed version: ```bash gunicorn --version ``` Test with a simple application: ```bash echo 'def app(e, s): s("200 OK", []); return [b"OK"]' > test_app.py gunicorn test_app:app # Visit http://127.0.0.1:8000 ``` ## Next Steps - [Quickstart](quickstart.md) - Get running in 5 minutes - [Run](run.md) - CLI usage and framework integration - [Configure](configure.md) - Configuration options benoitc-gunicorn-f5fb19e/docs/content/instrumentation.md000066400000000000000000000022571514360242400236600ustar00rootroot00000000000000 # Instrumentation !!! info "Added in 19.1" Gunicorn exposes optional instrumentation for the arbiter and workers using the statsD protocol over UDP. The `gunicorn.instrument.statsd` module turns Gunicorn into a statsD client. UDP keeps Gunicorn isolated from slow statsD consumers, so metrics collection does not impact request handling. Tell Gunicorn where the statsD server is located: ```bash gunicorn --statsd-host=localhost:8125 --statsd-prefix=service.app ... 
``` The `Statsd` logger subclasses `gunicorn.glogging.Logger` and tracks: - `gunicorn.requests` — request rate per second - `gunicorn.request.duration` — request duration histogram (milliseconds.md) - `gunicorn.workers` — number of workers managed by the arbiter (gauge.md) - `gunicorn.log.critical` — rate of critical log messages - `gunicorn.log.error` — rate of error log messages - `gunicorn.log.warning` — rate of warning log messages - `gunicorn.log.exception` — rate of exceptional log messages See the [`statsd_host`](reference/settings.md#statsd_host) setting for additional options. [statsD](https://github.com/etsy/statsd) benoitc-gunicorn-f5fb19e/docs/content/news.md000066400000000000000000000346341514360242400213750ustar00rootroot00000000000000 # Changelog ## 25.1.0 - 2026-02-13 ### New Features - **Control Interface (gunicornc)**: Add interactive control interface for managing running Gunicorn instances, similar to birdc for BIRD routing daemon ([PR #3505](https://github.com/benoitc/gunicorn/pull/3505)) - Unix socket-based communication with JSON protocol - Interactive mode with readline support and command history - Commands: `show all/workers/dirty/config/stats/listeners` - Worker management: `worker add/remove/kill`, `dirty add/remove` - Server control: `reload`, `reopen`, `shutdown` - New settings: `--control-socket`, `--control-socket-mode`, `--no-control-socket` - New CLI tool: `gunicornc` for connecting to control socket - See [Control Interface Guide](guides/gunicornc.md) for details - **Dirty Stash**: Add global shared state between workers via `dirty.stash` ([PR #3503](https://github.com/benoitc/gunicorn/pull/3503)) - In-memory key-value store accessible by all workers - Supports get, set, delete, clear, keys, and has operations - Useful for sharing state like feature flags, rate limits, or cached data - **Dirty Binary Protocol**: Implement efficient binary protocol for dirty arbiter IPC using TLV (Type-Length-Value) encoding ([PR 
#3500](https://github.com/benoitc/gunicorn/pull/3500)) - More efficient than JSON for binary data - Supports all Python types: str, bytes, int, float, bool, None, list, dict - Better performance for large payloads - **Dirty TTIN/TTOU Signals**: Add dynamic worker scaling for dirty arbiters ([PR #3504](https://github.com/benoitc/gunicorn/pull/3504)) - Send SIGTTIN to increase dirty workers - Send SIGTTOU to decrease dirty workers - Respects minimum worker constraints from app configurations ### Changes - **ASGI Worker**: Promoted from beta to stable - **Dirty Arbiters**: Now marked as beta feature ### Documentation - Fix Markdown formatting in /configure documentation --- ## 25.0.3 - 2026-02-07 ### Bug Fixes - Fix RuntimeError when StopIteration is raised inside ASGI response body coroutine (PEP 479 compliance) - Fix deprecation warning for passing maxsplit as positional argument in `re.split()` (Python 3.13+) --- ## 25.0.2 - 2026-02-06 ### Bug Fixes - Fix ASGI concurrent request failures through nginx proxy by normalizing sockaddr tuples to handle both 2-tuple (IPv4) and 4-tuple (IPv6) formats ([PR #3485](https://github.com/benoitc/gunicorn/pull/3485)) - Fix graceful disconnect handling for ASGI worker to properly handle client disconnects without raising exceptions ([PR #3485](https://github.com/benoitc/gunicorn/pull/3485)) - Fix lazy import of dirty module for gevent compatibility - prevents import errors when concurrent.futures is imported before gevent monkey-patching ([PR #3483](https://github.com/benoitc/gunicorn/pull/3483)) ### Changes - Refactor: Extract `_normalize_sockaddr` utility function for consistent socket address handling across workers - Add license headers to all Python source files - Update copyright year to 2026 in LICENSE and NOTICE files --- ## 25.0.1 - 2026-02-02 ### Bug Fixes - Fix ASGI streaming responses (SSE) hanging: add chunked transfer encoding for HTTP/1.1 responses without Content-Length header. 
Without chunked encoding, clients wait for connection close to determine end-of-response. ### Changes - Update celery_alternative example to use FastAPI with native ASGI worker and uvloop for async task execution ### Testing - Add ASGI compliance test suite with Docker-based integration tests covering HTTP, WebSocket, streaming, lifespan, framework integration (Starlette, FastAPI), HTTP/2, and concurrency scenarios --- ## 25.0.0 - 2026-02-01 ### New Features - **Dirty Arbiters**: Separate process pool for executing long-running, blocking operations (AI model loading, heavy computation) without blocking HTTP workers ([PR #3460](https://github.com/benoitc/gunicorn/pull/3460)) - Inspired by Erlang's dirty schedulers - Asyncio-based with Unix socket IPC - Stateful workers that persist loaded resources - New settings: `--dirty-app`, `--dirty-workers`, `--dirty-timeout`, `--dirty-threads`, `--dirty-graceful-timeout` - Lifecycle hooks: `on_dirty_starting`, `dirty_post_fork`, `dirty_worker_init`, `dirty_worker_exit` - **Per-App Worker Allocation for Dirty Arbiters**: Control how many dirty workers load each app for memory optimization with heavy models ([PR #3473](https://github.com/benoitc/gunicorn/pull/3473)) - Set `workers` class attribute on DirtyApp (e.g., `workers = 2`) - Or use config format `module:class:N` (e.g., `myapp:HeavyModel:2`) - Requests automatically routed to workers with the target app - New exception `DirtyNoWorkersAvailableError` for graceful error handling - Example: 8 workers × 10GB model = 80GB → with `workers=2`: 20GB (75% savings) - **HTTP/2 Support (Beta)**: Native HTTP/2 (RFC 7540) support for improved performance with modern clients ([PR #3468](https://github.com/benoitc/gunicorn/pull/3468)) - Multiplexed streams over a single connection - Header compression (HPACK) - Flow control and stream prioritization - Works with gthread, gevent, and ASGI workers - New settings: `--http-protocols`, `--http2-max-concurrent-streams`, 
`--http2-initial-window-size`, `--http2-max-frame-size`, `--http2-max-header-list-size` - Requires SSL/TLS and h2 library: `pip install gunicorn[http2]` - See [HTTP/2 Guide](guides/http2.md) for details - New example: `examples/http2_gevent/` with Docker and tests - **HTTP 103 Early Hints**: Support for RFC 8297 Early Hints to enable browsers to preload resources before the final response ([PR #3468](https://github.com/benoitc/gunicorn/pull/3468)) - WSGI: `environ['wsgi.early_hints'](headers)` callback - ASGI: `http.response.informational` message type - Works with both HTTP/1.1 and HTTP/2 - **uWSGI Protocol for ASGI Worker**: The ASGI worker now supports receiving requests via the uWSGI binary protocol from nginx ([PR #3467](https://github.com/benoitc/gunicorn/pull/3467)) ### Bug Fixes - Fix HTTP/2 ALPN negotiation for gevent and eventlet workers when `do_handshake_on_connect` is False (the default). The TLS handshake is now explicitly performed before checking `selected_alpn_protocol()`. - Fix setproctitle initialization with systemd socket activation ([#3465](https://github.com/benoitc/gunicorn/issues/3465)) - Fix `Expect: 100-continue` handling: ignore the header for HTTP/1.0 requests since 100-continue is only valid for HTTP/1.1+ ([PR #3463](https://github.com/benoitc/gunicorn/pull/3463)) - Fix missing `_expected_100_continue` attribute in UWSGIRequest - Disable setproctitle on macOS to prevent segfaults during process title updates - Publish full exception traceback when the application fails to load ([#3462](https://github.com/benoitc/gunicorn/issues/3462)) - Fix ASGI: quick shutdown on SIGINT/SIGQUIT, graceful on SIGTERM ### Deprecations - **Eventlet Worker**: The `eventlet` worker is deprecated and will be removed in Gunicorn 26.0. Eventlet itself is [no longer actively maintained](https://eventlet.readthedocs.io/en/latest/asyncio/migration.html). Please migrate to `gevent`, `gthread`, or another supported worker type. 
### Changes - Remove obsolete Makefile targets ([PR #3471](https://github.com/benoitc/gunicorn/pull/3471)) --- ## 24.1.1 - 2026-01-24 ### Bug Fixes - Fix `forwarded_allow_ips` and `proxy_allow_ips` to remain as strings for backward compatibility with external tools like uvicorn. Network validation now uses strict mode to detect invalid CIDR notation (e.g., `192.168.1.1/24` where host bits are set) ([#3458](https://github.com/benoitc/gunicorn/issues/3458), [PR #3459](https://github.com/benoitc/gunicorn/pull/3459)) --- ## 24.1.0 - 2026-01-23 ### New Features - **Official Docker Image**: Gunicorn now publishes official Docker images to GitHub Container Registry at `ghcr.io/benoitc/gunicorn` - Based on Python 3.12 slim image - Uses recommended worker formula (2 × CPU + 1) - Configurable via environment variables - **PROXY Protocol v2 Support**: Extended PROXY protocol implementation to support the binary v2 format in addition to the existing text-based v1 format - New `--proxy-protocol` modes: `off`, `v1`, `v2`, `auto` - Works with HAProxy, AWS NLB/ALB, and other PROXY protocol v2 sources - **CIDR Network Support**: `--forwarded-allow-ips` and `--proxy-allow-from` now accept CIDR notation (e.g., `192.168.0.0/16`) for specifying trusted networks - **Socket Backlog Metric**: New `gunicorn.socket.backlog` gauge metric reports the current socket backlog size on Linux systems - **InotifyReloader Enhancement**: The inotify-based reloader now watches newly imported modules, not just those loaded at startup ### Bug Fixes - Fix signal handling regression where SIGCLD alias caused errors on Linux - Fix socket blocking mode on keepalive connections with async workers - Handle `SSLWantReadError` in `finish_body()` to prevent worker hangs - Log SIGTERM as info level instead of warning - Print exception details to stderr when worker fails to boot - Fix `unreader.unread()` to prepend data to buffer instead of appending - Prevent `RecursionError` when pickling Config objects --- ## 
24.0.0 - 2026-01-23 ### New Features - **ASGI Worker (Beta)**: Native asyncio-based ASGI support for running async Python frameworks like FastAPI, Starlette, and Quart without external dependencies - HTTP/1.1 with keepalive connections - WebSocket support - Lifespan protocol for startup/shutdown hooks - Optional uvloop for improved performance - **uWSGI Binary Protocol**: Support for receiving requests from nginx via `uwsgi_pass` directive - **Documentation Migration**: Migrated to MkDocs with Material theme ### Security - **eventlet**: Require eventlet >= 0.40.3 (CVE-2021-21419, CVE-2025-58068) - **gevent**: Require gevent >= 24.10.1 (CVE-2023-41419, CVE-2024-3219) - **tornado**: Require tornado >= 6.5.0 (CVE-2025-47287) --- ## 23.0.0 - 2024-08-10 - minor docs fixes ([PR #3217](https://github.com/benoitc/gunicorn/pull/3217), [PR #3089](https://github.com/benoitc/gunicorn/pull/3089), [PR #3167](https://github.com/benoitc/gunicorn/pull/3167)) - worker_class parameter accepts a class ([PR #3079](https://github.com/benoitc/gunicorn/pull/3079)) - fix deadlock if request terminated during chunked parsing ([PR #2688](https://github.com/benoitc/gunicorn/pull/2688)) - permit receiving Transfer-Encodings: compress, deflate, gzip ([PR #3261](https://github.com/benoitc/gunicorn/pull/3261)) - permit Transfer-Encoding headers specifying multiple encodings. 
note: no parameters, still ([PR #3261](https://github.com/benoitc/gunicorn/pull/3261)) - sdist generation now explicitly excludes sphinx build folder ([PR #3257](https://github.com/benoitc/gunicorn/pull/3257)) - decode bytes-typed status (as can be passed by gevent) as utf-8 instead of raising `TypeError` ([PR #2336](https://github.com/benoitc/gunicorn/pull/2336)) - raise correct Exception when encountering invalid chunked requests ([PR #3258](https://github.com/benoitc/gunicorn/pull/3258)) - the SCRIPT_NAME and PATH_INFO headers, when received from allowed forwarders, are no longer restricted for containing an underscore ([PR #3192](https://github.com/benoitc/gunicorn/pull/3192)) - include IPv6 loopback address ``[::1]`` in default for [forwarded-allow-ips](reference/settings.md#forwarded_allow_ips) and [proxy-allow-ips](reference/settings.md#proxy_allow_ips) ([PR #3192](https://github.com/benoitc/gunicorn/pull/3192)) !!! note - The SCRIPT_NAME change mitigates a regression that appeared first in the 22.0.0 release - Review your [forwarded-allow-ips](reference/settings.md#forwarded_allow_ips) setting if you are still not seeing the SCRIPT_NAME transmitted - Review your [forwarder-headers](reference/settings.md#forwarder_headers) setting if you are missing headers after upgrading from a version prior to 22.0.0 ### Breaking changes - refuse requests where the uri field is empty ([PR #3255](https://github.com/benoitc/gunicorn/pull/3255)) - refuse requests with invalid CR/LF/NUL in header field values ([PR #3253](https://github.com/benoitc/gunicorn/pull/3253)) - remove temporary ``--tolerate-dangerous-framing`` switch from 22.0 ([PR #3260](https://github.com/benoitc/gunicorn/pull/3260)) - If any of the breaking changes affect you, be aware that now refused requests can pose a security problem, especially so in setups involving request pipe-lining and/or proxies. 
## 22.0.0 - 2024-04-17 - use `utime` to notify workers liveness - migrate setup to pyproject.toml - fix numerous security vulnerabilities in HTTP parser (closing some request smuggling vectors) - parsing additional requests is no longer attempted past unsupported request framing - on HTTP versions < 1.1 support for chunked transfer is refused (only used in exploits) - requests conflicting configured or passed SCRIPT_NAME now produce a verbose error - Trailer fields are no longer inspected for headers indicating secure scheme - support Python 3.12 ### Breaking changes - minimum version is Python 3.7 - the limitations on valid characters in the HTTP method have been bounded to Internet Standards - requests specifying unsupported transfer coding (order) are refused by default (rare) - HTTP methods are no longer casefolded by default (IANA method registry contains none affected) - HTTP methods containing the number sign (#) are no longer accepted by default (rare) - HTTP versions < 1.0 or >= 2.0 are no longer accepted by default (rare, only HTTP/1.1 is supported) - HTTP versions consisting of multiple digits or containing a prefix/suffix are no longer accepted - HTTP header field names Gunicorn cannot safely map to variables are silently dropped, as in other software - HTTP headers with empty field name are refused by default (no legitimate use cases, used in exploits) - requests with both Transfer-Encoding and Content-Length are refused by default (such a message might indicate an attempt to perform request smuggling) - empty transfer codings are no longer permitted (reportedly seen with really old & broken proxies) ### Security - fix CVE-2024-1135 ## History - [2026](2026-news.md) - [2024](2024-news.md) - [2023](2023-news.md) - [2021](2021-news.md) - [2020](2020-news.md) - [2019](2019-news.md) - [2018](2018-news.md) - [2017](2017-news.md) - [2016](2016-news.md) - [2015](2015-news.md) - [2014](2014-news.md) - [2013](2013-news.md) - [2012](2012-news.md) - 
[2011](2011-news.md) - [2010](2010-news.md) benoitc-gunicorn-f5fb19e/docs/content/quickstart.md000066400000000000000000000035411514360242400226040ustar00rootroot00000000000000# Quickstart Get a Python web application running with Gunicorn in 5 minutes. ## Install ```bash pip install gunicorn ``` ## Create an Application Create `app.py`: === "Flask" ```python from flask import Flask app = Flask(__name__) @app.route("/") def hello(): return "Hello, World!" ``` === "FastAPI" ```python from fastapi import FastAPI app = FastAPI() @app.get("/") def hello(): return {"message": "Hello, World!"} ``` === "Django" Django projects already have a WSGI application at `myproject/wsgi.py`. No additional code is needed. === "Plain WSGI" ```python def app(environ, start_response): data = b"Hello, World!" start_response("200 OK", [ ("Content-Type", "text/plain"), ("Content-Length", str(len(data))) ]) return [data] ``` ## Run ```bash gunicorn app:app ``` For Django: ```bash gunicorn myproject.wsgi ``` For FastAPI (ASGI): ```bash gunicorn app:app --worker-class asgi ``` ## Add Workers Use multiple workers to handle concurrent requests: ```bash gunicorn app:app --workers 4 ``` A good starting point is `2 * CPU_CORES + 1` workers. ## Bind to a Port By default Gunicorn binds to `127.0.0.1:8000`. Change it with: ```bash gunicorn app:app --bind 0.0.0.0:8080 ``` ## Configuration File Create `gunicorn.conf.py` for reusable settings: ```python bind = "0.0.0.0:8000" workers = 4 accesslog = "-" ``` Then run: ```bash gunicorn app:app ``` Gunicorn automatically loads `gunicorn.conf.py` from the current directory. 
## Next Steps - [Run](run.md) - Full CLI reference and framework integration - [Configure](configure.md) - Configuration file options - [Deploy](deploy.md) - Production deployment with nginx and process managers - [Settings](reference/settings.md) - Complete settings reference benoitc-gunicorn-f5fb19e/docs/content/reference/000077500000000000000000000000001514360242400220235ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/docs/content/reference/settings.md000066400000000000000000001420051514360242400242070ustar00rootroot00000000000000> **Generated file** — update `gunicorn/config.py` instead. # Settings This reference is built directly from `gunicorn.config.KNOWN_SETTINGS` and is regenerated during every documentation build. !!! note Settings can be provided through the `GUNICORN_CMD_ARGS` environment variable. For example: ```console $ GUNICORN_CMD_ARGS="--bind=127.0.0.1 --workers=3" gunicorn app:app ``` _Added in 19.7._ ## Config File ### `config` **Command line:** `-c CONFIG`, `--config CONFIG` **Default:** `'./gunicorn.conf.py'` [The Gunicorn config file](../configure.md#configuration-file). A string of the form ``PATH``, ``file:PATH``, or ``python:MODULE_NAME``. Only has an effect when specified on the command line or as part of an application specific configuration. By default, a file named ``gunicorn.conf.py`` will be read from the same directory where gunicorn is being run. !!! info "Changed in 19.4" Loading the config from a Python module requires the ``python:`` prefix. ### `wsgi_app` **Default:** `None` A WSGI application path in pattern ``$(MODULE_NAME):$(VARIABLE_NAME)``. !!! info "Added in 20.1.0" ## Control ### `control_socket` **Command line:** `--control-socket PATH` **Default:** `'gunicorn.ctl'` Unix socket path for control interface. The control socket allows runtime management of Gunicorn via the ``gunicornc`` command-line tool. Commands include viewing worker status, adjusting worker count, and graceful reload/shutdown. 
By default, creates ``gunicorn.ctl`` in the working directory. Set an absolute path for a fixed location (e.g., ``/var/run/gunicorn.ctl``). Use ``--no-control-socket`` to disable. !!! info "Added in 25.1.0" ### `control_socket_mode` **Command line:** `--control-socket-mode INT` **Default:** `384` Permission mode for control socket. Restricts who can connect to the control socket. Default ``0600`` allows only the socket owner. Set to ``0660`` to allow group access. !!! info "Added in 25.1.0" ### `control_socket_disable` **Command line:** `--no-control-socket` **Default:** `False` Disable control socket. When set, no control socket is created and ``gunicornc`` cannot connect to this Gunicorn instance. !!! info "Added in 25.1.0" ## Debugging ### `reload` **Command line:** `--reload` **Default:** `False` Restart workers when code changes. This setting is intended for development. It will cause workers to be restarted whenever application code changes. The reloader is incompatible with application preloading. When using a paste configuration be sure that the server block does not import any application code or the reload will not work as designed. The default behavior is to attempt inotify with a fallback to file system polling. Generally, inotify should be preferred if available because it consumes fewer system resources. !!! note In order to use the inotify reloader, you must have the ``inotify`` package installed. !!! warning Enabling this will change what happens on failure to load the application: While the reloader is active, any and all clients that can make requests can see the full exception and traceback! ### `reload_engine` **Command line:** `--reload-engine STRING` **Default:** `'auto'` The implementation that should be used to power [reload](#reload). Valid engines are: * ``'auto'`` * ``'poll'`` * ``'inotify'`` (requires inotify) !!! 
info "Added in 19.7" ### `reload_extra_files` **Command line:** `--reload-extra-file FILES` **Default:** `[]` Extends [reload](#reload) option to also watch and reload on additional files (e.g., templates, configurations, specifications, etc.). !!! info "Added in 19.8" ### `spew` **Command line:** `--spew` **Default:** `False` Install a trace function that spews every line executed by the server. This is the nuclear option. ### `check_config` **Command line:** `--check-config` **Default:** `False` Check the configuration and exit. The exit status is 0 if the configuration is correct, and 1 if the configuration is incorrect. ### `print_config` **Command line:** `--print-config` **Default:** `False` Print the configuration settings as fully resolved. Implies [check-config](#check_config). ## Dirty Arbiter Hooks ### `on_dirty_starting` **Default:** ```python def on_dirty_starting(arbiter): pass ``` Called just before the dirty arbiter process is initialized. The callable needs to accept a single instance variable for the DirtyArbiter. !!! info "Added in 25.0.0" ### `dirty_post_fork` **Default:** ```python def dirty_post_fork(arbiter, worker): pass ``` Called just after a dirty worker has been forked. The callable needs to accept two instance variables for the DirtyArbiter and new DirtyWorker. !!! info "Added in 25.0.0" ### `dirty_worker_init` **Default:** ```python def dirty_worker_init(worker): pass ``` Called just after a dirty worker has initialized all applications. The callable needs to accept one instance variable for the DirtyWorker. !!! info "Added in 25.0.0" ### `dirty_worker_exit` **Default:** ```python def dirty_worker_exit(arbiter, worker): pass ``` Called when a dirty worker has exited. The callable needs to accept two instance variables for the DirtyArbiter and the exiting DirtyWorker. !!! info "Added in 25.0.0" ## Dirty Arbiters ### `dirty_apps` **Command line:** `--dirty-app STRING` **Default:** `[]` Dirty applications to load in the dirty worker pool. 
A list of application paths in one of these formats: - ``$(MODULE_NAME):$(CLASS_NAME)`` - all workers load this app - ``$(MODULE_NAME):$(CLASS_NAME):$(N)`` - only N workers load this app Each dirty app must be a class that inherits from ``DirtyApp`` base class and implements the ``init()``, ``__call__()``, and ``close()`` methods. Example:: dirty_apps = [ "myapp.ml:MLApp", # All workers load this "myapp.images:ImageApp", # All workers load this "myapp.heavy:HugeModel:2", # Only 2 workers load this ] The per-app worker limit is useful for memory-intensive applications like large ML models. Instead of all 8 workers loading a 10GB model (80GB total), you can limit it to 2 workers (20GB total). Alternatively, you can set the ``workers`` class attribute on your DirtyApp subclass:: class HugeModelApp(DirtyApp): workers = 2 # Only 2 workers load this app def init(self): self.model = load_10gb_model() Note: The config format (``module:Class:N``) takes precedence over the class attribute if both are specified. Dirty apps are loaded once when the dirty worker starts and persist in memory for the lifetime of the worker. This is ideal for loading ML models, database connection pools, or other stateful resources that are expensive to initialize. !!! info "Added in 25.0.0" !!! info "Changed in 25.1.0" Added per-app worker allocation via ``:N`` format suffix. ### `dirty_workers` **Command line:** `--dirty-workers INT` **Default:** `0` The number of dirty worker processes. A positive integer. Set to 0 (default) to disable the dirty arbiter. When set to a positive value, a dirty arbiter process will be spawned to manage the dirty worker pool. Dirty workers are separate from HTTP workers and are designed for long-running, blocking operations like ML model inference or heavy computation. !!! info "Added in 25.0.0" ### `dirty_timeout` **Command line:** `--dirty-timeout INT` **Default:** `300` Timeout for dirty task execution in seconds. 
Workers silent for more than this many seconds are considered stuck and will be killed. Set to a high value for operations like model loading that may take a long time. Value is a positive number. Setting it to 0 disables timeout checking. !!! info "Added in 25.0.0" ### `dirty_threads` **Command line:** `--dirty-threads INT` **Default:** `1` The number of threads per dirty worker. Each dirty worker can use threads to handle concurrent operations within the same process, useful for async-safe applications. !!! info "Added in 25.0.0" ### `dirty_graceful_timeout` **Command line:** `--dirty-graceful-timeout INT` **Default:** `30` Timeout for graceful dirty worker shutdown in seconds. After receiving a shutdown signal, dirty workers have this much time to finish their current tasks. Workers still alive after the timeout are force killed. !!! info "Added in 25.0.0" ## HTTP/2 ### `http_protocols` **Command line:** `--http-protocols STRING` **Default:** `'h1'` HTTP protocol versions to support (comma-separated, order = preference). Valid protocols: * ``h1`` - HTTP/1.1 (default) * ``h2`` - HTTP/2 (requires TLS with ALPN) * ``h3`` - HTTP/3 (future, not yet implemented) Examples:: # HTTP/1.1 only (default, backward compatible) --http-protocols=h1 # Prefer HTTP/2, fallback to HTTP/1.1 --http-protocols=h2,h1 # HTTP/2 only (reject HTTP/1.1 clients) --http-protocols=h2 HTTP/2 requires: * TLS (--certfile and --keyfile) * The h2 library: ``pip install gunicorn[http2]`` * ALPN-capable TLS client !!! note HTTP/2 cleartext (h2c) is not supported due to security concerns and lack of browser support. !!! info "Added in 25.0.0" ### `http2_max_concurrent_streams` **Command line:** `--http2-max-concurrent-streams INT` **Default:** `100` Maximum number of concurrent HTTP/2 streams per connection. This limits how many requests can be processed simultaneously on a single HTTP/2 connection. Higher values allow more parallelism but use more memory. 
Default is 100, which matches common server configurations. The HTTP/2 specification allows up to 2^31-1. !!! info "Added in 25.0.0" ### `http2_initial_window_size` **Command line:** `--http2-initial-window-size INT` **Default:** `65535` Initial HTTP/2 flow control window size in bytes. This controls how much data can be in-flight before the receiver sends WINDOW_UPDATE frames. Larger values can improve throughput for large transfers but use more memory. Default is 65535 (64KB - 1), the HTTP/2 specification default. Maximum is 2^31-1 (2147483647). !!! info "Added in 25.0.0" ### `http2_max_frame_size` **Command line:** `--http2-max-frame-size INT` **Default:** `16384` Maximum HTTP/2 frame payload size in bytes. This is the largest frame payload the server will accept. Larger frames reduce framing overhead but may increase latency for small messages. Default is 16384 (16KB), the HTTP/2 specification minimum. Range is 16384 to 16777215 (16MB - 1). !!! info "Added in 25.0.0" ### `http2_max_header_list_size` **Command line:** `--http2-max-header-list-size INT` **Default:** `65536` Maximum size of HTTP/2 header list in bytes (HPACK protection). This limits the total size of headers after HPACK decompression. Protects against compression bombs and excessive memory use. Default is 65536 (64KB). Set to 0 for unlimited (not recommended). !!! info "Added in 25.0.0" ## Logging ### `accesslog` **Command line:** `--access-logfile FILE` **Default:** `None` The Access log file to write to. ``'-'`` means log to stdout. ### `disable_redirect_access_to_syslog` **Command line:** `--disable-redirect-access-to-syslog` **Default:** `False` Disable redirect access logs to syslog. !!! info "Added in 19.8" ### `access_log_format` **Command line:** `--access-logformat STRING` **Default:** `'%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'` The access log format. 
=========== =========== Identifier Description =========== =========== h remote address l ``'-'`` u user name (if HTTP Basic auth used) t date of the request r status line (e.g. ``GET / HTTP/1.1``) m request method U URL path without query string q query string H protocol s status B response length b response length or ``'-'`` (CLF format) f referrer (note: header is ``referer``) a user agent T request time in seconds M request time in milliseconds D request time in microseconds L request time in decimal seconds p process ID {header}i request header {header}o response header {variable}e environment variable =========== =========== Use lowercase for header and environment variable names, and put ``{...}x`` names inside ``%(...)s``. For example:: %({x-forwarded-for}i)s ### `errorlog` **Command line:** `--error-logfile FILE`, `--log-file FILE` **Default:** `'-'` The Error log file to write to. Using ``'-'`` for FILE makes gunicorn log to stderr. !!! info "Changed in 19.2" Log to stderr by default. ### `loglevel` **Command line:** `--log-level LEVEL` **Default:** `'info'` The granularity of Error log outputs. Valid level names are: * ``'debug'`` * ``'info'`` * ``'warning'`` * ``'error'`` * ``'critical'`` ### `capture_output` **Command line:** `--capture-output` **Default:** `False` Redirect stdout/stderr to specified file in [errorlog](#errorlog). !!! info "Added in 19.6" ### `logger_class` **Command line:** `--logger-class STRING` **Default:** `'gunicorn.glogging.Logger'` The logger you want to use to log events in Gunicorn. The default class (``gunicorn.glogging.Logger``) handles most normal usages in logging. It provides error and access logging. You can provide your own logger by giving Gunicorn a Python path to a class that quacks like ``gunicorn.glogging.Logger``. ### `logconfig` **Command line:** `--log-config FILE` **Default:** `None` The log config file to use. Gunicorn uses the standard Python logging module's Configuration file format. 
### `logconfig_dict` **Default:** `{}` The log config dictionary to use, using the standard Python logging module's dictionary configuration format. This option takes precedence over the [logconfig](#logconfig) and [logconfig-json](#logconfig_json) options, which uses the older file configuration format and JSON respectively. Format: https://docs.python.org/3/library/logging.config.html#logging.config.dictConfig For more context you can look at the default configuration dictionary for logging, which can be found at ``gunicorn.glogging.CONFIG_DEFAULTS``. !!! info "Added in 19.8" ### `logconfig_json` **Command line:** `--log-config-json FILE` **Default:** `None` The log config to read config from a JSON file Format: https://docs.python.org/3/library/logging.config.html#logging.config.jsonConfig !!! info "Added in 20.0" ### `syslog_addr` **Command line:** `--log-syslog-to SYSLOG_ADDR` **Default:** Platform-specific: * macOS: ``'unix:///var/run/syslog'`` * FreeBSD/DragonFly: ``'unix:///var/run/log'`` * OpenBSD: ``'unix:///dev/log'`` * Linux/other: ``'udp://localhost:514'`` Address to send syslog messages. Address is a string of the form: * ``unix://PATH#TYPE`` : for unix domain socket. ``TYPE`` can be ``stream`` for the stream driver or ``dgram`` for the dgram driver. ``stream`` is the default. * ``udp://HOST:PORT`` : for UDP sockets * ``tcp://HOST:PORT`` : for TCP sockets ### `syslog` **Command line:** `--log-syslog` **Default:** `False` Send *Gunicorn* logs to syslog. !!! info "Changed in 19.8" You can now disable sending access logs by using the disable-redirect-access-to-syslog setting. ### `syslog_prefix` **Command line:** `--log-syslog-prefix SYSLOG_PREFIX` **Default:** `None` Makes Gunicorn use the parameter as program-name in the syslog entries. All entries will be prefixed by ``gunicorn.``. By default the program name is the name of the process. 
### `syslog_facility` **Command line:** `--log-syslog-facility SYSLOG_FACILITY` **Default:** `'user'` Syslog facility name ### `enable_stdio_inheritance` **Command line:** `-R`, `--enable-stdio-inheritance` **Default:** `False` Enable stdio inheritance. Enable inheritance for stdio file descriptors in daemon mode. Note: To disable the Python stdout buffering, you can set the user environment variable ``PYTHONUNBUFFERED``. ### `statsd_host` **Command line:** `--statsd-host STATSD_ADDR` **Default:** `None` The address of the StatsD server to log to. Address is a string of the form: * ``unix://PATH`` : for a unix domain socket. * ``HOST:PORT`` : for a network address !!! info "Added in 19.1" ### `dogstatsd_tags` **Command line:** `--dogstatsd-tags DOGSTATSD_TAGS` **Default:** `''` A comma-delimited list of datadog statsd (dogstatsd) tags to append to statsd metrics. e.g. ``'tag1:value1,tag2:value2'`` !!! info "Added in 20" ### `statsd_prefix` **Command line:** `--statsd-prefix STATSD_PREFIX` **Default:** `''` Prefix to use when emitting statsd metrics (a trailing ``.`` is added, if not provided). !!! info "Added in 19.2" ### `enable_backlog_metric` **Command line:** `--enable-backlog-metric` **Default:** `False` Enable socket backlog metric (only supported on Linux). When enabled, gunicorn will emit a ``gunicorn.backlog`` histogram metric showing the number of connections waiting in the socket backlog. ## Process Naming ### `proc_name` **Command line:** `-n STRING`, `--name STRING` **Default:** `None` A base to use with setproctitle for process naming. This affects things like ``ps`` and ``top``. If you're going to be running more than one instance of Gunicorn you'll probably want to set a name to tell them apart. This requires that you install the setproctitle module. If not set, the *default_proc_name* setting will be used. 
## SSL ### `keyfile` **Command line:** `--keyfile FILE` **Default:** `None` SSL key file ### `certfile` **Command line:** `--certfile FILE` **Default:** `None` SSL certificate file ### `ssl_version` **Command line:** `--ssl-version` **Default:** `<_SSLMethod.PROTOCOL_TLS: 2>` SSL version to use (see stdlib ssl module's). !!! danger "Deprecated in 21.0" The option is deprecated and it is currently ignored. Use [ssl-context](#ssl_context) instead. ============= ============ --ssl-version Description ============= ============ SSLv3 SSLv3 is not-secure and is strongly discouraged. SSLv23 Alias for TLS. Deprecated in Python 3.6, use TLS. TLS Negotiate highest possible version between client/server. Can yield SSL. (Python 3.6+) TLSv1 TLS 1.0 TLSv1_1 TLS 1.1 (Python 3.4+) TLSv1_2 TLS 1.2 (Python 3.4+) TLS_SERVER Auto-negotiate the highest protocol version like TLS, but only support server-side SSLSocket connections. (Python 3.6+) ============= ============ !!! info "Changed in 19.7" The default value has been changed from ``ssl.PROTOCOL_TLSv1`` to ``ssl.PROTOCOL_SSLv23``. !!! info "Changed in 20.0" This setting now accepts string names based on ``ssl.PROTOCOL_`` constants. !!! info "Changed in 20.0.1" The default value has been changed from ``ssl.PROTOCOL_SSLv23`` to ``ssl.PROTOCOL_TLS`` when Python >= 3.6 . 
### `cert_reqs` **Command line:** `--cert-reqs` **Default:** `` Whether client certificate is required (see stdlib ssl module's) =========== =========================== --cert-reqs Description =========== =========================== `0` no client verification `1` ssl.CERT_OPTIONAL `2` ssl.CERT_REQUIRED =========== =========================== ### `ca_certs` **Command line:** `--ca-certs FILE` **Default:** `None` CA certificates file ### `suppress_ragged_eofs` **Command line:** `--suppress-ragged-eofs` **Default:** `True` Suppress ragged EOFs (see stdlib ssl module's) ### `do_handshake_on_connect` **Command line:** `--do-handshake-on-connect` **Default:** `False` Whether to perform SSL handshake on socket connect (see stdlib ssl module's) ### `ciphers` **Command line:** `--ciphers` **Default:** `None` SSL Cipher suite to use, in the format of an OpenSSL cipher list. By default we use the default cipher list from Python's ``ssl`` module, which contains ciphers considered strong at the time of each Python release. As a recommended alternative, the Open Web App Security Project (OWASP) offers `a vetted set of strong cipher strings rated A+ to C- `_. OWASP provides details on user-agent compatibility at each security level. See the `OpenSSL Cipher List Format Documentation `_ for details on the format of an OpenSSL cipher list. ## Security ### `limit_request_line` **Command line:** `--limit-request-line INT` **Default:** `4094` The maximum size of HTTP request line in bytes. This parameter is used to limit the allowed size of a client's HTTP request-line. Since the request-line consists of the HTTP method, URI, and protocol version, this directive places a restriction on the length of a request-URI allowed for a request on the server. A server needs this value to be large enough to hold any of its resource names, including any information that might be passed in the query part of a GET request. Value is a number from 0 (unlimited) to 8190. 
This parameter can be used to prevent any DDOS attack. ### `limit_request_fields` **Command line:** `--limit-request-fields INT` **Default:** `100` Limit the number of HTTP headers fields in a request. This parameter is used to limit the number of headers in a request to prevent DDOS attack. Used with the *limit_request_field_size* it allows more safety. By default this value is 100 and can't be larger than 32768. ### `limit_request_field_size` **Command line:** `--limit-request-field_size INT` **Default:** `8190` Limit the allowed size of an HTTP request header field. Value is a positive number or 0. Setting it to 0 will allow unlimited header field sizes. !!! warning Setting this parameter to a very high or unlimited value can open up for DDOS attacks. ## Server Hooks ### `on_starting` **Default:** ```python def on_starting(server): pass ``` Called just before the master process is initialized. The callable needs to accept a single instance variable for the Arbiter. ### `on_reload` **Default:** ```python def on_reload(server): pass ``` Called to recycle workers during a reload via SIGHUP. The callable needs to accept a single instance variable for the Arbiter. ### `when_ready` **Default:** ```python def when_ready(server): pass ``` Called just after the server is started. The callable needs to accept a single instance variable for the Arbiter. ### `pre_fork` **Default:** ```python def pre_fork(server, worker): pass ``` Called just before a worker is forked. The callable needs to accept two instance variables for the Arbiter and new Worker. ### `post_fork` **Default:** ```python def post_fork(server, worker): pass ``` Called just after a worker has been forked. The callable needs to accept two instance variables for the Arbiter and new Worker. ### `post_worker_init` **Default:** ```python def post_worker_init(worker): pass ``` Called just after a worker has initialized the application. The callable needs to accept one instance variable for the initialized Worker. 
### `worker_int` **Default:** ```python def worker_int(worker): pass ``` Called just after a worker exited on SIGINT or SIGQUIT. The callable needs to accept one instance variable for the initialized Worker. ### `worker_abort` **Default:** ```python def worker_abort(worker): pass ``` Called when a worker received the SIGABRT signal. This call generally happens on timeout. The callable needs to accept one instance variable for the initialized Worker. ### `pre_exec` **Default:** ```python def pre_exec(server): pass ``` Called just before a new master process is forked. The callable needs to accept a single instance variable for the Arbiter. ### `pre_request` **Default:** ```python def pre_request(worker, req): worker.log.debug("%s %s", req.method, req.path) ``` Called just before a worker processes the request. The callable needs to accept two instance variables for the Worker and the Request. ### `post_request` **Default:** ```python def post_request(worker, req, environ, resp): pass ``` Called after a worker processes the request. The callable needs to accept two instance variables for the Worker and the Request. If a third parameter is defined it will be passed the environment. If a fourth parameter is defined it will be passed the Response. ### `child_exit` **Default:** ```python def child_exit(server, worker): pass ``` Called just after a worker has been exited, in the master process. The callable needs to accept two instance variables for the Arbiter and the just-exited Worker. !!! info "Added in 19.7" ### `worker_exit` **Default:** ```python def worker_exit(server, worker): pass ``` Called just after a worker has been exited, in the worker process. The callable needs to accept two instance variables for the Arbiter and the just-exited Worker. ### `nworkers_changed` **Default:** ```python def nworkers_changed(server, new_value, old_value): pass ``` Called just after *num_workers* has been changed. 
The callable needs to accept an instance variable of the Arbiter and two integers of number of workers after and before change. If the number of workers is set for the first time, *old_value* would be ``None``. ### `on_exit` **Default:** ```python def on_exit(server): pass ``` Called just before exiting Gunicorn. The callable needs to accept a single instance variable for the Arbiter. ### `ssl_context` **Default:** ```python def ssl_context(config, default_ssl_context_factory): return default_ssl_context_factory() ``` Called when SSLContext is needed. Allows customizing SSL context. The callable needs to accept an instance variable for the Config and a factory function that returns default SSLContext which is initialized with certificates, private key, cert_reqs, and ciphers according to config and can be further customized by the callable. The callable needs to return SSLContext object. Following example shows a configuration file that sets the minimum TLS version to 1.3: ```python def ssl_context(conf, default_ssl_context_factory): import ssl context = default_ssl_context_factory() context.minimum_version = ssl.TLSVersion.TLSv1_3 return context ``` !!! info "Added in 21.0" ## Server Mechanics ### `preload_app` **Command line:** `--preload` **Default:** `False` Load application code before the worker processes are forked. By preloading an application you can save some RAM resources as well as speed up server boot times. Although, if you defer application loading to each worker process, you can reload your application code easily by restarting workers. ### `sendfile` **Command line:** `--no-sendfile` **Default:** `None` Disables the use of ``sendfile()``. If not set, the value of the ``SENDFILE`` environment variable is used to enable or disable its usage. !!! info "Added in 19.2" !!! info "Changed in 19.4" Swapped ``--sendfile`` with ``--no-sendfile`` to actually allow disabling. !!! 
info "Changed in 19.6" added support for the ``SENDFILE`` environment variable ### `reuse_port` **Command line:** `--reuse-port` **Default:** `False` Set the ``SO_REUSEPORT`` flag on the listening socket. !!! info "Added in 19.8" ### `chdir` **Command line:** `--chdir` **Default:** ``'.'`` Change directory to specified directory before loading apps. ### `daemon` **Command line:** `-D`, `--daemon` **Default:** `False` Daemonize the Gunicorn process. Detaches the server from the controlling terminal and enters the background. ### `raw_env` **Command line:** `-e ENV`, `--env ENV` **Default:** `[]` Set environment variables in the execution environment. Should be a list of strings in the ``key=value`` format. For example on the command line: ```console $ gunicorn -b 127.0.0.1:8000 --env FOO=1 test:app ``` Or in the configuration file: ```python raw_env = ["FOO=1"] ``` ### `pidfile` **Command line:** `-p FILE`, `--pid FILE` **Default:** `None` A filename to use for the PID file. If not set, no PID file will be written. ### `worker_tmp_dir` **Command line:** `--worker-tmp-dir DIR` **Default:** `None` A directory to use for the worker heartbeat temporary file. If not set, the default temporary directory will be used. !!! note The current heartbeat system involves calling ``os.fchmod`` on temporary file handlers and may block a worker for arbitrary time if the directory is on a disk-backed filesystem. See [blocking-os-fchmod](#blocking_os_fchmod) for more detailed information and a solution for avoiding this problem. ### `user` **Command line:** `-u USER`, `--user USER` **Default:** ``os.geteuid()`` Switch worker processes to run as this user. A valid user id (as an integer) or the name of a user that can be retrieved with a call to ``pwd.getpwnam(value)`` or ``None`` to not change the worker process user. ### `group` **Command line:** `-g GROUP`, `--group GROUP` **Default:** ``os.getegid()`` Switch worker process to run as this group. 
A valid group id (as an integer) or the name of a group that can be retrieved with a call to ``grp.getgrnam(value)`` or ``None`` to not change the worker processes group. ### `umask` **Command line:** `-m INT`, `--umask INT` **Default:** `0` A bit mask for the file mode on files written by Gunicorn. Note that this affects unix socket permissions. A valid value for the ``os.umask(mode)`` call or a string compatible with ``int(value, 0)`` (``0`` means Python guesses the base, so values like ``0``, ``0xFF``, ``0022`` are valid for decimal, hex, and octal representations) ### `initgroups` **Command line:** `--initgroups` **Default:** `False` If true, set the worker process's group access list with all of the groups of which the specified username is a member, plus the specified group id. !!! info "Added in 19.7" ### `tmp_upload_dir` **Default:** `None` Directory to store temporary request data as they are read. This may disappear in the near future. This path should be writable by the process permissions set for Gunicorn workers. If not specified, Gunicorn will choose a system generated temporary directory. ### `secure_scheme_headers` **Default:** `{'X-FORWARDED-PROTOCOL': 'ssl', 'X-FORWARDED-PROTO': 'https', 'X-FORWARDED-SSL': 'on'}` A dictionary containing headers and values that the front-end proxy uses to indicate HTTPS requests. If the source IP is permitted by [forwarded-allow-ips](#forwarded_allow_ips) (below), *and* at least one request header matches a key-value pair listed in this dictionary, then Gunicorn will set ``wsgi.url_scheme`` to ``https``, so your application can tell that the request is secure. If the other headers listed in this dictionary are not present in the request, they will be ignored, but if the other headers are present and do not match the provided values, then the request will fail to parse. See the note below for more detailed examples of this behaviour. The dictionary should map upper-case header names to exact string values. 
The value comparisons are case-sensitive, unlike the header names, so make sure they're exactly what your front-end proxy sends when handling HTTPS requests. It is important that your front-end proxy configuration ensures that the headers defined here can not be passed directly from the client. ### `forwarded_allow_ips` **Command line:** `--forwarded-allow-ips STRING` **Default:** `'127.0.0.1,::1'` Front-end's IP addresses or networks from which allowed to handle set secure headers. (comma separated). Supports both individual IP addresses (e.g., ``192.168.1.1``) and CIDR networks (e.g., ``192.168.0.0/16``). Set to ``*`` to disable checking of front-end IPs. This is useful for setups where you don't know in advance the IP address of front-end, but instead have ensured via other means that only your authorized front-ends can access Gunicorn. By default, the value of the ``FORWARDED_ALLOW_IPS`` environment variable. If it is not defined, the default is ``"127.0.0.1,::1"``. !!! note This option does not affect UNIX socket connections. Connections not associated with an IP address are treated as allowed, unconditionally. !!! note The interplay between the request headers, the value of ``forwarded_allow_ips``, and the value of ``secure_scheme_headers`` is complex. Various scenarios are documented below to further elaborate. In each case, we have a request from the remote address 134.213.44.18, and the default value of ``secure_scheme_headers``: .. code:: secure_scheme_headers = { 'X-FORWARDED-PROTOCOL': 'ssl', 'X-FORWARDED-PROTO': 'https', 'X-FORWARDED-SSL': 'on' } .. list-table:: :header-rows: 1 :align: center :widths: auto * - ``forwarded-allow-ips`` - Secure Request Headers - Result - Explanation * - .. code:: ["127.0.0.1"] - .. code:: X-Forwarded-Proto: https - .. code:: wsgi.url_scheme = "http" - IP address was not allowed * - .. code:: "*" - - .. code:: wsgi.url_scheme = "http" - IP address allowed, but no secure headers provided * - .. code:: "*" - .. 
code:: X-Forwarded-Proto: https - .. code:: wsgi.url_scheme = "https" - IP address allowed, one request header matched * - .. code:: ["134.213.44.18"] - .. code:: X-Forwarded-Ssl: on X-Forwarded-Proto: http - ``InvalidSchemeHeaders()`` raised - IP address allowed, but the two secure headers disagreed on if HTTPS was used ### `pythonpath` **Command line:** `--pythonpath STRING` **Default:** `None` A comma-separated list of directories to add to the Python path. e.g. ``'/home/djangoprojects/myproject,/home/python/mylibrary'``. ### `paste` **Command line:** `--paste STRING`, `--paster STRING` **Default:** `None` Load a PasteDeploy config file. The argument may contain a ``#`` symbol followed by the name of an app section from the config file, e.g. ``production.ini#admin``. At this time, using alternate server blocks is not supported. Use the command line arguments to control server configuration instead. ### `proxy_protocol` **Command line:** `--proxy-protocol MODE` **Default:** `'off'` Enable PROXY protocol support. Allow using HTTP and PROXY protocol together. It may be useful for work with stunnel as HTTPS frontend and Gunicorn as HTTP server, or with HAProxy. Accepted values: * ``off`` - Disabled (default) * ``v1`` - PROXY protocol v1 only (text format) * ``v2`` - PROXY protocol v2 only (binary format) * ``auto`` - Auto-detect v1 or v2 Using ``--proxy-protocol`` without a value is equivalent to ``auto``. PROXY protocol v1: http://haproxy.1wt.eu/download/1.5/doc/proxy-protocol.txt PROXY protocol v2: https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt Example for stunnel config:: [https] protocol = proxy accept = 443 connect = 80 cert = /etc/ssl/certs/stunnel.pem key = /etc/ssl/certs/stunnel.key !!! info "Changed in 24.1.0" Extended to support version selection (v1, v2, auto). 
### `proxy_allow_ips` **Command line:** `--proxy-allow-from` **Default:** `'127.0.0.1,::1'` Front-end's IP addresses or networks from which allowed accept proxy requests (comma separated). Supports both individual IP addresses (e.g., ``192.168.1.1``) and CIDR networks (e.g., ``192.168.0.0/16``). Set to ``*`` to disable checking of front-end IPs. This is useful for setups where you don't know in advance the IP address of front-end, but instead have ensured via other means that only your authorized front-ends can access Gunicorn. !!! note This option does not affect UNIX socket connections. Connections not associated with an IP address are treated as allowed, unconditionally. ### `protocol` **Command line:** `--protocol STRING` **Default:** `'http'` The protocol for incoming connections. * ``http`` - Standard HTTP/1.x (default) * ``uwsgi`` - uWSGI binary protocol (for nginx uwsgi_pass) When using the uWSGI protocol, Gunicorn can receive requests from nginx using the uwsgi_pass directive:: upstream gunicorn { server 127.0.0.1:8000; } location / { uwsgi_pass gunicorn; include uwsgi_params; } ### `uwsgi_allow_ips` **Command line:** `--uwsgi-allow-from` **Default:** `'127.0.0.1,::1'` IPs allowed to send uWSGI protocol requests (comma separated). Set to ``*`` to allow all IPs. This is useful for setups where you don't know in advance the IP address of front-end, but instead have ensured via other means that only your authorized front-ends can access Gunicorn. !!! note This option does not affect UNIX socket connections. Connections not associated with an IP address are treated as allowed, unconditionally. ### `raw_paste_global_conf` **Command line:** `--paste-global CONF` **Default:** `[]` Set a PasteDeploy global config variable in ``key=value`` form. The option can be specified multiple times. The variables are passed to the PasteDeploy entrypoint. Example:: $ gunicorn -b 127.0.0.1:8000 --paste development.ini --paste-global FOO=1 --paste-global BAR=2 !!! 
info "Added in 19.7" ### `permit_obsolete_folding` **Command line:** `--permit-obsolete-folding` **Default:** `False` Permit requests employing obsolete HTTP line folding mechanism The folding mechanism was deprecated by rfc7230 Section 3.2.4 and will not be employed in HTTP request headers from standards-compliant HTTP clients. This option is provided to diagnose backwards-incompatible changes. Use with care and only if necessary. Temporary; the precise effect of this option may change in a future version, or it may be removed altogether. !!! info "Added in 23.0.0" ### `strip_header_spaces` **Command line:** `--strip-header-spaces` **Default:** `False` Strip spaces present between the header name and the ``:``. This is known to induce vulnerabilities and is not compliant with the HTTP/1.1 standard. See https://portswigger.net/research/http-desync-attacks-request-smuggling-reborn. Use with care and only if necessary. Deprecated; scheduled for removal in 25.0.0 !!! info "Added in 20.0.1" ### `permit_unconventional_http_method` **Command line:** `--permit-unconventional-http-method` **Default:** `False` Permit HTTP methods not matching conventions, such as IANA registration guidelines This permits request methods of length less than 3 or more than 20, methods with lowercase characters or methods containing the # character. HTTP methods are case sensitive by definition, and merely uppercase by convention. If unset, Gunicorn will apply nonstandard restrictions and cause 400 response status in cases where otherwise 501 status is expected. While this option does modify that behaviour, it should not be depended upon to guarantee standards-compliant behaviour. Rather, it is provided temporarily, to assist in diagnosing backwards-incompatible changes around the incomplete application of those restrictions. Use with care and only if necessary. Temporary; scheduled for removal in 24.0.0 !!! 
info "Added in 22.0.0" ### `permit_unconventional_http_version` **Command line:** `--permit-unconventional-http-version` **Default:** `False` Permit HTTP version not matching conventions of 2023 This disables the refusal of likely malformed request lines. It is unusual to specify HTTP 1 versions other than 1.0 and 1.1. This option is provided to diagnose backwards-incompatible changes. Use with care and only if necessary. Temporary; the precise effect of this option may change in a future version, or it may be removed altogether. !!! info "Added in 22.0.0" ### `casefold_http_method` **Command line:** `--casefold-http-method` **Default:** `False` Transform received HTTP methods to uppercase HTTP methods are case sensitive by definition, and merely uppercase by convention. This option is provided because previous versions of gunicorn defaulted to this behaviour. Use with care and only if necessary. Deprecated; scheduled for removal in 24.0.0 !!! info "Added in 22.0.0" ### `forwarder_headers` **Command line:** `--forwarder-headers` **Default:** `'SCRIPT_NAME,PATH_INFO'` A list containing upper-case header field names that the front-end proxy (see [forwarded-allow-ips](#forwarded_allow_ips)) sets, to be used in WSGI environment. This option has no effect for headers not present in the request. This option can be used to transfer ``SCRIPT_NAME``, ``PATH_INFO`` and ``REMOTE_USER``. It is important that your front-end proxy configuration ensures that the headers defined here can not be passed directly from the client. ### `header_map` **Command line:** `--header-map` **Default:** `'drop'` Configure how header field names are mapped into environ Headers containing underscores are permitted by RFC9110, but gunicorn joining headers of different names into the same environment variable will dangerously confuse applications as to which is which. The safe default ``drop`` is to silently drop headers that cannot be unambiguously mapped. 
The value ``refuse`` will return an error if a request contains *any* such header. The value ``dangerous`` matches the previous, not advisable, behaviour of mapping different header field names into the same environ name. If the source is permitted as explained in [forwarded-allow-ips](#forwarded_allow_ips), *and* the header name is present in [forwarder-headers](#forwarder_headers), the header is mapped into environment regardless of the state of this setting. Use with care and only if necessary and after considering if your problem could instead be solved by specifically renaming or rewriting only the intended headers on a proxy in front of Gunicorn. !!! info "Added in 22.0.0" ### `root_path` **Command line:** `--root-path STRING` **Default:** `''` The root path for ASGI applications. This is used to set the ``root_path`` in the ASGI scope, which allows applications to know their mount point when behind a reverse proxy. For example, if your application is mounted at ``/api``, set this to ``/api``. !!! info "Added in 24.0.0" ## Server Socket ### `bind` **Command line:** `-b ADDRESS`, `--bind ADDRESS` **Default:** `['127.0.0.1:8000']` The socket to bind. A string of the form: ``HOST``, ``HOST:PORT``, ``unix:PATH``, ``fd://FD``. An IP is a valid ``HOST``. !!! info "Changed in 20.0" Support for ``fd://FD`` got added. Multiple addresses can be bound. ex.:: $ gunicorn -b 127.0.0.1:8000 -b [::1]:8000 test:app will bind the `test:app` application on localhost both on ipv6 and ipv4 interfaces. If the ``PORT`` environment variable is defined, the default is ``['0.0.0.0:$PORT']``. If it is not defined, the default is ``['127.0.0.1:8000']``. ### `backlog` **Command line:** `--backlog INT` **Default:** `2048` The maximum number of pending connections. This refers to the number of clients that can be waiting to be served. Exceeding this number results in the client getting an error when attempting to connect. It should only affect servers under significant load. 
Must be a positive integer. Generally set in the 64-2048 range. ## Worker Processes ### `workers` **Command line:** `-w INT`, `--workers INT` **Default:** `1` The number of worker processes for handling requests. A positive integer generally in the ``2-4 x $(NUM_CORES)`` range. You'll want to vary this a bit to find the best for your particular application's work load. By default, the value of the ``WEB_CONCURRENCY`` environment variable, which is set by some Platform-as-a-Service providers such as Heroku. If it is not defined, the default is ``1``. ### `worker_class` **Command line:** `-k STRING`, `--worker-class STRING` **Default:** `'sync'` The type of workers to use. The default class (``sync``) should handle most "normal" types of workloads. You'll want to read :doc:`design` for information on when you might want to choose one of the other worker classes. Required libraries may be installed using setuptools' ``extras_require`` feature. A string referring to one of the following bundled classes: * ``sync`` * ``eventlet`` - **DEPRECATED: will be removed in 26.0**. Requires eventlet >= 0.40.3 * ``gevent`` - Requires gevent >= 24.10.1 (or install it via ``pip install gunicorn[gevent]``) * ``tornado`` - Requires tornado >= 6.5.0 (or install it via ``pip install gunicorn[tornado]``) * ``gthread`` - Python 2 requires the futures package to be installed (or install it via ``pip install gunicorn[gthread]``) Optionally, you can provide your own worker by giving Gunicorn a Python path to a subclass of ``gunicorn.workers.base.Worker``. This alternative syntax will load the gevent class: ``gunicorn.workers.ggevent.GeventWorker``. ### `threads` **Command line:** `--threads INT` **Default:** `1` The number of worker threads for handling requests. Run each worker with the specified number of threads. A positive integer generally in the ``2-4 x $(NUM_CORES)`` range. You'll want to vary this a bit to find the best for your particular application's work load. 
If it is not defined, the default is ``1``. This setting only affects the Gthread worker type. !!! note If you try to use the ``sync`` worker type and set the ``threads`` setting to more than 1, the ``gthread`` worker type will be used instead. ### `worker_connections` **Command line:** `--worker-connections INT` **Default:** `1000` The maximum number of simultaneous clients. This setting only affects the ``gthread``, ``eventlet`` and ``gevent`` worker types. ### `max_requests` **Command line:** `--max-requests INT` **Default:** `0` The maximum number of requests a worker will process before restarting. Any value greater than zero will limit the number of requests a worker will process before automatically restarting. This is a simple method to help limit the damage of memory leaks. If this is set to zero (the default) then the automatic worker restarts are disabled. ### `max_requests_jitter` **Command line:** `--max-requests-jitter INT` **Default:** `0` The maximum jitter to add to the *max_requests* setting. The jitter causes the restart per worker to be randomized by ``randint(0, max_requests_jitter)``. This is intended to stagger worker restarts to avoid all workers restarting at the same time. !!! info "Added in 19.2" ### `timeout` **Command line:** `-t INT`, `--timeout INT` **Default:** `30` Workers silent for more than this many seconds are killed and restarted. Value is a positive number or 0. Setting it to 0 has the effect of infinite timeouts by disabling timeouts for all workers entirely. Generally, the default of thirty seconds should suffice. Only set this noticeably higher if you're sure of the repercussions for sync workers. For the non sync workers it just means that the worker process is still communicating and is not tied to the length of time required to handle a single request. ### `graceful_timeout` **Command line:** `--graceful-timeout INT` **Default:** `30` Timeout for graceful workers restart in seconds. 
After receiving a restart signal, workers have this much time to finish serving requests. Workers still alive after the timeout (starting from the receipt of the restart signal) are force killed. ### `keepalive` **Command line:** `--keep-alive INT` **Default:** `2` The number of seconds to wait for requests on a Keep-Alive connection. Generally set in the 1-5 seconds range for servers with direct connection to the client (e.g. when you don't have separate load balancer). When Gunicorn is deployed behind a load balancer, it often makes sense to set this to a higher value. !!! note ``sync`` worker does not support persistent connections and will ignore this option. ### `asgi_loop` **Command line:** `--asgi-loop STRING` **Default:** `'auto'` Event loop implementation for ASGI workers. - auto: Use uvloop if available, otherwise asyncio - asyncio: Use Python's built-in asyncio event loop - uvloop: Use uvloop (must be installed separately) This setting only affects the ``asgi`` worker type. uvloop typically provides better performance but requires installing the uvloop package. !!! info "Added in 24.0.0" ### `asgi_lifespan` **Command line:** `--asgi-lifespan STRING` **Default:** `'auto'` Control ASGI lifespan protocol handling. - auto: Detect if app supports lifespan, enable if so - on: Always run lifespan protocol (fail if unsupported) - off: Never run lifespan protocol The lifespan protocol allows ASGI applications to run code at startup and shutdown. This is essential for frameworks like FastAPI that need to initialize database connections, caches, or other resources. This setting only affects the ``asgi`` worker type. !!! info "Added in 24.0.0" ### `asgi_disconnect_grace_period` **Command line:** `--asgi-disconnect-grace-period INT` **Default:** `3` Grace period (seconds) for ASGI apps to handle client disconnects. 
When a client disconnects, the ASGI app receives an http.disconnect message and has this many seconds to clean up resources (like database connections) before the request task is cancelled. Set to 0 to cancel immediately (not recommended for apps with async database connections). Apps with long-running database operations may need to increase this value. This setting only affects the ``asgi`` worker type. !!! info "Added in 25.0.0" benoitc-gunicorn-f5fb19e/docs/content/run.md000066400000000000000000000106371514360242400212200ustar00rootroot00000000000000# Running Gunicorn You can run Gunicorn directly from the command line or integrate it with popular frameworks like Django, Pyramid, or TurboGears. For deployment patterns see the [deployment guide](deploy.md). ## Commands After installation you have access to the `gunicorn` executable. ### `gunicorn` Basic usage: ```bash gunicorn [OPTIONS] [WSGI_APP] ``` `WSGI_APP` follows the pattern `MODULE_NAME:VARIABLE_NAME`. The module can be a full dotted path. The variable refers to a WSGI callable defined in that module. !!! info "Changed in 20.1.0" `WSGI_APP` can be omitted when defined in a [configuration file](configure.md). Example test application: ```python def app(environ, start_response): """Simplest possible application object""" data = b"Hello, World!\n" status = "200 OK" response_headers = [ ("Content-type", "text/plain"), ("Content-Length", str(len(data))) ] start_response(status, response_headers) return iter([data]) ``` Run it with: ```bash gunicorn --workers=2 test:app ``` You can also expose a factory function that returns the application: ```python def create_app(): app = FrameworkApp() ... return app ``` ```bash gunicorn --workers=2 'test:create_app()' ``` Passing positional and keyword arguments is supported but prefer configuration files or environment variables for anything beyond quick tests. 
#### Commonly used arguments - `-c CONFIG`, `--config CONFIG` — configuration file (`PATH`, `file:PATH`, or `python:MODULE_NAME`). - `-b BIND`, `--bind BIND` — socket to bind (host, host:port, `fd://FD`, or `unix:PATH`). - `-w WORKERS`, `--workers WORKERS` — number of worker processes, typically two to four per CPU core. See the [FAQ](faq.md) for tuning tips. - `-k WORKERCLASS`, `--worker-class WORKERCLASS` — worker type (`sync`, `gevent`, `tornado`, `gthread`). Read the [settings entry](reference/settings.md#worker_class) before switching classes. - `-n APP_NAME`, `--name APP_NAME` — set the process name (requires [`setproctitle`](https://pypi.python.org/pypi/setproctitle)). You can pass any setting via the environment variable `GUNICORN_CMD_ARGS`. See the [configuration guide](configure.md) and [settings reference](reference/settings.md) for details. ## Integration Gunicorn integrates cleanly with Django and Paste Deploy applications. ### Django Gunicorn looks for a WSGI callable named `application`. A typical invocation is: ```bash gunicorn myproject.wsgi ``` !!! note Ensure your project is on `PYTHONPATH`. The easiest way is to run this command from the directory containing `manage.py`. Set environment variables with `--env` and add your project to `PYTHONPATH` if needed: ```bash gunicorn --env DJANGO_SETTINGS_MODULE=myproject.settings myproject.wsgi ``` See [`raw_env`](reference/settings.md#raw_env) and [`pythonpath`](reference/settings.md#pythonpath) for more options. ### Paste Deployment Frameworks such as Pyramid and TurboGears often rely on Paste Deployment configuration. You can use Gunicorn in two ways. #### As a Paste server runner Let your framework command (for example `pserve` or `gearbox`) load Gunicorn by configuring it as the server: ```ini [server:main] use = egg:gunicorn#main host = 127.0.0.1 port = 8080 workers = 3 ``` This approach is quick to set up but Gunicorn cannot control how the application loads. 
Options like [`reload`](reference/settings.md#reload) will be ignored and hot upgrades are unavailable. Features such as daemon mode may conflict with what your framework already provides. Prefer running those features through the framework (for example `pserve --reload`). Advanced configuration is still possible by pointing the `config` key at a Gunicorn configuration file. #### Using Gunicorn's Paste support Use the [`paste`](reference/settings.md#paste) option to load a Paste configuration directly with the Gunicorn CLI. This unlocks Gunicorn's reloader and hot code upgrades, while still letting Paste define the application object. ```bash gunicorn --paste development.ini -b :8080 --chdir /path/to/project ``` Select a different application section by appending the name: ```bash gunicorn --paste development.ini#admin -b :8080 --chdir /path/to/project ``` In both modes Gunicorn will honor any Paste `loggers` configuration unless you override it with Gunicorn-specific [logging settings](reference/settings.md#logging). benoitc-gunicorn-f5fb19e/docs/content/signals.md000066400000000000000000000101741514360242400220520ustar00rootroot00000000000000 # Signal Handling A quick reference to the signals handled by Gunicorn. This includes the signals used internally to coordinate with worker processes. ## Master process - `QUIT`, `INT` — quick shutdown. - `TERM` — graceful shutdown; waits for workers to finish requests up to [`graceful_timeout`](reference/settings.md#graceful_timeout). - `HUP` — reload configuration, spawn new workers, and gracefully stop old ones. If the app is not preloaded (see [`preload_app`](reference/settings.md#preload_app)) the application code is reloaded too. - `TTIN` — increase worker count by one. - `TTOU` — decrease worker count by one. - `USR1` — reopen log files. - `USR2` — perform a binary upgrade. Send `TERM` to the old master afterwards to stop it. This also reloads preloaded applications (see [binary upgrades](#binary-upgrade)). 
- `WINCH` — gracefully stop workers when Gunicorn runs as a daemon. ## Worker process Workers rarely need direct signalling—if the master stays alive it will respawn workers automatically. - `QUIT`, `INT` — quick shutdown. - `TERM` — graceful shutdown. - `USR1` — reopen log files. ## Reload the configuration Use `HUP` to reload Gunicorn on the fly: ```text 2013-06-29 06:26:55 [20682] [INFO] Handling signal: hup 2013-06-29 06:26:55 [20682] [INFO] Hang up: Master 2013-06-29 06:26:55 [20703] [INFO] Booting worker with pid: 20703 2013-06-29 06:26:55 [20702] [INFO] Booting worker with pid: 20702 2013-06-29 06:26:55 [20688] [INFO] Worker exiting (pid: 20688) 2013-06-29 06:26:55 [20687] [INFO] Worker exiting (pid: 20687) 2013-06-29 06:26:55 [20689] [INFO] Worker exiting (pid: 20689) 2013-06-29 06:26:55 [20704] [INFO] Booting worker with pid: 20704 ``` Gunicorn reloads its settings, starts new workers, and gracefully shuts down the previous ones. If the app is not preloaded it reloads the application module as well. ## Upgrading to a new binary on the fly !!! info "Changed in 19.6.0" PID files now follow the pattern `.pid.2` instead of `.pid.oldbin`. You can replace the Gunicorn binary without downtime. Incoming requests remain served and preloaded applications reload. 1. Replace the old binary and send `USR2` to the master. Gunicorn starts a new master whose PID file ends with `.2` and spawns new workers. 
```text PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND 20844 benoitc 20 0 54808 11m 3352 S 0.0 0.1 0:00.36 gunicorn: master [test:app] 20849 benoitc 20 0 54808 9.9m 1500 S 0.0 0.1 0:00.02 gunicorn: worker [test:app] 20850 benoitc 20 0 54808 9.9m 1500 S 0.0 0.1 0:00.01 gunicorn: worker [test:app] 20851 benoitc 20 0 54808 9.9m 1500 S 0.0 0.1 0:00.01 gunicorn: worker [test:app] 20854 benoitc 20 0 55748 12m 3348 S 0.0 0.2 0:00.35 gunicorn: master [test:app] 20859 benoitc 20 0 55748 11m 1500 S 0.0 0.1 0:00.01 gunicorn: worker [test:app] 20860 benoitc 20 0 55748 11m 1500 S 0.0 0.1 0:00.00 gunicorn: worker [test:app] 20861 benoitc 20 0 55748 11m 1500 S 0.0 0.1 0:00.01 gunicorn: worker [test:app] ``` 2. Send `WINCH` to the old master to gracefully stop its workers. You can still roll back while the old master keeps its listen sockets: 1. Send `HUP` to the old master to restart its workers without reloading the config file. 2. Send `TERM` to the new master to shut down its workers gracefully. 3. Send `QUIT` to the new master to force it to exit. If the new workers linger, send `KILL` after the new master quits. To complete the upgrade, send `TERM` to the old master so only the new server continues running: ```text PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND 20854 benoitc 20 0 55748 12m 3348 S 0.0 0.2 0:00.45 gunicorn: master [test:app] 20859 benoitc 20 0 55748 11m 1500 S 0.0 0.1 0:00.02 gunicorn: worker [test:app] 20860 benoitc 20 0 55748 11m 1500 S 0.0 0.1 0:00.02 gunicorn: worker [test:app] 20861 benoitc 20 0 55748 11m 1500 S 0.0 0.1 0:00.01 gunicorn: worker [test:app] ``` benoitc-gunicorn-f5fb19e/docs/content/sponsor.md000066400000000000000000000043351514360242400221170ustar00rootroot00000000000000# Support Gunicorn Gunicorn has been serving Python web applications since 2010. It's downloaded millions of times per month and runs in production at companies of all sizes. 
**This project is maintained entirely by volunteers.** Your support helps ensure continued development, security updates, and compatibility with new Python versions. ## Why Sponsor? - **Security**: Rapid response to vulnerabilities - **Reliability**: Bug fixes and stability improvements - **Compatibility**: Support for new Python versions and frameworks - **Features**: Continued development of ASGI, HTTP/2, and more - **Documentation**: Keeping guides and references up to date ## How to Support ### Donate

GitHub Sponsors Open Collective Revolut

- **[GitHub Sponsors](https://github.com/sponsors/benoitc)** - Monthly or one-time donations - **[Open Collective](https://opencollective.com/gunicorn)** - Transparent finances, tax-deductible in some regions - **[Revolut](https://checkout.revolut.com/pay/c934e028-3a71-44eb-b99c-491342df2044)** - Direct donations (individuals and companies) ### Corporate Sponsorship If gunicorn is part of your infrastructure, consider: - **Recurring sponsorship** through [GitHub Sponsors](https://github.com/sponsors/benoitc) or [Open Collective](https://opencollective.com/gunicorn) - **Sponsored support contracts** for priority bug fixes and feature requests - **Logo placement** on our website and README for sponsors For corporate inquiries: [benoitc@enki-multimedia.eu](mailto:benoitc@enki-multimedia.eu) ## Sponsors Thank you to all our sponsors and contributors who make gunicorn possible! --- *Every contribution, no matter the size, helps keep gunicorn running. Thank you!* benoitc-gunicorn-f5fb19e/docs/content/styles/000077500000000000000000000000001514360242400214105ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/docs/content/styles/overrides.css000066400000000000000000000250261514360242400241310ustar00rootroot00000000000000/* Gunicorn Punchy Theme */ :root { --gunicorn-green: #00a650; --gunicorn-green-dark: #008542; --gunicorn-green-light: #00c853; --gunicorn-teal: #00bfa5; --gunicorn-bg: #fafafa; --gunicorn-card: #ffffff; --md-primary-fg-color: var(--gunicorn-green); --md-primary-fg-color--light: var(--gunicorn-green-light); --md-primary-fg-color--dark: var(--gunicorn-green-dark); --md-accent-fg-color: var(--gunicorn-teal); --md-typeset-a-color: var(--gunicorn-green); } [data-md-color-scheme="slate"] { --gunicorn-bg: #0d1117; --gunicorn-card: #161b22; --md-default-bg-color: #0d1117; --md-default-bg-color--light: #161b22; } /* Header - punchy gradient */ .md-header { background: linear-gradient(135deg, var(--gunicorn-green-dark) 0%, var(--gunicorn-green) 100%); box-shadow: 
0 2px 8px rgba(0, 0, 0, 0.15); } .md-tabs { background: linear-gradient(135deg, var(--gunicorn-green) 0%, var(--gunicorn-green-light) 100%); } /* Logo bigger */ .md-header__button.md-logo img, .md-header__button.md-logo svg { height: 2rem; } /* Version badge in header */ .md-header__version { margin-left: 0.5rem; padding: 0.2rem 0.5rem; font-size: 0.7rem; font-weight: 600; color: var(--gunicorn-green-dark); background: rgba(255, 255, 255, 0.9); border-radius: 4px; text-decoration: none; vertical-align: middle; } .md-header__version:hover { background: #ffffff; color: var(--gunicorn-green); } /* Navigation styling */ .md-nav__link:hover { color: var(--gunicorn-green); } .md-nav__link--active { color: var(--gunicorn-green); font-weight: 600; } /* Code blocks - punchy */ .md-typeset code { background: rgba(0, 166, 80, 0.08); color: var(--gunicorn-green-dark); border-radius: 4px; } [data-md-color-scheme="slate"] .md-typeset code { background: rgba(0, 200, 83, 0.12); color: var(--gunicorn-green-light); } .md-typeset pre { border-radius: 8px; box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1); } [data-md-color-scheme="slate"] .md-typeset pre { box-shadow: 0 4px 16px rgba(0, 0, 0, 0.3); } /* Admonitions - punchy colors */ .md-typeset .admonition, .md-typeset details { border-radius: 8px; box-shadow: 0 2px 8px rgba(0, 0, 0, 0.06); } .md-typeset .admonition.note, .md-typeset details.note { border-color: var(--gunicorn-teal); } .md-typeset .note > .admonition-title, .md-typeset .note > summary { background-color: rgba(0, 191, 165, 0.1); } .md-typeset .admonition.tip, .md-typeset details.tip { border-color: var(--gunicorn-green); } .md-typeset .tip > .admonition-title, .md-typeset .tip > summary { background-color: rgba(0, 166, 80, 0.1); } /* Tables - cleaner */ .md-typeset table:not([class]) { border-radius: 8px; overflow: hidden; box-shadow: 0 2px 8px rgba(0, 0, 0, 0.06); } .md-typeset table:not([class]) th { background: var(--gunicorn-green); color: white; font-weight: 600; } /* 
Buttons - punchy */ .md-typeset .md-button { border-radius: 8px; font-weight: 600; text-transform: none; letter-spacing: 0; transition: all 0.2s ease; } .md-typeset .md-button--primary { background: linear-gradient(135deg, var(--gunicorn-green) 0%, var(--gunicorn-green-light) 100%); border: none; box-shadow: 0 4px 12px rgba(0, 166, 80, 0.3); } .md-typeset .md-button--primary:hover { box-shadow: 0 6px 20px rgba(0, 166, 80, 0.4); transform: translateY(-2px); } /* Search */ .md-search__form { border-radius: 8px; } /* Footer */ .md-footer { background: linear-gradient(135deg, var(--gunicorn-green-dark) 0%, #1a1a2e 100%); } .md-footer-meta { background: rgba(0, 0, 0, 0.2); } /* Scrollbar */ ::-webkit-scrollbar { width: 8px; height: 8px; } ::-webkit-scrollbar-thumb { background: var(--gunicorn-green); border-radius: 4px; } ::-webkit-scrollbar-thumb:hover { background: var(--gunicorn-green-light); } /* Selection */ ::selection { background: rgba(0, 166, 80, 0.3); } /* ================================ Homepage Specific Styles ================================ */ /* These are for the non-custom template pages */ .md-typeset .hero { margin: 2rem 0 3rem; padding: 3.5rem; background: linear-gradient(135deg, var(--gunicorn-green-dark) 0%, var(--gunicorn-green) 50%, var(--gunicorn-teal) 100%); color: #fff; border-radius: 16px; box-shadow: 0 20px 60px rgba(0, 166, 80, 0.25); } [data-md-color-scheme="slate"] .md-typeset .hero { background: linear-gradient(135deg, #0d1117 0%, var(--gunicorn-green-dark) 50%, var(--gunicorn-green) 100%); box-shadow: 0 20px 60px rgba(0, 0, 0, 0.5); } .md-typeset .hero__inner { display: flex; flex-wrap: wrap; gap: 2.5rem; align-items: center; justify-content: space-between; } .md-typeset .hero__copy { flex: 1 1 320px; max-width: 520px; font-size: 1.05rem; line-height: 1.6; } .md-typeset .hero__copy h1 { margin: 0 0 1rem; font-size: 2.6rem; font-weight: 800; line-height: 1.15; letter-spacing: -0.02em; } .md-typeset .hero__tagline { font-size: 1.15rem; 
opacity: 0.95; margin-bottom: 0; } .md-typeset .hero__cta { margin-top: 2rem; display: flex; flex-wrap: wrap; gap: 1rem; } .md-typeset .hero__code { flex: 1 1 260px; max-width: 400px; background: rgba(0, 0, 0, 0.25); border-radius: 12px; padding: 1.5rem; backdrop-filter: blur(8px); border: 1px solid rgba(255, 255, 255, 0.1); } .md-typeset .hero__code pre { margin: 0 0 1rem; border: none; background: rgba(0, 0, 0, 0.4); color: #e8f5ea; box-shadow: none; } .md-typeset .hero__logo { height: 72px; margin-bottom: 1.5rem; filter: drop-shadow(0 4px 12px rgba(0, 0, 0, 0.2)); } /* Pillars */ .md-typeset .pillars { display: grid; grid-template-columns: repeat(auto-fit, minmax(240px, 1fr)); gap: 2rem; margin: 3rem 0; } .md-typeset .pillar { text-align: center; padding: 2rem; background: var(--gunicorn-card); border-radius: 12px; box-shadow: 0 4px 20px rgba(0, 0, 0, 0.06); transition: transform 0.2s ease, box-shadow 0.2s ease; } [data-md-color-scheme="slate"] .md-typeset .pillar { background: var(--gunicorn-card); box-shadow: 0 4px 20px rgba(0, 0, 0, 0.3); } .md-typeset .pillar:hover { transform: translateY(-4px); box-shadow: 0 12px 32px rgba(0, 166, 80, 0.15); } .md-typeset .pillar__icon { font-size: 3rem; margin-bottom: 1rem; } .md-typeset .pillar h3 { margin: 0 0 0.5rem; font-size: 1.3rem; font-weight: 700; color: var(--gunicorn-green-dark); } [data-md-color-scheme="slate"] .md-typeset .pillar h3 { color: var(--gunicorn-green-light); } .md-typeset .pillar p { margin: 0; font-size: 0.95rem; opacity: 0.8; } /* Frameworks */ .md-typeset .frameworks { display: flex; flex-wrap: wrap; gap: 1rem; justify-content: center; margin: 2rem 0 3rem; } .md-typeset .framework { background: var(--gunicorn-card); border: 2px solid transparent; border-radius: 50px; padding: 0.75rem 1.75rem; font-weight: 600; font-size: 0.95rem; color: var(--gunicorn-green-dark); box-shadow: 0 2px 8px rgba(0, 0, 0, 0.06); transition: all 0.2s ease; } [data-md-color-scheme="slate"] .md-typeset .framework { 
background: var(--gunicorn-card); color: #e8f5ea; } .md-typeset .framework:hover { border-color: var(--gunicorn-green); transform: translateY(-2px); box-shadow: 0 8px 24px rgba(0, 166, 80, 0.2); } /* Feature Grid */ .md-typeset .feature-grid { display: grid; grid-template-columns: repeat(auto-fit, minmax(260px, 1fr)); gap: 1.5rem; margin: 2.5rem 0 3rem; } .md-typeset .feature-card { background: var(--gunicorn-card); border-radius: 12px; padding: 1.75rem; border: 1px solid rgba(0, 166, 80, 0.1); box-shadow: 0 4px 16px rgba(0, 0, 0, 0.06); transition: all 0.2s ease; } [data-md-color-scheme="slate"] .md-typeset .feature-card { background: var(--gunicorn-card); border-color: rgba(0, 200, 83, 0.15); box-shadow: 0 4px 20px rgba(0, 0, 0, 0.3); } .md-typeset .feature-card:hover { transform: translateY(-4px); border-color: var(--gunicorn-green); box-shadow: 0 12px 32px rgba(0, 166, 80, 0.15); } .md-typeset .feature-card h3 { margin-top: 0; font-size: 1.2rem; font-weight: 700; color: var(--gunicorn-green-dark); display: flex; align-items: center; gap: 0.5rem; } [data-md-color-scheme="slate"] .md-typeset .feature-card h3 { color: var(--gunicorn-green-light); } .md-typeset .feature-card p { font-size: 0.95rem; opacity: 0.8; margin-bottom: 1rem; } .md-typeset .feature-card a { display: inline-flex; align-items: center; gap: 0.35rem; font-weight: 600; color: var(--gunicorn-green); } .md-typeset .feature-card a:hover { color: var(--gunicorn-green-light); } /* Badge */ .md-typeset .badge { display: inline-block; font-size: 0.65rem; font-weight: 700; text-transform: uppercase; padding: 0.2rem 0.6rem; border-radius: 50px; vertical-align: middle; letter-spacing: 0.05em; } .md-typeset .badge--new { background: linear-gradient(135deg, var(--gunicorn-green) 0%, var(--gunicorn-teal) 100%); color: #fff; } /* Quick Links */ .md-typeset .quick-links { display: grid; grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); gap: 1rem; margin: 2rem 0; } .md-typeset .quick-link { display: 
block; padding: 1.5rem; background: var(--gunicorn-card); border-radius: 12px; border: 2px solid transparent; text-decoration: none; box-shadow: 0 4px 12px rgba(0, 0, 0, 0.06); transition: all 0.2s ease; } [data-md-color-scheme="slate"] .md-typeset .quick-link { background: var(--gunicorn-card); } .md-typeset .quick-link:hover { border-color: var(--gunicorn-green); transform: translateY(-2px); box-shadow: 0 8px 24px rgba(0, 166, 80, 0.15); } .md-typeset .quick-link strong { display: block; font-size: 1.1rem; font-weight: 700; color: var(--gunicorn-green-dark); margin-bottom: 0.25rem; } [data-md-color-scheme="slate"] .md-typeset .quick-link strong { color: var(--gunicorn-green-light); } .md-typeset .quick-link span { font-size: 0.9rem; opacity: 0.7; } /* Community Links */ .md-typeset .community-links { margin: 1.5rem 0; } .md-typeset .community-links ul { list-style: none; padding: 0; margin: 0; } .md-typeset .community-links li { margin-bottom: 0.75rem; } /* Footer */ .md-footer-meta__inner { flex-wrap: wrap; } /* Responsive */ @media (max-width: 960px) { .md-typeset .hero { padding: 2.5rem; } .md-typeset .hero__copy h1 { font-size: 2rem; } } @media (max-width: 720px) { .md-typeset .hero { margin-top: 1.5rem; padding: 2rem; } .md-typeset .hero__cta { flex-direction: column; align-items: stretch; } .md-typeset .hero__code { width: 100%; } .md-typeset .pillars { grid-template-columns: 1fr; } } benoitc-gunicorn-f5fb19e/docs/content/uwsgi.md000066400000000000000000000136571514360242400215610ustar00rootroot00000000000000# uWSGI Protocol Gunicorn supports the uWSGI binary protocol, allowing it to receive requests from nginx using the `uwsgi_pass` directive. This provides efficient communication between nginx and Gunicorn without HTTP overhead. Both **WSGI** and **ASGI** workers support the uWSGI protocol. !!! note This is the **uWSGI binary protocol**, not the uWSGI server. 
Gunicorn implements the protocol to receive requests from nginx, similar to how the uWSGI server would. ## Quick Start Enable uWSGI protocol support: ```bash # WSGI application gunicorn myapp:app --protocol uwsgi --bind 127.0.0.1:8000 # ASGI application gunicorn myapp:app --worker-class asgi --protocol uwsgi --bind 127.0.0.1:8000 ``` Configure nginx to forward requests: ```nginx upstream gunicorn { server 127.0.0.1:8000; } server { listen 80; server_name example.com; location / { uwsgi_pass gunicorn; include uwsgi_params; } } ``` ## Why Use uWSGI Protocol? The uWSGI binary protocol offers several advantages over HTTP proxying: - **Lower overhead** - Binary format is more compact than HTTP headers - **Better integration** - nginx's native uwsgi module is highly optimized - **Simpler configuration** - No need to reconstruct HTTP headers ## Configuration ### Protocol Setting Switch from HTTP to uWSGI protocol: ```bash gunicorn myapp:app --protocol uwsgi ``` Or in a configuration file: ```python # gunicorn.conf.py protocol = "uwsgi" ``` ### Allowed IPs By default, uWSGI protocol requests are only accepted from localhost (`127.0.0.1` and `::1`). This prevents unauthorized hosts from sending requests directly to Gunicorn. To allow additional IPs: ```bash gunicorn myapp:app --protocol uwsgi --uwsgi-allow-from 10.0.0.1,10.0.0.2 ``` To allow all IPs (not recommended for production): ```bash gunicorn myapp:app --protocol uwsgi --uwsgi-allow-from '*' ``` !!! warning Only allow IPs from trusted sources. The uWSGI protocol does not provide authentication, so anyone who can connect can send requests. !!! note UNIX socket connections are always allowed regardless of this setting. 
### Using UNIX Sockets For better performance and security, use UNIX sockets instead of TCP: ```bash gunicorn myapp:app --protocol uwsgi --bind unix:/run/gunicorn.sock ``` Nginx configuration: ```nginx upstream gunicorn { server unix:/run/gunicorn.sock; } server { listen 80; location / { uwsgi_pass gunicorn; include uwsgi_params; } } ``` ## Nginx Configuration ### Basic Setup Create or verify the `uwsgi_params` file exists (usually at `/etc/nginx/uwsgi_params`): ```nginx uwsgi_param QUERY_STRING $query_string; uwsgi_param REQUEST_METHOD $request_method; uwsgi_param CONTENT_TYPE $content_type; uwsgi_param CONTENT_LENGTH $content_length; uwsgi_param REQUEST_URI $request_uri; uwsgi_param PATH_INFO $document_uri; uwsgi_param DOCUMENT_ROOT $document_root; uwsgi_param SERVER_PROTOCOL $server_protocol; uwsgi_param REQUEST_SCHEME $scheme; uwsgi_param HTTPS $https if_not_empty; uwsgi_param REMOTE_ADDR $remote_addr; uwsgi_param REMOTE_PORT $remote_port; uwsgi_param SERVER_PORT $server_port; uwsgi_param SERVER_NAME $server_name; ``` ### With SSL Termination When nginx handles SSL and forwards to Gunicorn: ```nginx server { listen 443 ssl; server_name example.com; ssl_certificate /path/to/cert.pem; ssl_certificate_key /path/to/key.pem; location / { uwsgi_pass gunicorn; include uwsgi_params; uwsgi_param HTTPS on; } } ``` ### Load Balancing Distribute requests across multiple Gunicorn instances: ```nginx upstream gunicorn { least_conn; server 127.0.0.1:8000; server 127.0.0.1:8001; server 127.0.0.1:8002; } server { listen 80; location / { uwsgi_pass gunicorn; include uwsgi_params; } } ``` ### Static Files Serve static files directly from nginx: ```nginx server { listen 80; location /static/ { alias /path/to/static/; } location / { uwsgi_pass gunicorn; include uwsgi_params; } } ``` ## Protocol Details The uWSGI protocol uses a compact binary format: | Bytes | Field | Description | |-------|-------|-------------| | 0 | modifier1 | Packet type (0 = WSGI request) | | 1-2 | datasize | 
Size of vars block (little-endian) | | 3 | modifier2 | Additional flags (usually 0) | After the header, the vars block contains CGI-style key-value pairs: ``` [2-byte key_size][key][2-byte val_size][value]... ``` Standard CGI variables like `REQUEST_METHOD`, `PATH_INFO`, and `QUERY_STRING` are extracted from this block to construct the WSGI environ. ## Combining with HTTP You can run Gunicorn with both HTTP and uWSGI protocol support by running separate instances: ```bash # HTTP for direct access gunicorn myapp:app --bind 127.0.0.1:8080 # uWSGI for nginx gunicorn myapp:app --protocol uwsgi --bind 127.0.0.1:8000 ``` ## Troubleshooting ### ForbiddenUWSGIRequest Error If you see "Forbidden uWSGI request from IP", the connecting IP is not in the allowed list. Either: 1. Add the IP to `--uwsgi-allow-from` 2. Use UNIX sockets instead 3. Ensure nginx is connecting from an allowed IP ### Invalid uWSGI Header This usually means: 1. HTTP traffic is being sent to a uWSGI endpoint 2. The packet is malformed or truncated 3. Network issues caused data corruption Verify that nginx is using `uwsgi_pass` (not `proxy_pass`) and that the `uwsgi_params` file is being included. ### Headers Missing If certain headers aren't reaching your application, verify they're included in `uwsgi_params`. Custom headers should be passed as: ```nginx uwsgi_param HTTP_X_CUSTOM_HEADER $http_x_custom_header; ``` ## See Also - [Settings Reference](reference/settings.md#protocol) - Protocol and uWSGI settings - [Deploy](deploy.md) - General deployment guidance - [Design](design.md) - Worker architecture overview benoitc-gunicorn-f5fb19e/docs/macros.py000066400000000000000000000007741514360242400202610ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from importlib import import_module def define_env(env): """Register template variables for MkDocs macros.""" gunicorn = import_module("gunicorn") env.variables.update( release=gunicorn.__version__, version=gunicorn.__version__, github_repo="https://github.com/benoitc/gunicorn", pypi_url=f"https://pypi.org/project/gunicorn/{gunicorn.__version__}/", ) benoitc-gunicorn-f5fb19e/examples/000077500000000000000000000000001514360242400173015ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/examples/alt_spec.py000066400000000000000000000014661514360242400214540ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # # An example of how to pass information from the command line to # a WSGI app. Only applies to the native WSGI workers used by # Gunicorn sync (default) workers. # # $ gunicorn 'alt_spec:load(arg)' # # Single quoting is generally necessary for shell escape semantics. # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. def load(arg): def app(environ, start_response): data = b'Hello, %s!\n' % arg status = '200 OK' response_headers = [ ('Content-type', 'text/plain'), ('Content-Length', str(len(data))) ] start_response(status, response_headers) return iter([data]) return app benoitc-gunicorn-f5fb19e/examples/asgi/000077500000000000000000000000001514360242400202245ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/examples/asgi/__init__.py000066400000000000000000000002321514360242400223320ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ ASGI example applications for gunicorn. """ benoitc-gunicorn-f5fb19e/examples/asgi/basic_app.py000066400000000000000000000075551514360242400225330ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
""" Basic ASGI application example. Run with: gunicorn -k asgi examples.asgi.basic_app:app Test with: curl http://127.0.0.1:8000/ curl http://127.0.0.1:8000/hello curl -X POST http://127.0.0.1:8000/echo -d "test data" """ async def app(scope, receive, send): """Simple ASGI application demonstrating basic functionality.""" if scope["type"] == "lifespan": await handle_lifespan(scope, receive, send) elif scope["type"] == "http": await handle_http(scope, receive, send) else: raise ValueError(f"Unknown scope type: {scope['type']}") async def handle_lifespan(scope, receive, send): """Handle lifespan events (startup/shutdown).""" while True: message = await receive() if message["type"] == "lifespan.startup": print("ASGI application starting up...") await send({"type": "lifespan.startup.complete"}) elif message["type"] == "lifespan.shutdown": print("ASGI application shutting down...") await send({"type": "lifespan.shutdown.complete"}) return async def handle_http(scope, receive, send): """Handle HTTP requests.""" path = scope["path"] method = scope["method"] if path == "/" and method == "GET": await send_response(send, 200, b"Welcome to gunicorn ASGI!\n") elif path == "/hello" and method == "GET": name = get_query_param(scope, "name", "World") body = f"Hello, {name}!\n".encode() await send_response(send, 200, body) elif path == "/echo" and method == "POST": body = await read_body(receive) await send_response(send, 200, body, content_type=b"application/octet-stream") elif path == "/headers": headers_info = format_headers(scope["headers"]) await send_response(send, 200, headers_info.encode()) elif path == "/info": info = format_request_info(scope) await send_response(send, 200, info.encode(), content_type=b"application/json") else: await send_response(send, 404, b"Not Found\n") async def send_response(send, status, body, content_type=b"text/plain"): """Send an HTTP response.""" await send({ "type": "http.response.start", "status": status, "headers": [ (b"content-type", 
content_type), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": "http.response.body", "body": body, }) async def read_body(receive): """Read the full request body.""" body = b"" while True: message = await receive() body += message.get("body", b"") if not message.get("more_body", False): break return body def get_query_param(scope, name, default=None): """Get a query parameter value.""" query_string = scope.get("query_string", b"").decode() for param in query_string.split("&"): if "=" in param: key, value = param.split("=", 1) if key == name: return value return default def format_headers(headers): """Format headers for display.""" lines = ["Request Headers:"] for name, value in headers: lines.append(f" {name.decode()}: {value.decode()}") return "\n".join(lines) + "\n" def format_request_info(scope): """Format request info as JSON.""" import json info = { "method": scope["method"], "path": scope["path"], "query_string": scope.get("query_string", b"").decode(), "http_version": scope["http_version"], "scheme": scope["scheme"], "server": list(scope.get("server") or []), "client": list(scope.get("client") or []), "root_path": scope.get("root_path", ""), } return json.dumps(info, indent=2) + "\n" benoitc-gunicorn-f5fb19e/examples/asgi/websocket_app.py000066400000000000000000000154171514360242400234340ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ WebSocket ASGI application example. 
Run with: gunicorn -k asgi examples.asgi.websocket_app:app Test with: # Using websocat (install with: cargo install websocat) websocat ws://127.0.0.1:8000/ws # Or using Python websockets library python -c " import asyncio import websockets async def test(): async with websockets.connect('ws://127.0.0.1:8000/ws') as ws: await ws.send('Hello') print(await ws.recv()) asyncio.run(test()) " """ async def app(scope, receive, send): """ASGI application with WebSocket support.""" if scope["type"] == "lifespan": await handle_lifespan(scope, receive, send) elif scope["type"] == "http": await handle_http(scope, receive, send) elif scope["type"] == "websocket": await handle_websocket(scope, receive, send) else: raise ValueError(f"Unknown scope type: {scope['type']}") async def handle_lifespan(scope, receive, send): """Handle lifespan events.""" while True: message = await receive() if message["type"] == "lifespan.startup": await send({"type": "lifespan.startup.complete"}) elif message["type"] == "lifespan.shutdown": await send({"type": "lifespan.shutdown.complete"}) return async def handle_http(scope, receive, send): """Handle HTTP requests - serve a simple HTML page for WebSocket testing.""" path = scope["path"] if path == "/": html = HTML_PAGE.encode() await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"text/html"), (b"content-length", str(len(html)).encode()), ], }) await send({ "type": "http.response.body", "body": html, }) else: await send({ "type": "http.response.start", "status": 404, "headers": [(b"content-type", b"text/plain")], }) await send({ "type": "http.response.body", "body": b"Not Found", }) async def handle_websocket(scope, receive, send): """Handle WebSocket connections.""" path = scope["path"] if path == "/ws": await echo_websocket(scope, receive, send) elif path == "/ws/chat": await chat_websocket(scope, receive, send) else: # Reject the connection await send({"type": "websocket.close", "code": 4004}) async def 
echo_websocket(scope, receive, send): """Echo WebSocket - sends back whatever it receives.""" # Wait for connection message = await receive() if message["type"] != "websocket.connect": return # Accept the connection await send({"type": "websocket.accept"}) # Echo loop try: while True: message = await receive() if message["type"] == "websocket.disconnect": break if message["type"] == "websocket.receive": if "text" in message: # Echo text back await send({ "type": "websocket.send", "text": f"Echo: {message['text']}" }) elif "bytes" in message: # Echo bytes back await send({ "type": "websocket.send", "bytes": message["bytes"] }) except Exception as e: print(f"WebSocket error: {e}") finally: try: await send({"type": "websocket.close", "code": 1000}) except Exception: pass async def chat_websocket(scope, receive, send): """Chat WebSocket - simple broadcast example.""" message = await receive() if message["type"] != "websocket.connect": return await send({ "type": "websocket.accept", "subprotocol": "chat" }) await send({ "type": "websocket.send", "text": "Welcome to the chat! Send messages and they will be echoed back." }) try: while True: message = await receive() if message["type"] == "websocket.disconnect": break if message["type"] == "websocket.receive" and "text" in message: text = message["text"] await send({ "type": "websocket.send", "text": f"[You]: {text}" }) except Exception: pass HTML_PAGE = """ WebSocket Test

WebSocket Test

""" benoitc-gunicorn-f5fb19e/examples/bad.py000066400000000000000000000004661514360242400204070ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import tempfile files = [] def app(environ, start_response): files.append(tempfile.mkstemp()) start_response('200 OK', [('Content-type', 'text/plain'), ('Content-length', '2')]) return ['ok'] benoitc-gunicorn-f5fb19e/examples/boot_fail.py000066400000000000000000000003251514360242400216110ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. raise RuntimeError("Bad app!") def app(environ, start_response): assert 1 == 2, "Shouldn't get here." benoitc-gunicorn-f5fb19e/examples/celery_alternative/000077500000000000000000000000001514360242400231625ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/examples/celery_alternative/Dockerfile000066400000000000000000000024211514360242400251530ustar00rootroot00000000000000# Dockerfile for Celery Replacement Example # # This demonstrates running a production-ready application with # Gunicorn dirty arbiters replacing Celery for background tasks. # # Key difference from Celery deployment: # - Celery: Needs separate web + worker containers + Redis/RabbitMQ # - Dirty: Single container handles both HTTP and background tasks FROM python:3.12-slim # Set working directory WORKDIR /app # Install system dependencies RUN apt-get update && apt-get install -y --no-install-recommends \ curl \ && rm -rf /var/lib/apt/lists/* # Copy gunicorn source and install (from build context root) COPY . 
/gunicorn-src RUN pip install --no-cache-dir /gunicorn-src # Copy example application COPY examples/celery_alternative /app RUN pip install --no-cache-dir fastapi uvloop requests pytest # Environment variables ENV PYTHONUNBUFFERED=1 ENV PYTHONDONTWRITEBYTECODE=1 ENV PYTHONPATH=/gunicorn-src ENV GUNICORN_BIND=0.0.0.0:8000 ENV GUNICORN_WORKERS=4 ENV DIRTY_WORKERS=9 ENV DIRTY_TIMEOUT=300 ENV LOG_LEVEL=info # Expose port EXPOSE 8000 # Health check HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ CMD curl -f http://localhost:8000/health || exit 1 # Run gunicorn with dirty arbiters CMD ["gunicorn", "-c", "gunicorn_conf.py", "app:app"] benoitc-gunicorn-f5fb19e/examples/celery_alternative/README.md000066400000000000000000000233641514360242400244510ustar00rootroot00000000000000# Celery Alternative Example This example demonstrates how to replace Celery with Gunicorn's **dirty arbiters** for background task processing, using **async ASGI** for non-blocking HTTP handling. ## Why Use This Instead of Celery? 
### The Problem with Celery Celery requires: - An external message broker (Redis or RabbitMQ) - Separate worker processes (`celery -A app worker`) - Stateless workers that reload models/connections on every task - Polling or WebSockets for progress updates ### What Dirty Arbiters Provide | Feature | Celery | Dirty Arbiters | |---------|--------|----------------| | **External broker** | Required (Redis/RabbitMQ) | None - uses Unix sockets | | **Deployment** | Multiple processes | Single `gunicorn` command | | **Worker state** | Stateless | Stateful - keep ML models, DB connections loaded | | **Progress updates** | Polling or WebSocket | Native streaming | | **HTTP blocking** | N/A (separate process) | Non-blocking with async ASGI | ### When to Use Dirty Arbiters **Good fit:** - Tasks that benefit from keeping state (ML models, DB connection pools, caches) - Tasks where you want immediate results (not fire-and-forget) - Real-time progress streaming - Simpler deployment without external dependencies **Not ideal for:** - True fire-and-forget queuing with persistence - Distributed task execution across multiple machines - Tasks that must survive server restarts ## How It Works ``` ┌─────────────────────────────────────────────────────────────┐ │ Gunicorn Master │ ├─────────────────────────────────────────────────────────────┤ │ │ │ ┌─────────────────────────────────────────────────────┐ │ │ │ ASGI Workers (uvloop) │ │ │ │ Non-blocking! One worker handles many requests │ │ │ │ await client.execute_async() doesn't block │ │ │ └──────────────────────────┬──────────────────────────┘ │ │ │ │ │ Unix Socket IPC │ │ │ │ │ ┌──────────────────────────┼──────────────────────────┐ │ │ │ Dirty Workers (Stateful) │ │ │ │ │ │ │ │ ┌────────────┐ ┌────────────┐ ┌────────────┐ │ │ │ │ │EmailWorker │ │ImageWorker │ │DataWorker │ ... 
│ │ │ │ │ (2 procs) │ │ (2 procs) │ │ (4 procs) │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ SMTP conn │ │ PIL loaded │ │ DB pool │ │ │ │ │ │ kept alive │ │ in memory │ │ cached │ │ │ │ │ └────────────┘ └────────────┘ └────────────┘ │ │ │ │ │ │ │ │ Dirty Arbiter │ │ │ └──────────────────────────────────────────────────────┘ │ │ │ └─────────────────────────────────────────────────────────────┘ ``` **Key insight:** The HTTP workers use async I/O, so `await client.execute_async()` doesn't block the event loop. One ASGI worker can handle thousands of concurrent requests while waiting for dirty workers to complete tasks. ## Quick Start ### Local Development ```bash # Install dependencies pip install fastapi uvloop httpx pytest pytest-asyncio pip install -e ../.. # Install gunicorn from source # Run the application gunicorn -c gunicorn_conf.py app:app # In another terminal, test it curl http://localhost:8000/health curl -X POST http://localhost:8000/api/email/send \ -H "Content-Type: application/json" \ -d '{"to": "test@example.com", "subject": "Hello", "body": "World"}' # Interactive API docs open http://localhost:8000/docs ``` ### Docker ```bash # Build and run docker compose up --build # Run with tests docker compose --profile test up --build --abort-on-container-exit ``` ## Task Workers Each worker class maintains state across requests: ### EmailWorker (2 workers) - Keeps SMTP connection alive - `send_email(to, subject, body)` - Send single email - `send_bulk_emails(recipients, subject, body)` - Bulk send with streaming progress ### ImageWorker (2 workers) - Keeps PIL/image libraries loaded - `resize(image_data, width, height)` - Resize image - `process_batch(images, operation)` - Batch process with streaming ### DataWorker (4 workers) - Maintains DB connection pool and query cache - `aggregate(data, group_by, agg_field)` - Aggregate data - `etl_pipeline(source_data, transformations)` - ETL with streaming progress - `cached_query(query_key, ttl)` - Query with in-memory caching 
### ScheduledWorker (1 worker) - For periodic tasks (call from cron) - `cleanup_old_files(directory, max_age_days)` - `generate_daily_report()` ## Streaming Progress Example Real-time progress without polling: ```python import httpx import json async with httpx.AsyncClient() as client: async with client.stream( "POST", "http://localhost:8000/api/email/send-bulk", json={ "recipients": ["a@x.com", "b@x.com", "c@x.com"], "subject": "Newsletter", "body": "Hello!", }, ) as response: async for line in response.aiter_lines(): if line.startswith("data: "): progress = json.loads(line[6:]) if progress["type"] == "progress": print(f"Progress: {progress['percent']}%") elif progress["type"] == "complete": print(f"Done! Sent: {progress['sent']}") ``` ## Celery Migration Guide ### Before (Celery) ```python # tasks.py from celery import Celery app = Celery('tasks', broker='redis://localhost') @app.task def send_email(to, subject, body): smtp = smtplib.SMTP(...) # New connection every task! smtp.send(...) return {"status": "sent"} @app.task(bind=True) def send_bulk(self, recipients, subject, body): for i, to in enumerate(recipients): send_email(to, subject, body) self.update_state(state='PROGRESS', meta={'current': i}) # Requires polling! ``` ```python # views.py - Flask from tasks import send_email @app.route('/send') def send_view(): send_email.delay(to, subject, body) # Fire and forget return {"status": "queued"} # Can't get result without polling ``` ### After (Dirty Arbiters) ```python # tasks.py from gunicorn.dirty.app import DirtyApp class EmailWorker(DirtyApp): workers = 2 def init(self): self.smtp = smtplib.SMTP(...) # Connected once, reused! def __call__(self, action, *args, **kwargs): return getattr(self, action)(*args, **kwargs) def send_email(self, to, subject, body): self.smtp.send(...) 
# Reuses connection return {"status": "sent"} def send_bulk(self, recipients, subject, body): for i, to in enumerate(recipients): self.send_email(to, subject, body) yield {"type": "progress", "current": i} # Native streaming! ``` ```python # views.py - FastAPI (async) from gunicorn.dirty import get_dirty_client_async @app.post('/send') async def send_view(data: EmailRequest): client = await get_dirty_client_async() # Non-blocking! Other requests handled while waiting result = await client.execute_async("tasks:EmailWorker", "send_email", ...) return result # Immediate result, no polling! ``` ## Configuration ```python # gunicorn_conf.py # ASGI workers for non-blocking HTTP worker_class = "asgi" asgi_loop = "uvloop" workers = 4 # Dirty workers (replace Celery) dirty_apps = [ "tasks:EmailWorker", "tasks:ImageWorker", "tasks:DataWorker", ] dirty_workers = 9 dirty_timeout = 300 ``` ## Running Tests ```bash # Unit tests (no server needed) pytest tests/test_tasks.py -v # Integration tests (server must be running) APP_URL=http://localhost:8000 pytest tests/test_integration.py -v # All tests via Docker docker compose --profile test up --build --abort-on-container-exit ``` ## API Endpoints Visit `/docs` for interactive Swagger documentation. | Endpoint | Method | Description | |----------|--------|-------------| | `/api/email/send` | POST | Send single email | | `/api/email/send-bulk` | POST | Bulk send (SSE streaming) | | `/api/image/resize` | POST | Resize image | | `/api/image/process-batch` | POST | Batch process (SSE streaming) | | `/api/data/aggregate` | POST | Aggregate data | | `/api/data/etl` | POST | ETL pipeline (SSE streaming) | | `/api/data/query` | POST | Cached query | | `/api/scheduled/*` | POST | Scheduled tasks | | `/health` | GET | Health check | benoitc-gunicorn-f5fb19e/examples/celery_alternative/app.py000066400000000000000000000326541514360242400243260ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. 
# See the NOTICE for more information. """ Web Application - FastAPI app demonstrating Celery replacement. This shows how to call dirty arbiter tasks from your web application using the async API, which doesn't block the event loop. Key difference from sync (Flask/gthread): - `await client.execute_async()` is non-blocking - A single worker can handle many concurrent requests - True async I/O - other requests proceed while waiting for task results """ import json from contextlib import asynccontextmanager from fastapi import FastAPI, HTTPException from fastapi.responses import StreamingResponse from pydantic import BaseModel from gunicorn.dirty import get_dirty_client_async from gunicorn.dirty.errors import ( DirtyError, DirtyTimeoutError, ) # Task worker import paths (like Celery task names) EMAIL_WORKER = "examples.celery_alternative.tasks:EmailWorker" IMAGE_WORKER = "examples.celery_alternative.tasks:ImageWorker" DATA_WORKER = "examples.celery_alternative.tasks:DataWorker" SCHEDULED_WORKER = "examples.celery_alternative.tasks:ScheduledWorker" @asynccontextmanager async def lifespan(app: FastAPI): """Application lifespan - startup and shutdown.""" yield app = FastAPI( title="Celery Replacement Demo", description="Demonstrating Gunicorn dirty arbiters as Celery replacement with async ASGI", lifespan=lifespan, ) # ============================================================================ # Request/Response Models # ============================================================================ class EmailRequest(BaseModel): to: str subject: str body: str html: bool = False class BulkEmailRequest(BaseModel): recipients: list[str] subject: str body: str class ImageResizeRequest(BaseModel): image_data: str = "" width: int = 800 height: int = 600 class ThumbnailRequest(BaseModel): image_data: str = "" size: int = 150 class ImageBatchRequest(BaseModel): images: list[dict] operation: str = "resize" width: int = 800 height: int = 600 size: int = 150 class 
AggregateRequest(BaseModel): data: list[dict] group_by: str agg_field: str agg_func: str = "sum" class ETLRequest(BaseModel): source_data: list[dict] transformations: list[dict] = [] class QueryRequest(BaseModel): query_key: str ttl: int = 300 class CleanupRequest(BaseModel): directory: str = "/tmp" max_age_days: int = 7 class SyncRequest(BaseModel): source: str = "default" # ============================================================================ # Email Tasks - Like Celery email tasks # ============================================================================ @app.post("/api/email/send") async def send_email(data: EmailRequest): """ Send a single email. Celery equivalent: send_email.delay(to, subject, body) With async dirty client, this doesn't block the event loop! Other requests can be handled while waiting for the task. """ try: client = await get_dirty_client_async() result = await client.execute_async( EMAIL_WORKER, "send_email", to=data.to, subject=data.subject, body=data.body, html=data.html, ) return result except DirtyTimeoutError: raise HTTPException(status_code=504, detail="Task timed out") except DirtyError as e: raise HTTPException(status_code=500, detail=str(e)) @app.post("/api/email/send-bulk") async def send_bulk_emails(data: BulkEmailRequest): """ Send bulk emails with streaming progress. Celery equivalent: result = send_bulk.apply_async([recipients, subject, body]) while not result.ready(): print(result.info) # Progress polling With dirty arbiters, progress is streamed in real-time! 
""" async def generate(): try: client = await get_dirty_client_async() async for progress in client.stream_async( EMAIL_WORKER, "send_bulk_emails", recipients=data.recipients, subject=data.subject, body=data.body, ): yield f"data: {json.dumps(progress)}\n\n" except DirtyError as e: yield f"data: {json.dumps({'error': str(e)})}\n\n" return StreamingResponse( generate(), media_type="text/event-stream", headers={ "Cache-Control": "no-cache", "X-Accel-Buffering": "no", }, ) @app.get("/api/email/stats") async def email_stats(): """Get email worker statistics.""" try: client = await get_dirty_client_async() result = await client.execute_async(EMAIL_WORKER, "stats") return result except DirtyError as e: raise HTTPException(status_code=500, detail=str(e)) # ============================================================================ # Image Tasks - Like Celery image processing tasks # ============================================================================ @app.post("/api/image/resize") async def resize_image(data: ImageResizeRequest): """ Resize an image. Celery equivalent: resize_image.delay(image_data, width, height) """ try: client = await get_dirty_client_async() result = await client.execute_async( IMAGE_WORKER, "resize", image_data=data.image_data, width=data.width, height=data.height, ) return result except DirtyError as e: raise HTTPException(status_code=500, detail=str(e)) @app.post("/api/image/thumbnail") async def generate_thumbnail(data: ThumbnailRequest): """Generate a thumbnail.""" try: client = await get_dirty_client_async() result = await client.execute_async( IMAGE_WORKER, "generate_thumbnail", image_data=data.image_data, size=data.size, ) return result except DirtyError as e: raise HTTPException(status_code=500, detail=str(e)) @app.post("/api/image/process-batch") async def process_image_batch(data: ImageBatchRequest): """ Process multiple images with progress streaming. 
""" async def generate(): try: client = await get_dirty_client_async() async for progress in client.stream_async( IMAGE_WORKER, "process_batch", images=data.images, operation=data.operation, width=data.width, height=data.height, size=data.size, ): yield f"data: {json.dumps(progress)}\n\n" except DirtyError as e: yield f"data: {json.dumps({'error': str(e)})}\n\n" return StreamingResponse( generate(), media_type="text/event-stream", ) @app.get("/api/image/stats") async def image_stats(): """Get image worker statistics.""" try: client = await get_dirty_client_async() result = await client.execute_async(IMAGE_WORKER, "stats") return result except DirtyError as e: raise HTTPException(status_code=500, detail=str(e)) # ============================================================================ # Data Tasks - Like Celery data processing tasks # ============================================================================ @app.post("/api/data/aggregate") async def aggregate_data(data: AggregateRequest): """ Aggregate data. Celery equivalent: aggregate_data.delay(data, group_by, agg_field, agg_func) """ try: client = await get_dirty_client_async() result = await client.execute_async( DATA_WORKER, "aggregate", data=data.data, group_by=data.group_by, agg_field=data.agg_field, agg_func=data.agg_func, ) return result except DirtyError as e: raise HTTPException(status_code=500, detail=str(e)) @app.post("/api/data/etl") async def run_etl(data: ETLRequest): """ Run ETL pipeline with streaming progress. 
Celery equivalent: chain(extract.s(), transform.s(), load.s()).apply_async() """ async def generate(): try: client = await get_dirty_client_async() async for progress in client.stream_async( DATA_WORKER, "etl_pipeline", source_data=data.source_data, transformations=data.transformations, ): yield f"data: {json.dumps(progress)}\n\n" except DirtyError as e: yield f"data: {json.dumps({'error': str(e)})}\n\n" return StreamingResponse( generate(), media_type="text/event-stream", ) @app.post("/api/data/query") async def cached_query(data: QueryRequest): """Execute a cached query.""" try: client = await get_dirty_client_async() result = await client.execute_async( DATA_WORKER, "cached_query", query_key=data.query_key, ttl=data.ttl, ) return result except DirtyError as e: raise HTTPException(status_code=500, detail=str(e)) @app.get("/api/data/stats") async def data_stats(): """Get data worker statistics.""" try: client = await get_dirty_client_async() result = await client.execute_async(DATA_WORKER, "stats") return result except DirtyError as e: raise HTTPException(status_code=500, detail=str(e)) # ============================================================================ # Scheduled Tasks - Like Celery Beat tasks # ============================================================================ @app.post("/api/scheduled/cleanup") async def run_cleanup(data: CleanupRequest = CleanupRequest()): """Run cleanup task (normally triggered by cron).""" try: client = await get_dirty_client_async() result = await client.execute_async( SCHEDULED_WORKER, "cleanup_old_files", directory=data.directory, max_age_days=data.max_age_days, ) return result except DirtyError as e: raise HTTPException(status_code=500, detail=str(e)) @app.post("/api/scheduled/daily-report") async def run_daily_report(): """Generate daily report.""" try: client = await get_dirty_client_async() result = await client.execute_async(SCHEDULED_WORKER, "generate_daily_report") return result except DirtyError as e: raise 
HTTPException(status_code=500, detail=str(e)) @app.post("/api/scheduled/sync") async def run_sync(data: SyncRequest = SyncRequest()): """Sync external data.""" try: client = await get_dirty_client_async() result = await client.execute_async( SCHEDULED_WORKER, "sync_external_data", source=data.source, ) return result except DirtyError as e: raise HTTPException(status_code=500, detail=str(e)) @app.get("/api/scheduled/stats") async def scheduled_stats(): """Get scheduled worker statistics.""" try: client = await get_dirty_client_async() result = await client.execute_async(SCHEDULED_WORKER, "stats") return result except DirtyError as e: raise HTTPException(status_code=500, detail=str(e)) # ============================================================================ # Health & Info Endpoints # ============================================================================ @app.get("/") async def index(): """API documentation.""" return { "name": "Celery Replacement Demo", "description": "Demonstrating Gunicorn dirty arbiters as Celery replacement (async ASGI)", "docs": "/docs", "endpoints": { "email": { "POST /api/email/send": "Send single email", "POST /api/email/send-bulk": "Send bulk emails (streaming)", "GET /api/email/stats": "Email worker stats", }, "image": { "POST /api/image/resize": "Resize image", "POST /api/image/thumbnail": "Generate thumbnail", "POST /api/image/process-batch": "Batch process (streaming)", "GET /api/image/stats": "Image worker stats", }, "data": { "POST /api/data/aggregate": "Aggregate data", "POST /api/data/etl": "Run ETL pipeline (streaming)", "POST /api/data/query": "Cached query", "GET /api/data/stats": "Data worker stats", }, "scheduled": { "POST /api/scheduled/cleanup": "Run cleanup", "POST /api/scheduled/daily-report": "Generate report", "POST /api/scheduled/sync": "Sync external data", "GET /api/scheduled/stats": "Scheduled worker stats", }, }, } @app.get("/health") async def health(): """Health check endpoint.""" try: client = await 
get_dirty_client_async() # Quick ping to verify workers are running await client.execute_async(EMAIL_WORKER, "stats") return {"status": "healthy", "workers": "connected"} except DirtyError: raise HTTPException( status_code=503, detail={"status": "degraded", "workers": "unavailable"} ) benoitc-gunicorn-f5fb19e/examples/celery_alternative/docker-compose.yml000066400000000000000000000033271514360242400266240ustar00rootroot00000000000000# Docker Compose for Celery Replacement Example # # Notice: Only ONE service needed! # Compare with typical Celery deployment which requires: # - web (gunicorn/uvicorn) # - celery_worker # - celery_beat (for scheduled tasks) # - redis or rabbitmq # # With dirty arbiters, everything runs in a single container. services: app: build: context: ../.. # Gunicorn repo root dockerfile: examples/celery_alternative/Dockerfile ports: - "8003:8000" environment: - GUNICORN_WORKERS=4 - DIRTY_WORKERS=9 - DIRTY_TIMEOUT=300 - LOG_LEVEL=info healthcheck: test: ["CMD", "curl", "-f", "http://localhost:8000/health"] interval: 30s timeout: 10s retries: 3 start_period: 10s # Resource limits (optional) deploy: resources: limits: memory: 1G reservations: memory: 256M # Test runner service tests: build: context: ../.. dockerfile: examples/celery_alternative/Dockerfile depends_on: app: condition: service_healthy environment: - APP_URL=http://app:8000 command: ["python", "-m", "pytest", "tests/", "-v", "--tb=short"] profiles: - test # For comparison, here's what a Celery deployment would look like: # # services: # web: # build: . # command: gunicorn app:app -b 0.0.0.0:8000 # ports: # - "8000:8000" # depends_on: # - redis # # celery_worker: # build: . # command: celery -A tasks worker -l info # depends_on: # - redis # # celery_beat: # build: . 
# command: celery -A tasks beat -l info # depends_on: # - redis # # redis: # image: redis:alpine # ports: # - "6379:6379" benoitc-gunicorn-f5fb19e/examples/celery_alternative/gunicorn_conf.py000066400000000000000000000123251514360242400263700ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Gunicorn Configuration - Celery Replacement Example This configuration sets up: 1. ASGI workers to handle web requests with async I/O (using uvloop) 2. Dirty workers to handle background tasks (replacing Celery workers) Why ASGI + Dirty Arbiters? - ASGI: Non-blocking HTTP handling - one worker handles many concurrent requests - Dirty: Stateful background workers - keep models/connections loaded in memory Comparison with Celery deployment: - Celery: gunicorn app:app + celery -A tasks worker + redis-server - Dirty: gunicorn -c gunicorn_conf.py app:app (single command, no broker!) """ import multiprocessing import os # ============================================================================= # Basic Settings # ============================================================================= # Bind to all interfaces on port 8000 bind = os.environ.get("GUNICORN_BIND", "0.0.0.0:8000") # HTTP workers - handle incoming web requests # With ASGI, fewer workers needed since each handles many concurrent requests workers = int(os.environ.get("GUNICORN_WORKERS", min(4, multiprocessing.cpu_count() + 1))) # Use gunicorn's native ASGI worker for async support # This enables: await client.execute_async() without blocking worker_class = "asgi" # Use uvloop for better async performance asgi_loop = "uvloop" # Maximum concurrent connections per worker worker_connections = 1000 # ============================================================================= # Dirty Arbiter Settings (Celery Worker Replacement) # ============================================================================= # Task workers - these 
replace Celery workers # Each dirty app can specify its own worker count via the `workers` class attribute dirty_apps = [ # Email tasks - 2 workers (I/O bound) "examples.celery_alternative.tasks:EmailWorker", # Image processing - 2 workers (CPU/memory intensive) "examples.celery_alternative.tasks:ImageWorker", # Data processing - 4 workers (parallelizable) "examples.celery_alternative.tasks:DataWorker", # Scheduled tasks - 1 worker "examples.celery_alternative.tasks:ScheduledWorker", ] # Total dirty workers (distributed among apps based on their `workers` attribute) # If not set, uses sum of all app worker counts dirty_workers = int(os.environ.get("DIRTY_WORKERS", 9)) # 2+2+4+1 = 9 # Task timeout in seconds (like Celery's task_time_limit) dirty_timeout = int(os.environ.get("DIRTY_TIMEOUT", 300)) # Threads per dirty worker (for concurrent task execution) dirty_threads = int(os.environ.get("DIRTY_THREADS", 1)) # Graceful shutdown timeout dirty_graceful_timeout = int(os.environ.get("DIRTY_GRACEFUL_TIMEOUT", 30)) # ============================================================================= # Timeouts & Limits # ============================================================================= # Worker timeout (seconds) timeout = 120 # Keep-alive connections keepalive = 5 # Maximum requests per worker before recycling max_requests = 1000 max_requests_jitter = 50 # ============================================================================= # Logging # ============================================================================= # Log level loglevel = os.environ.get("LOG_LEVEL", "info") # Access log format accesslog = "-" access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s" %(D)s' # Error log errorlog = "-" # ============================================================================= # Lifecycle Hooks # ============================================================================= def on_starting(server): """Called just before the master 
process is initialized.""" print("=" * 60) print("Starting Gunicorn with Dirty Arbiters (Celery Replacement)") print("Using ASGI workers with uvloop for non-blocking HTTP handling") print("=" * 60) def on_dirty_starting(arbiter): """Called when the dirty arbiter is starting.""" print(f"[Dirty] Starting dirty arbiter") print(f"[Dirty] Registered apps: {list(arbiter.cfg.dirty_apps)}") def dirty_post_fork(arbiter, worker): """Called after a dirty worker is forked.""" print(f"[Dirty] Worker {worker.pid} started") def dirty_worker_init(worker): """Called when a dirty worker initializes its apps.""" print(f"[Dirty] Worker {worker.pid} initialized apps: {list(worker.apps.keys())}") def dirty_worker_exit(arbiter, worker): """Called when a dirty worker exits.""" print(f"[Dirty] Worker {worker.pid} exiting") def worker_int(worker): """Called when a worker receives SIGINT.""" print(f"[HTTP] Worker {worker.pid} interrupted") def worker_exit(server, worker): """Called when a worker exits.""" print(f"[HTTP] Worker {worker.pid} exited") # ============================================================================= # Development vs Production # ============================================================================= # Reload on code changes (development only) reload = os.environ.get("GUNICORN_RELOAD", "false").lower() == "true" # Preload app for faster worker startup (production) preload_app = os.environ.get("GUNICORN_PRELOAD", "false").lower() == "true" benoitc-gunicorn-f5fb19e/examples/celery_alternative/requirements.txt000066400000000000000000000001751514360242400264510ustar00rootroot00000000000000# Celery Replacement Example Dependencies fastapi>=0.109.0 uvloop>=0.19.0 httpx>=0.26.0 pytest>=8.0.0 pytest-asyncio>=0.23.0 benoitc-gunicorn-f5fb19e/examples/celery_alternative/run_tests.sh000077500000000000000000000017671514360242400255620ustar00rootroot00000000000000#!/bin/bash # Run tests for Celery Replacement example set -e SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" 
&& pwd)" GUNICORN_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" # Add gunicorn to Python path export PYTHONPATH="$GUNICORN_ROOT:$PYTHONPATH" cd "$SCRIPT_DIR" echo "==========================================" echo "Running Unit Tests" echo "==========================================" python -m pytest tests/test_tasks.py -v --tb=short echo "" echo "==========================================" echo "Unit tests passed!" echo "==========================================" # Check if integration tests should run if [ "$1" == "--integration" ] || [ "$1" == "-i" ]; then APP_URL="${APP_URL:-http://localhost:8000}" echo "" echo "==========================================" echo "Running Integration Tests against $APP_URL" echo "==========================================" python -m pytest tests/test_integration.py -v --tb=short fi echo "" echo "All tests completed successfully!" benoitc-gunicorn-f5fb19e/examples/celery_alternative/tasks.py000066400000000000000000000434721514360242400246730ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Task Workers - Celery Replacement using Gunicorn Dirty Arbiters This module demonstrates how to replace Celery with Gunicorn's dirty arbiter feature for background task processing. Key benefits: 1. No external broker (Redis/RabbitMQ) needed - uses Unix sockets 2. Stateful workers - maintain connections, models, caches across requests 3. Integrated with your WSGI/ASGI app - no separate process management 4. Streaming support for progress reporting 5. 
Per-task-type worker allocation for memory optimization Comparison with Celery: - Celery: @app.task decorator -> Dirty: DirtyApp class with methods - Celery: task.delay() -> Dirty: client.execute() - Celery: task.apply_async() -> Dirty: client.execute() with timeout - Celery: task progress -> Dirty: client.stream() with generators """ import hashlib import json import os import random import smtplib import time from datetime import datetime from email.mime.text import MIMEText from typing import Any, Generator from gunicorn.dirty.app import DirtyApp class EmailWorker(DirtyApp): """ Email task worker - like Celery's @app.task for email sending. Maintains SMTP connection pool across requests for efficiency. In Celery, you'd create a new connection per task or manage it manually. """ # Limit to 2 workers since email sending is I/O bound workers = 2 def __init__(self): self.smtp_connection = None self.emails_sent = 0 self.last_connected = None def init(self): """Called once when worker starts - establish SMTP connection.""" self._connect_smtp() def _connect_smtp(self): """Establish SMTP connection (simulated for demo).""" # In production, connect to real SMTP server: # self.smtp_connection = smtplib.SMTP('smtp.example.com', 587) # self.smtp_connection.starttls() # self.smtp_connection.login(user, password) self.last_connected = datetime.now().isoformat() self.smtp_connection = "connected" # Simulated def __call__(self, action: str, *args, **kwargs) -> Any: """Dispatch to action methods.""" method = getattr(self, action, None) if method is None or action.startswith('_'): raise ValueError(f"Unknown action: {action}") return method(*args, **kwargs) def send_email(self, to: str, subject: str, body: str, html: bool = False) -> dict: """ Send a single email. Equivalent to Celery: @app.task def send_email(to, subject, body): ... 
""" # Simulate email sending delay time.sleep(random.uniform(0.1, 0.3)) self.emails_sent += 1 return { "status": "sent", "to": to, "subject": subject, "message_id": f"msg-{self.emails_sent}-{int(time.time())}", "timestamp": datetime.now().isoformat(), } def send_bulk_emails(self, recipients: list, subject: str, body: str) -> Generator[dict, None, None]: """ Send bulk emails with progress streaming. This is where dirty arbiters shine over Celery - real-time progress without polling or WebSockets. Equivalent to Celery: @app.task(bind=True) def send_bulk(self, recipients, subject, body): for i, to in enumerate(recipients): send_email(to, subject, body) self.update_state(state='PROGRESS', meta={'current': i, 'total': len(recipients)}) """ total = len(recipients) sent = 0 failed = 0 for i, to in enumerate(recipients): try: result = self.send_email(to, subject, body) sent += 1 yield { "type": "progress", "current": i + 1, "total": total, "percent": int((i + 1) / total * 100), "last_sent": to, "status": "sent", } except Exception as e: failed += 1 yield { "type": "progress", "current": i + 1, "total": total, "percent": int((i + 1) / total * 100), "last_sent": to, "status": "failed", "error": str(e), } # Final summary yield { "type": "complete", "total": total, "sent": sent, "failed": failed, } def stats(self) -> dict: """Get worker statistics.""" return { "emails_sent": self.emails_sent, "smtp_connected": self.smtp_connection is not None, "last_connected": self.last_connected, "worker_pid": os.getpid(), } def close(self): """Cleanup on shutdown.""" if self.smtp_connection and self.smtp_connection != "connected": self.smtp_connection.quit() class ImageWorker(DirtyApp): """ Image processing worker - demonstrates CPU-intensive tasks. Like Celery tasks for image resizing, thumbnails, watermarks. Keeps image processing libraries loaded in memory. 
""" # Limit to 2 workers - image processing is memory intensive workers = 2 def __init__(self): self.pil_available = False self.images_processed = 0 def init(self): """Load image processing libraries once at startup.""" try: # Try to import PIL - optional dependency from PIL import Image self.pil_available = True except ImportError: self.pil_available = False def __call__(self, action: str, *args, **kwargs) -> Any: method = getattr(self, action, None) if method is None or action.startswith('_'): raise ValueError(f"Unknown action: {action}") return method(*args, **kwargs) def resize(self, image_data: str, width: int, height: int) -> dict: """ Resize an image. Equivalent to Celery: @app.task def resize_image(image_path, width, height): img = Image.open(image_path) img.thumbnail((width, height)) img.save(output_path) """ # Simulate image processing time.sleep(random.uniform(0.2, 0.5)) self.images_processed += 1 # Create a fake "processed" result # In production, image_data would be base64 decoded data_size = len(image_data) if isinstance(image_data, str) else len(image_data) result_hash = hashlib.md5( f"{data_size}{width}{height}".encode() ).hexdigest()[:16] return { "status": "resized", "original_size": data_size, "target_dimensions": f"{width}x{height}", "result_id": f"img-{result_hash}", "pil_used": self.pil_available, } def generate_thumbnail(self, image_data: str, size: int = 150) -> dict: """Generate a thumbnail.""" return self.resize(image_data, size, size) def process_batch(self, images: list, operation: str, **params) -> Generator[dict, None, None]: """ Process multiple images with progress streaming. 
""" total = len(images) for i, img_info in enumerate(images): try: # Simulate fetching image data image_data = img_info.get("data", b"fake_image_data") if operation == "resize": result = self.resize( image_data, params.get("width", 800), params.get("height", 600) ) elif operation == "thumbnail": result = self.generate_thumbnail( image_data, params.get("size", 150) ) else: result = {"error": f"Unknown operation: {operation}"} yield { "type": "progress", "current": i + 1, "total": total, "percent": int((i + 1) / total * 100), "image_id": img_info.get("id", f"img-{i}"), "result": result, } except Exception as e: yield { "type": "error", "current": i + 1, "total": total, "image_id": img_info.get("id", f"img-{i}"), "error": str(e), } yield { "type": "complete", "total": total, "processed": self.images_processed, } def stats(self) -> dict: return { "images_processed": self.images_processed, "pil_available": self.pil_available, "worker_pid": os.getpid(), } class DataWorker(DirtyApp): """ Data processing worker - demonstrates stateful data operations. Maintains database connections, caches, and processing state. Perfect for ETL tasks, report generation, data aggregation. """ # More workers for data tasks - they're often parallelizable workers = 4 def __init__(self): self.cache = {} self.db_connection = None self.tasks_completed = 0 def init(self): """Initialize database connection and cache.""" # In production: self.db_connection = create_engine(DATABASE_URL) self.db_connection = "connected" self.cache = {} def __call__(self, action: str, *args, **kwargs) -> Any: method = getattr(self, action, None) if method is None or action.startswith('_'): raise ValueError(f"Unknown action: {action}") return method(*args, **kwargs) def aggregate(self, data: list, group_by: str, agg_field: str, agg_func: str = "sum") -> dict: """ Aggregate data - like a Celery task for report generation. 
Equivalent to Celery: @app.task def aggregate_sales(data, group_by, agg_field): df = pd.DataFrame(data) return df.groupby(group_by)[agg_field].sum().to_dict() """ # Simulate aggregation time.sleep(random.uniform(0.1, 0.3)) result = {} for item in data: key = item.get(group_by, "unknown") value = item.get(agg_field, 0) if key not in result: if agg_func in ("sum", "count"): result[key] = 0 else: result[key] = [] if agg_func == "sum": result[key] += value elif agg_func == "count": result[key] += 1 elif agg_func == "list": result[key].append(value) self.tasks_completed += 1 return { "status": "completed", "group_by": group_by, "agg_func": agg_func, "result": result, "record_count": len(data), } def etl_pipeline(self, source_data: list, transformations: list) -> Generator[dict, None, None]: """ Run an ETL pipeline with progress streaming. This replaces Celery chains/chords for multi-step processing: chain(extract.s(), transform.s(), load.s()) """ total_steps = len(transformations) + 2 # +2 for extract and load current_step = 0 data = source_data # Extract phase yield { "type": "progress", "phase": "extract", "step": current_step + 1, "total_steps": total_steps, "message": f"Extracting {len(data)} records", } time.sleep(0.2) # Simulate extraction current_step += 1 # Transform phases for i, transform in enumerate(transformations): transform_name = transform.get("name", f"transform_{i}") transform_type = transform.get("type", "passthrough") yield { "type": "progress", "phase": "transform", "step": current_step + 1, "total_steps": total_steps, "message": f"Applying {transform_name}", } # Apply transformation if transform_type == "filter": field = transform.get("field") value = transform.get("value") data = [d for d in data if d.get(field) == value] elif transform_type == "map": field = transform.get("field") func = transform.get("func", "upper") for d in data: if field in d and isinstance(d[field], str): if func == "upper": d[field] = d[field].upper() elif func == "lower": 
d[field] = d[field].lower() time.sleep(0.2) # Simulate transformation current_step += 1 # Load phase yield { "type": "progress", "phase": "load", "step": current_step + 1, "total_steps": total_steps, "message": f"Loading {len(data)} records", } time.sleep(0.2) # Simulate loading self.tasks_completed += 1 # Final result yield { "type": "complete", "records_processed": len(source_data), "records_output": len(data), "transformations_applied": len(transformations), } def cached_query(self, query_key: str, ttl: int = 300) -> dict: """ Execute a cached query - demonstrates stateful caching. Unlike Celery where you'd use Redis for caching, the dirty worker maintains its own in-memory cache. """ now = time.time() if query_key in self.cache: cached = self.cache[query_key] if now - cached["timestamp"] < ttl: return { "status": "cache_hit", "data": cached["data"], "cached_at": cached["timestamp"], "age_seconds": int(now - cached["timestamp"]), } # Simulate query execution time.sleep(random.uniform(0.2, 0.4)) # Generate fake result result_data = { "query": query_key, "rows": random.randint(10, 100), "computed_at": now, } self.cache[query_key] = { "data": result_data, "timestamp": now, } return { "status": "cache_miss", "data": result_data, "cached_at": now, } def stats(self) -> dict: return { "tasks_completed": self.tasks_completed, "cache_size": len(self.cache), "db_connected": self.db_connection is not None, "worker_pid": os.getpid(), } def close(self): """Cleanup on shutdown.""" self.cache.clear() if self.db_connection and self.db_connection != "connected": self.db_connection.close() class ScheduledWorker(DirtyApp): """ Scheduled task worker - for periodic/scheduled tasks. While dirty arbiters don't have built-in scheduling like Celery Beat, you can call these from a simple cron job or scheduler. 
""" workers = 1 # Single worker for scheduled tasks def __init__(self): self.last_runs = {} self.run_counts = {} def __call__(self, action: str, *args, **kwargs) -> Any: method = getattr(self, action, None) if method is None or action.startswith('_'): raise ValueError(f"Unknown action: {action}") # Track runs self.last_runs[action] = datetime.now().isoformat() self.run_counts[action] = self.run_counts.get(action, 0) + 1 return method(*args, **kwargs) def cleanup_old_files(self, directory: str, max_age_days: int = 7) -> dict: """ Cleanup old files - like a Celery periodic task. Equivalent to Celery Beat: @app.task def cleanup(): ... app.conf.beat_schedule = { 'cleanup-every-hour': { 'task': 'tasks.cleanup', 'schedule': 3600.0, }, } """ # Simulate cleanup time.sleep(0.3) files_deleted = random.randint(0, 10) return { "status": "completed", "directory": directory, "files_deleted": files_deleted, "space_freed_mb": files_deleted * random.uniform(0.1, 5.0), } def generate_daily_report(self) -> dict: """Generate daily report.""" time.sleep(0.5) return { "status": "completed", "report_date": datetime.now().strftime("%Y-%m-%d"), "metrics": { "active_users": random.randint(100, 1000), "new_signups": random.randint(10, 50), "revenue": random.uniform(1000, 10000), }, } def sync_external_data(self, source: str) -> dict: """Sync data from external source.""" time.sleep(0.4) return { "status": "completed", "source": source, "records_synced": random.randint(50, 500), "sync_time": datetime.now().isoformat(), } def stats(self) -> dict: return { "last_runs": self.last_runs, "run_counts": self.run_counts, "worker_pid": os.getpid(), } benoitc-gunicorn-f5fb19e/examples/celery_alternative/tests/000077500000000000000000000000001514360242400243245ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/examples/celery_alternative/tests/__init__.py000066400000000000000000000001721514360242400264350ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. 
# See the NOTICE for more information. # Tests package benoitc-gunicorn-f5fb19e/examples/celery_alternative/tests/conftest.py000066400000000000000000000005271514360242400265270ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Pytest configuration for Celery Replacement tests. """ import sys from pathlib import Path # Add gunicorn source to path for imports gunicorn_root = Path(__file__).parent.parent.parent.parent sys.path.insert(0, str(gunicorn_root)) benoitc-gunicorn-f5fb19e/examples/celery_alternative/tests/test_integration.py000066400000000000000000000322611514360242400302640ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Integration Tests for Celery Replacement Example These tests run against the full application with Gunicorn and dirty arbiters. They can be run locally or in Docker. Usage: # Local (with gunicorn running): APP_URL=http://localhost:8000 pytest tests/test_integration.py -v # Docker: docker compose --profile test up --build --abort-on-container-exit """ import json import os import time import pytest import requests # Get app URL from environment or use default APP_URL = os.environ.get("APP_URL", "http://localhost:8000") def read_sse_events(response, max_events=100): """ Read SSE events from a streaming response. Stops when receiving a 'complete' or 'error' event, or max_events reached. 
""" events = [] for line in response.iter_lines(decode_unicode=True): if line.startswith("data: "): data = json.loads(line[6:]) events.append(data) if data.get("type") in ("complete", "error"): break if len(events) >= max_events: break return events def wait_for_app(timeout=30): """Wait for the application to be ready.""" start = time.time() while time.time() - start < timeout: try: resp = requests.get(f"{APP_URL}/health", timeout=5) if resp.status_code == 200: return True except requests.exceptions.ConnectionError: pass time.sleep(1) return False @pytest.fixture(scope="module", autouse=True) def ensure_app_running(): """Ensure the application is running before tests.""" if not wait_for_app(): pytest.skip("Application not available") class TestHealthEndpoint: """Test health check endpoint.""" def test_health_check(self): """Test that health endpoint returns healthy status.""" resp = requests.get(f"{APP_URL}/health") assert resp.status_code == 200 data = resp.json() assert data["status"] == "healthy" assert data["workers"] == "connected" class TestEmailTasks: """Integration tests for email tasks.""" def test_send_single_email(self): """Test sending a single email via API.""" resp = requests.post( f"{APP_URL}/api/email/send", json={ "to": "test@example.com", "subject": "Integration Test", "body": "Hello from integration test", }, ) assert resp.status_code == 200 data = resp.json() assert data["status"] == "sent" assert data["to"] == "test@example.com" assert "message_id" in data def test_send_bulk_emails_streaming(self): """Test bulk email sending with SSE streaming.""" recipients = ["a@test.com", "b@test.com", "c@test.com"] resp = requests.post( f"{APP_URL}/api/email/send-bulk", json={ "recipients": recipients, "subject": "Bulk Test", "body": "Hello all", }, stream=True, ) assert resp.status_code == 200 events = read_sse_events(resp) # Should have progress for each recipient + complete assert len(events) == len(recipients) + 1 # Check progress events for i, event in 
enumerate(events[:-1]): assert event["type"] == "progress" assert event["current"] == i + 1 # Check complete event assert events[-1]["type"] == "complete" assert events[-1]["sent"] == len(recipients) def test_email_stats(self): """Test email worker statistics endpoint.""" # Send an email first requests.post( f"{APP_URL}/api/email/send", json={"to": "x@x.com", "subject": "S", "body": "B"}, ) resp = requests.get(f"{APP_URL}/api/email/stats") assert resp.status_code == 200 data = resp.json() assert data["emails_sent"] >= 1 assert data["smtp_connected"] is True assert "worker_pid" in data class TestImageTasks: """Integration tests for image tasks.""" def test_resize_image(self): """Test image resizing via API.""" resp = requests.post( f"{APP_URL}/api/image/resize", json={ "image_data": "base64_encoded_image_data", "width": 800, "height": 600, }, ) assert resp.status_code == 200 data = resp.json() assert data["status"] == "resized" assert data["target_dimensions"] == "800x600" def test_generate_thumbnail(self): """Test thumbnail generation via API.""" resp = requests.post( f"{APP_URL}/api/image/thumbnail", json={ "image_data": "base64_image", "size": 150, }, ) assert resp.status_code == 200 data = resp.json() assert data["status"] == "resized" assert data["target_dimensions"] == "150x150" def test_batch_processing_streaming(self): """Test batch image processing with streaming.""" images = [ {"id": "img1", "data": "data1"}, {"id": "img2", "data": "data2"}, ] resp = requests.post( f"{APP_URL}/api/image/process-batch", json={ "images": images, "operation": "resize", "width": 400, "height": 300, }, stream=True, ) assert resp.status_code == 200 events = read_sse_events(resp) assert len(events) == len(images) + 1 assert events[-1]["type"] == "complete" def test_image_stats(self): """Test image worker statistics.""" resp = requests.get(f"{APP_URL}/api/image/stats") assert resp.status_code == 200 data = resp.json() assert "images_processed" in data assert "worker_pid" in data 
class TestDataTasks: """Integration tests for data processing tasks.""" def test_aggregate_data(self): """Test data aggregation via API.""" resp = requests.post( f"{APP_URL}/api/data/aggregate", json={ "data": [ {"category": "A", "value": 10}, {"category": "B", "value": 20}, {"category": "A", "value": 30}, ], "group_by": "category", "agg_field": "value", "agg_func": "sum", }, ) assert resp.status_code == 200 data = resp.json() assert data["status"] == "completed" assert data["result"]["A"] == 40 assert data["result"]["B"] == 20 def test_etl_pipeline_streaming(self): """Test ETL pipeline with streaming progress.""" resp = requests.post( f"{APP_URL}/api/data/etl", json={ "source_data": [ {"name": "alice", "status": "active"}, {"name": "bob", "status": "inactive"}, {"name": "charlie", "status": "active"}, ], "transformations": [ {"name": "filter", "type": "filter", "field": "status", "value": "active"}, ], }, stream=True, ) assert resp.status_code == 200 events = read_sse_events(resp) # extract + transform + load + complete assert len(events) == 4 # Check phases phases = [e.get("phase") for e in events[:-1]] assert "extract" in phases assert "transform" in phases assert "load" in phases # Final result assert events[-1]["type"] == "complete" assert events[-1]["records_output"] == 2 def test_cached_query(self): """Test cached query functionality.""" query_key = f"test_query_{time.time()}" # First call - cache miss resp1 = requests.post( f"{APP_URL}/api/data/query", json={"query_key": query_key, "ttl": 300}, ) assert resp1.status_code == 200 assert resp1.json()["status"] == "cache_miss" # Second call - may be cache hit or miss depending on which worker handles it # (cache is per-worker, not shared) # Retry a few times to likely hit the same worker cache_hit = False for _ in range(5): resp2 = requests.post( f"{APP_URL}/api/data/query", json={"query_key": query_key, "ttl": 300}, ) assert resp2.status_code == 200 if resp2.json()["status"] == "cache_hit": cache_hit = True 
break assert cache_hit, "Expected cache_hit after multiple requests to same key" def test_data_stats(self): """Test data worker statistics.""" resp = requests.get(f"{APP_URL}/api/data/stats") assert resp.status_code == 200 data = resp.json() assert "tasks_completed" in data assert "cache_size" in data class TestScheduledTasks: """Integration tests for scheduled tasks.""" def test_cleanup_task(self): """Test cleanup task execution.""" resp = requests.post( f"{APP_URL}/api/scheduled/cleanup", json={"directory": "/tmp/test", "max_age_days": 7}, ) assert resp.status_code == 200 data = resp.json() assert data["status"] == "completed" assert "files_deleted" in data def test_daily_report(self): """Test daily report generation.""" resp = requests.post(f"{APP_URL}/api/scheduled/daily-report") assert resp.status_code == 200 data = resp.json() assert data["status"] == "completed" assert "metrics" in data def test_sync_task(self): """Test data sync task.""" resp = requests.post( f"{APP_URL}/api/scheduled/sync", json={"source": "test_source"}, ) assert resp.status_code == 200 data = resp.json() assert data["status"] == "completed" assert data["source"] == "test_source" def test_scheduled_stats(self): """Test scheduled worker statistics.""" # Run a task first requests.post(f"{APP_URL}/api/scheduled/daily-report") resp = requests.get(f"{APP_URL}/api/scheduled/stats") assert resp.status_code == 200 data = resp.json() assert "run_counts" in data assert "generate_daily_report" in data["run_counts"] class TestConcurrency: """Test concurrent task execution.""" def test_concurrent_requests(self): """Test that multiple concurrent requests are handled.""" import concurrent.futures def send_email(): return requests.post( f"{APP_URL}/api/email/send", json={"to": "x@x.com", "subject": "Concurrent", "body": "Test"}, ) # Send 10 concurrent requests with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor: futures = [executor.submit(send_email) for _ in range(10)] results = 
[f.result() for f in futures] # All should succeed assert all(r.status_code == 200 for r in results) assert all(r.json()["status"] == "sent" for r in results) def test_mixed_task_types(self): """Test different task types running concurrently.""" import concurrent.futures def email_task(): return requests.post( f"{APP_URL}/api/email/send", json={"to": "x@x.com", "subject": "S", "body": "B"}, ) def image_task(): return requests.post( f"{APP_URL}/api/image/resize", json={"image_data": "x", "width": 100, "height": 100}, ) def data_task(): return requests.post( f"{APP_URL}/api/data/aggregate", json={ "data": [{"a": 1}], "group_by": "a", "agg_field": "a", "agg_func": "sum", }, ) with concurrent.futures.ThreadPoolExecutor(max_workers=9) as executor: futures = [] for _ in range(3): futures.append(executor.submit(email_task)) futures.append(executor.submit(image_task)) futures.append(executor.submit(data_task)) results = [f.result() for f in futures] # All should succeed assert all(r.status_code == 200 for r in results) class TestErrorHandling: """Test error handling scenarios.""" def test_invalid_action(self): """Test that invalid actions return appropriate errors.""" # This would require modifying the API to expose raw execute # For now, we test via a malformed request resp = requests.post( f"{APP_URL}/api/email/send", json={}, # Missing required fields ) # Should get a validation error (FastAPI returns 422) assert resp.status_code == 422 benoitc-gunicorn-f5fb19e/examples/celery_alternative/tests/test_tasks.py000066400000000000000000000241221514360242400270630ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Unit Tests for Task Workers These tests verify the task worker logic without running Gunicorn. They test the DirtyApp classes directly. 
""" import pytest from examples.celery_alternative.tasks import ( EmailWorker, ImageWorker, DataWorker, ScheduledWorker, ) class TestEmailWorker: """Tests for EmailWorker task class.""" def setup_method(self): """Set up test fixtures.""" self.worker = EmailWorker() self.worker.init() def test_send_email(self): """Test sending a single email.""" result = self.worker("send_email", to="test@example.com", subject="Test", body="Hello") assert result["status"] == "sent" assert result["to"] == "test@example.com" assert result["subject"] == "Test" assert "message_id" in result assert "timestamp" in result def test_send_email_increments_counter(self): """Test that email counter increments.""" initial_count = self.worker.emails_sent self.worker("send_email", to="a@x.com", subject="S", body="B") self.worker("send_email", to="b@x.com", subject="S", body="B") assert self.worker.emails_sent == initial_count + 2 def test_send_bulk_emails_streaming(self): """Test bulk email sending with progress streaming.""" recipients = ["a@x.com", "b@x.com", "c@x.com"] results = list(self.worker("send_bulk_emails", recipients=recipients, subject="Bulk", body="Hello all")) # Should have progress updates + final complete assert len(results) == len(recipients) + 1 # Check progress updates for i, r in enumerate(results[:-1]): assert r["type"] == "progress" assert r["current"] == i + 1 assert r["total"] == len(recipients) # Check final result final = results[-1] assert final["type"] == "complete" assert final["total"] == len(recipients) assert final["sent"] == len(recipients) def test_stats(self): """Test worker statistics.""" self.worker("send_email", to="x@x.com", subject="S", body="B") stats = self.worker("stats") assert stats["emails_sent"] >= 1 assert stats["smtp_connected"] is True assert "worker_pid" in stats def test_unknown_action_raises(self): """Test that unknown actions raise ValueError.""" with pytest.raises(ValueError, match="Unknown action"): self.worker("nonexistent_action") def 
test_private_method_raises(self): """Test that private methods cannot be called.""" with pytest.raises(ValueError, match="Unknown action"): self.worker("_connect_smtp") class TestImageWorker: """Tests for ImageWorker task class.""" def setup_method(self): """Set up test fixtures.""" self.worker = ImageWorker() self.worker.init() def test_resize_image(self): """Test image resizing.""" result = self.worker("resize", image_data="fake_image_data", width=800, height=600) assert result["status"] == "resized" assert result["target_dimensions"] == "800x600" assert "result_id" in result def test_generate_thumbnail(self): """Test thumbnail generation.""" result = self.worker("generate_thumbnail", image_data="fake_image_data", size=150) assert result["status"] == "resized" assert result["target_dimensions"] == "150x150" def test_process_batch_streaming(self): """Test batch processing with progress streaming.""" images = [ {"id": "img1", "data": b"data1"}, {"id": "img2", "data": b"data2"}, {"id": "img3", "data": b"data3"}, ] results = list(self.worker("process_batch", images=images, operation="resize", width=800, height=600)) # Progress for each image + complete assert len(results) == len(images) + 1 # Check progress updates for i, r in enumerate(results[:-1]): assert r["type"] == "progress" assert r["image_id"] == f"img{i+1}" assert "result" in r # Check final result final = results[-1] assert final["type"] == "complete" def test_stats(self): """Test worker statistics.""" self.worker("resize", image_data=b"x", width=100, height=100) stats = self.worker("stats") assert stats["images_processed"] >= 1 assert "pil_available" in stats assert "worker_pid" in stats class TestDataWorker: """Tests for DataWorker task class.""" def setup_method(self): """Set up test fixtures.""" self.worker = DataWorker() self.worker.init() def test_aggregate_sum(self): """Test data aggregation with sum.""" data = [ {"category": "A", "value": 10}, {"category": "B", "value": 20}, {"category": "A", 
"value": 30}, ] result = self.worker("aggregate", data=data, group_by="category", agg_field="value", agg_func="sum") assert result["status"] == "completed" assert result["result"]["A"] == 40 assert result["result"]["B"] == 20 def test_aggregate_count(self): """Test data aggregation with count.""" data = [ {"category": "A", "value": 10}, {"category": "B", "value": 20}, {"category": "A", "value": 30}, ] result = self.worker("aggregate", data=data, group_by="category", agg_field="value", agg_func="count") assert result["result"]["A"] == 2 assert result["result"]["B"] == 1 def test_etl_pipeline_streaming(self): """Test ETL pipeline with progress streaming.""" source_data = [ {"name": "alice", "status": "active"}, {"name": "bob", "status": "inactive"}, {"name": "charlie", "status": "active"}, ] transformations = [ {"name": "filter_active", "type": "filter", "field": "status", "value": "active"}, {"name": "uppercase", "type": "map", "field": "name", "func": "upper"}, ] results = list(self.worker("etl_pipeline", source_data=source_data, transformations=transformations)) # extract + transforms + load + complete expected_steps = 1 + len(transformations) + 1 + 1 assert len(results) == expected_steps # Check phases assert results[0]["phase"] == "extract" assert results[1]["phase"] == "transform" assert results[2]["phase"] == "transform" assert results[3]["phase"] == "load" assert results[4]["type"] == "complete" # Final should have 2 records (filtered) assert results[4]["records_output"] == 2 def test_cached_query_miss_then_hit(self): """Test query caching - miss then hit.""" # First call - cache miss result1 = self.worker("cached_query", query_key="test_query", ttl=300) assert result1["status"] == "cache_miss" # Second call - cache hit result2 = self.worker("cached_query", query_key="test_query", ttl=300) assert result2["status"] == "cache_hit" def test_stats(self): """Test worker statistics.""" self.worker("aggregate", data=[{"a": 1}], group_by="a", agg_field="a") stats = 
self.worker("stats") assert stats["tasks_completed"] >= 1 assert "cache_size" in stats assert stats["db_connected"] is True class TestScheduledWorker: """Tests for ScheduledWorker task class.""" def setup_method(self): """Set up test fixtures.""" self.worker = ScheduledWorker() def test_cleanup_old_files(self): """Test file cleanup task.""" result = self.worker("cleanup_old_files", directory="/tmp/test", max_age_days=7) assert result["status"] == "completed" assert result["directory"] == "/tmp/test" assert "files_deleted" in result assert "space_freed_mb" in result def test_generate_daily_report(self): """Test daily report generation.""" result = self.worker("generate_daily_report") assert result["status"] == "completed" assert "report_date" in result assert "metrics" in result assert "active_users" in result["metrics"] assert "new_signups" in result["metrics"] assert "revenue" in result["metrics"] def test_sync_external_data(self): """Test external data sync.""" result = self.worker("sync_external_data", source="test_api") assert result["status"] == "completed" assert result["source"] == "test_api" assert "records_synced" in result def test_stats_tracks_runs(self): """Test that stats tracks task runs.""" self.worker("cleanup_old_files", directory="/tmp", max_age_days=1) self.worker("cleanup_old_files", directory="/tmp", max_age_days=1) self.worker("generate_daily_report") stats = self.worker("stats") assert stats["run_counts"]["cleanup_old_files"] == 2 assert stats["run_counts"]["generate_daily_report"] == 1 assert "cleanup_old_files" in stats["last_runs"] benoitc-gunicorn-f5fb19e/examples/deep/000077500000000000000000000000001514360242400202165ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/examples/deep/__init__.py000066400000000000000000000001521514360242400223250ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
benoitc-gunicorn-f5fb19e/examples/deep/test.py000066400000000000000000000011751514360242400215530ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # # Example code from Eventlet sources from wsgiref.validate import validator from gunicorn import __version__ @validator def app(environ, start_response): """Simplest possible application object""" data = b'Hello, World!\n' status = '200 OK' response_headers = [ ('Content-type', 'text/plain'), ('Content-Length', str(len(data))), ('X-Gunicorn-Version', __version__), ('Foo', 'B\u00e5r'), # Foo: Bår ] start_response(status, response_headers) return iter([data]) benoitc-gunicorn-f5fb19e/examples/dirty_example/000077500000000000000000000000001514360242400221475ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/examples/dirty_example/Dockerfile000066400000000000000000000010751514360242400241440ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. FROM python:3.12-slim WORKDIR /app # Copy gunicorn source COPY . /app/gunicorn-src # Install gunicorn and dependencies # setproctitle is needed for process title changes RUN pip install --no-cache-dir /app/gunicorn-src setproctitle # Copy example files COPY examples/dirty_example/ /app/examples/dirty_example/ WORKDIR /app # Expose the port EXPOSE 8000 # Default command - run the example tests CMD ["python", "-m", "pytest", "-v", "examples/dirty_example/"] benoitc-gunicorn-f5fb19e/examples/dirty_example/__init__.py000066400000000000000000000001521514360242400242560ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. benoitc-gunicorn-f5fb19e/examples/dirty_example/dirty_app.py000066400000000000000000000207671514360242400245300ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. 
# See the NOTICE for more information. """ Example Dirty Application - Simulates ML Model Loading and Inference This demonstrates how to create a DirtyApp that: 1. Loads "models" at startup (init) 2. Handles requests from HTTP workers (__call__) 3. Cleans up on shutdown (close) """ import os import time import hashlib from gunicorn.dirty.app import DirtyApp from gunicorn.dirty import stash class MLApp(DirtyApp): """ Example dirty application that simulates ML model operations. In a real application, this would load actual ML models like: - PyTorch models - TensorFlow models - Scikit-learn models - LLM models (Hugging Face, etc.) """ def __init__(self): self.models = {} self.load_count = 0 self.inference_count = 0 def init(self): """Called once when dirty worker starts.""" print(f"[MLApp] Initializing... (pid: {__import__('os').getpid()})") # Simulate loading a default model (takes time) self._load_model("default") print(f"[MLApp] Initialization complete. Models loaded: {list(self.models.keys())}") def __call__(self, action, *args, **kwargs): """Dispatch to action methods.""" method = getattr(self, action, None) if method is None or action.startswith('_'): raise ValueError(f"Unknown action: {action}") return method(*args, **kwargs) def _load_model(self, name): """Simulate loading a model (expensive operation).""" print(f"[MLApp] Loading model '{name}'...") # Simulate model loading time time.sleep(0.5) # Create a fake "model" object self.models[name] = { "name": name, "loaded_at": time.time(), "version": "1.0.0", "parameters": 1_000_000, # Simulated parameter count } self.load_count += 1 print(f"[MLApp] Model '{name}' loaded successfully") return self.models[name] def load_model(self, name): """Load a model into memory (called from HTTP workers).""" if name in self.models: return {"status": "already_loaded", "model": self.models[name]} model = self._load_model(name) return {"status": "loaded", "model": model} def list_models(self): """List all loaded models.""" 
return { "models": list(self.models.keys()), "count": len(self.models), "total_loads": self.load_count, "total_inferences": self.inference_count, } def inference(self, model_name, input_data): """Run inference on a loaded model.""" if model_name not in self.models: raise ValueError(f"Model not loaded: {model_name}") model = self.models[model_name] self.inference_count += 1 # Simulate inference (compute a hash as a "prediction") time.sleep(0.1) # Simulate computation time result = { "model": model_name, "input_hash": hashlib.md5(str(input_data).encode()).hexdigest()[:8], "prediction": f"result_{self.inference_count}", "confidence": 0.95, "inference_time_ms": 100, } return result def unload_model(self, name): """Unload a model from memory.""" if name not in self.models: return {"status": "not_found", "name": name} del self.models[name] return {"status": "unloaded", "name": name} def close(self): """Cleanup on shutdown.""" print(f"[MLApp] Shutting down. Total inferences: {self.inference_count}") self.models.clear() class ComputeApp(DirtyApp): """ Example dirty application for CPU-intensive computations. This demonstrates operations that would block HTTP workers but are fine in dirty workers. 
""" def __init__(self): self.computation_count = 0 def init(self): print(f"[ComputeApp] Initialized (pid: {__import__('os').getpid()})") def __call__(self, action, *args, **kwargs): method = getattr(self, action, None) if method is None or action.startswith('_'): raise ValueError(f"Unknown action: {action}") return method(*args, **kwargs) def fibonacci(self, n): """Compute fibonacci number (CPU-intensive for large n).""" self.computation_count += 1 if n <= 1: return {"n": n, "result": n, "computation_id": self.computation_count} a, b = 0, 1 for _ in range(2, n + 1): a, b = b, a + b return {"n": n, "result": b, "computation_id": self.computation_count} def prime_check(self, n): """Check if a number is prime (CPU-intensive for large n).""" self.computation_count += 1 if n < 2: is_prime = False elif n == 2: is_prime = True elif n % 2 == 0: is_prime = False else: is_prime = True for i in range(3, int(n**0.5) + 1, 2): if n % i == 0: is_prime = False break return {"n": n, "is_prime": is_prime, "computation_id": self.computation_count} def stats(self): """Get computation statistics.""" return {"total_computations": self.computation_count} def close(self): print(f"[ComputeApp] Shutting down. Total computations: {self.computation_count}") class SessionApp(DirtyApp): """ Example dirty application demonstrating stash (shared state). This shows how multiple dirty workers can share state through the arbiter's stash tables. All workers see the same data. 
""" # Declare stash tables used by this app (auto-created on startup) stashes = ["sessions", "counters"] def __init__(self): self.worker_pid = None def init(self): self.worker_pid = os.getpid() print(f"[SessionApp] Initialized on worker {self.worker_pid}") # Initialize a global counter if it doesn't exist if not stash.exists("counters", "requests"): stash.put("counters", "requests", 0) def __call__(self, action, *args, **kwargs): method = getattr(self, action, None) if method is None or action.startswith('_'): raise ValueError(f"Unknown action: {action}") return method(*args, **kwargs) def login(self, user_id, user_data): """Store user session in shared stash.""" session = { "user_id": user_id, "data": user_data, "logged_in_at": time.time(), "worker_pid": self.worker_pid, } stash.put("sessions", f"user:{user_id}", session) self._increment_counter() return {"status": "ok", "session": session} def logout(self, user_id): """Remove user session.""" key = f"user:{user_id}" if stash.exists("sessions", key): stash.delete("sessions", key) self._increment_counter() return {"status": "logged_out", "user_id": user_id} return {"status": "not_found", "user_id": user_id} def get_session(self, user_id): """Get user session - visible from any worker.""" session = stash.get("sessions", f"user:{user_id}") self._increment_counter() return { "session": session, "served_by_worker": self.worker_pid, } def list_sessions(self): """List all active sessions.""" keys = stash.keys("sessions", pattern="user:*") sessions = [] for key in keys: sessions.append(stash.get("sessions", key)) self._increment_counter() return { "sessions": sessions, "count": len(sessions), "served_by_worker": self.worker_pid, } def get_stats(self): """Get global request counter (shared across all workers).""" count = stash.get("counters", "requests", 0) return { "total_requests": count, "served_by_worker": self.worker_pid, } def _increment_counter(self): """Increment global request counter.""" current = 
stash.get("counters", "requests", 0) stash.put("counters", "requests", current + 1) def clear_all(self): """Clear all sessions (for testing).""" stash.clear("sessions") stash.put("counters", "requests", 0) return {"status": "cleared"} def close(self): print(f"[SessionApp] Shutting down worker {self.worker_pid}") benoitc-gunicorn-f5fb19e/examples/dirty_example/docker-compose.yml000066400000000000000000000037001514360242400256040ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. services: # Run the example tests (protocol, dirty app, worker integration) tests: build: context: ../.. dockerfile: examples/dirty_example/Dockerfile command: > bash -c " echo '=== Running Protocol Tests ===' && python examples/dirty_example/test_protocol.py && echo '' && echo '=== Running Dirty App Tests ===' && python examples/dirty_example/test_dirty_app.py && echo '' && echo '=== Running Worker Integration Tests ===' && python examples/dirty_example/test_worker_integration.py && echo '' && echo '=== All tests passed! ===' " # Run the full gunicorn server with dirty workers server: build: context: ../.. dockerfile: examples/dirty_example/Dockerfile ports: - "8001:8000" environment: - GUNICORN_BIND=0.0.0.0:8000 command: > gunicorn examples.dirty_example.wsgi_app:app -c examples/dirty_example/gunicorn_conf.py healthcheck: test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8000/')"] interval: 5s timeout: 5s retries: 5 start_period: 10s # Run integration test against the server integration-test: build: context: ../.. dockerfile: examples/dirty_example/Dockerfile depends_on: server: condition: service_healthy environment: - TEST_BASE_URL=http://server:8000 command: python examples/dirty_example/test_integration.py # Run stash integration test against the server stash-test: build: context: ../.. 
dockerfile: examples/dirty_example/Dockerfile depends_on: server: condition: service_healthy environment: - TEST_BASE_URL=http://server:8000 command: python examples/dirty_example/test_stash_integration.py benoitc-gunicorn-f5fb19e/examples/dirty_example/gunicorn_conf.py000066400000000000000000000026511514360242400253560ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Gunicorn configuration for Dirty Workers Example Run with: cd examples/dirty_example gunicorn wsgi_app:app -c gunicorn_conf.py """ # Basic settings # Use 0.0.0.0 for Docker, override with GUNICORN_BIND env var if needed import os bind = os.environ.get("GUNICORN_BIND", "127.0.0.1:8000") workers = 2 worker_class = "sync" timeout = 30 # Dirty arbiter settings dirty_apps = [ "examples.dirty_example.dirty_app:MLApp", "examples.dirty_example.dirty_app:ComputeApp", "examples.dirty_example.dirty_app:SessionApp", ] dirty_workers = 2 dirty_timeout = 300 dirty_graceful_timeout = 30 # Logging loglevel = "info" accesslog = "-" errorlog = "-" # Hooks for demonstration def on_starting(server): print("=== Gunicorn starting ===") def when_ready(server): print("=== Gunicorn ready ===") print(f"HTTP workers: {server.num_workers}") print(f"Dirty workers: {server.cfg.dirty_workers}") print(f"Dirty apps: {server.cfg.dirty_apps}") def on_dirty_starting(arbiter): print("=== Dirty arbiter starting ===") def dirty_post_fork(arbiter, worker): print(f"=== Dirty worker {worker.pid} forked ===") def dirty_worker_init(worker): print(f"=== Dirty worker {worker.pid} initialized apps ===") def dirty_worker_exit(arbiter, worker): print(f"=== Dirty worker {worker.pid} exiting ===") benoitc-gunicorn-f5fb19e/examples/dirty_example/test_dirty_app.py000066400000000000000000000076761514360242400255730ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
#!/usr/bin/env python """ Test script to demonstrate Dirty App functionality directly. This tests the dirty app without running the full gunicorn server. Run with: python examples/dirty_example/test_dirty_app.py """ import sys import os # Add parent directory to path sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) from examples.dirty_example.dirty_app import MLApp, ComputeApp def test_ml_app(): """Test the MLApp dirty application.""" print("=" * 60) print("Testing MLApp") print("=" * 60) # Create and initialize the app app = MLApp() print("\n1. Initializing app (loads default model)...") app.init() # List models print("\n2. Listing models...") result = app("list_models") print(f" Models: {result}") # Load another model print("\n3. Loading 'gpt-4' model...") result = app("load_model", "gpt-4") print(f" Result: {result}") # List models again print("\n4. Listing models again...") result = app("list_models") print(f" Models: {result}") # Run inference print("\n5. Running inference on 'default' model...") result = app("inference", "default", "Hello, world!") print(f" Result: {result}") # Run more inferences print("\n6. Running more inferences...") for i in range(3): result = app("inference", "gpt-4", f"Input data {i}") print(f" Inference {i+1}: {result['prediction']}") # Unload a model print("\n7. Unloading 'gpt-4' model...") result = app("unload_model", "gpt-4") print(f" Result: {result}") # Final stats print("\n8. Final stats...") result = app("list_models") print(f" {result}") # Close print("\n9. Closing app...") app.close() print("\n" + "=" * 60) print("MLApp test complete!") print("=" * 60) def test_compute_app(): """Test the ComputeApp dirty application.""" print("\n" + "=" * 60) print("Testing ComputeApp") print("=" * 60) # Create and initialize app = ComputeApp() app.init() # Fibonacci print("\n1. 
Computing Fibonacci numbers...") for n in [10, 20, 30, 40]: result = app("fibonacci", n) print(f" fib({n}) = {result['result']}") # Prime checks print("\n2. Checking prime numbers...") for n in [17, 100, 997, 1000]: result = app("prime_check", n) status = "is prime" if result['is_prime'] else "is NOT prime" print(f" {n} {status}") # Stats print("\n3. Stats...") result = app("stats") print(f" {result}") # Close app.close() print("\n" + "=" * 60) print("ComputeApp test complete!") print("=" * 60) def test_error_handling(): """Test error handling in dirty apps.""" print("\n" + "=" * 60) print("Testing Error Handling") print("=" * 60) app = MLApp() app.init() # Try to run inference on non-existent model print("\n1. Trying inference on non-existent model...") try: app("inference", "nonexistent", "data") except ValueError as e: print(f" Caught expected error: {e}") # Try unknown action print("\n2. Trying unknown action...") try: app("unknown_action") except ValueError as e: print(f" Caught expected error: {e}") # Try private method print("\n3. Trying private method...") try: app("_load_model", "test") except ValueError as e: print(f" Caught expected error: {e}") app.close() print("\n" + "=" * 60) print("Error handling test complete!") print("=" * 60) if __name__ == "__main__": print("\n" + "#" * 60) print("# Dirty App Demonstration") print("#" * 60) test_ml_app() test_compute_app() test_error_handling() print("\n" + "#" * 60) print("# All tests passed!") print("#" * 60 + "\n") benoitc-gunicorn-f5fb19e/examples/dirty_example/test_integration.py000066400000000000000000000042041514360242400261030ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. #!/usr/bin/env python """ Integration test for the dirty example server. This tests that the full gunicorn server with dirty workers responds correctly to HTTP requests. 
Run with: python examples/dirty_example/test_integration.py [base_url] Default base_url is http://localhost:8000 """ import sys import os import json import urllib.request import urllib.error def test_endpoint(base, path, expected_key=None): """Test an endpoint and check for expected key in response.""" url = base + path print(f"Testing: {url}") try: with urllib.request.urlopen(url, timeout=10) as resp: data = json.loads(resp.read()) print(f" Response: {str(data)[:200]}") if expected_key and expected_key not in data: print(f" ERROR: Expected key '{expected_key}' not found!") return False return True except urllib.error.HTTPError as e: print(f" HTTP ERROR {e.code}: {e.reason}") return False except Exception as e: print(f" ERROR: {e}") return False def main(): # Get base URL from env or command line base = os.environ.get("TEST_BASE_URL", "http://localhost:8000") if len(sys.argv) > 1: base = sys.argv[1] print(f"Testing dirty example server at: {base}") print("=" * 60) # Define tests: (path, expected_key_in_response) tests = [ ("/", "endpoints"), ("/models", "models"), ("/load?name=test-model", "status"), ("/inference?model=default&data=hello", "prediction"), ("/fibonacci?n=20", "result"), ("/prime?n=17", "is_prime"), ("/stats", "ml_app"), ("/unload?name=test-model", "status"), ] failed = 0 for path, key in tests: if not test_endpoint(base, path, key): failed += 1 print() print("=" * 60) if failed: print(f"FAILED: {failed} tests failed") sys.exit(1) else: print("SUCCESS: All integration tests passed!") if __name__ == "__main__": main() benoitc-gunicorn-f5fb19e/examples/dirty_example/test_protocol.py000066400000000000000000000174551514360242400254350ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. #!/usr/bin/env python """ Test script to demonstrate the Dirty Binary Protocol layer. 
The binary protocol uses a 16-byte header + TLV-encoded payloads for efficient binary data transfer without base64 encoding overhead. Run with: python examples/dirty_example/test_protocol.py """ import sys import os import asyncio import socket sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) from gunicorn.dirty.protocol import ( BinaryProtocol, DirtyProtocol, make_request, make_response, make_error_response, HEADER_SIZE, MAGIC, VERSION, ) from gunicorn.dirty.errors import DirtyError, DirtyTimeoutError def test_protocol_encode_decode(): """Test protocol encoding and decoding.""" print("=" * 60) print("Testing Binary Protocol Encode/Decode") print("=" * 60) # Test request with integer ID (recommended for binary protocol) print("\n1. Creating a request message...") request = make_request( request_id=12345, # Integer IDs are efficient app_path="myapp.ml:MLApp", action="inference", args=("model1",), kwargs={"temperature": 0.7} ) print(f" Request: {request}") # Encode using binary protocol print("\n2. Encoding message with binary protocol...") encoded = BinaryProtocol._encode_from_dict(request) print(f" Encoded length: {len(encoded)} bytes") print(f" Header ({HEADER_SIZE} bytes): {encoded[:HEADER_SIZE].hex()}") print(f" Magic: {MAGIC!r}") print(f" Version: {VERSION}") # Decode header print("\n3. Decoding header...") msg_type, request_id, payload_len = BinaryProtocol.decode_header(encoded[:HEADER_SIZE]) print(f" Message type: {msg_type} (0x{msg_type:02x})") print(f" Request ID: {request_id}") print(f" Payload length: {payload_len} bytes") # Decode full message print("\n4. 
Decoding full message...") msg_type_str, req_id, payload = BinaryProtocol.decode_message(encoded) print(f" Type: {msg_type_str}") print(f" Request ID: {req_id}") print(f" Payload: {payload}") def test_binary_data_handling(): """Test binary data handling - the main advantage of binary protocol.""" print("\n" + "=" * 60) print("Testing Binary Data Handling") print("=" * 60) # Create binary data (e.g., image, audio, model weights) binary_data = bytes(range(256)) # All byte values print(f"\n1. Original binary data: {len(binary_data)} bytes") print(f" First 16 bytes: {binary_data[:16].hex()}") # Create response with binary data (no base64 needed!) print("\n2. Encoding binary data in response...") response = make_response(67890, {"image_data": binary_data, "format": "raw"}) encoded = BinaryProtocol._encode_from_dict(response) print(f" Encoded total size: {len(encoded)} bytes") # Decode and verify print("\n3. Decoding binary data...") msg_type_str, req_id, payload = BinaryProtocol.decode_message(encoded) recovered_data = payload["result"]["image_data"] print(f" Recovered data size: {len(recovered_data)} bytes") print(f" Data matches: {recovered_data == binary_data}") print(f" First 16 bytes: {recovered_data[:16].hex()}") def test_protocol_response(): """Test response message building.""" print("\n" + "=" * 60) print("Testing Response Messages") print("=" * 60) # Success response print("\n1. Creating success response...") response = make_response(12345, {"result": "Hello, World!", "confidence": 0.95}) print(f" Response: {response}") # Error response print("\n2. 
Creating error response...") error = DirtyTimeoutError("Operation timed out", timeout=30) error_response = make_error_response(12345, error) print(f" Error response: {error_response}") def test_socket_communication(): """Test sync protocol over actual sockets.""" print("\n" + "=" * 60) print("Testing Socket Communication") print("=" * 60) # Create a socket pair server_sock, client_sock = socket.socketpair() try: # Send a request print("\n1. Sending request over socket...") request = make_request( request_id=100001, app_path="test:App", action="compute", args=(1, 2, 3), kwargs={} ) DirtyProtocol.write_message(client_sock, request) print(f" Sent: {request}") # Receive the request print("\n2. Receiving request...") received = DirtyProtocol.read_message(server_sock) print(f" Received: {received}") print(f" Request ID: {received['id']}") # Send a response with binary data print("\n3. Sending response with binary data...") binary_result = b"\x00\x01\x02\x03\xff\xfe\xfd\xfc" response = make_response(100001, {"data": binary_result, "sum": 6}) DirtyProtocol.write_message(server_sock, response) print(f" Sent binary data: {binary_result.hex()}") # Receive the response print("\n4. Receiving response...") received = DirtyProtocol.read_message(client_sock) print(f" Received binary data: {received['result']['data'].hex()}") print(f" Sum: {received['result']['sum']}") finally: server_sock.close() client_sock.close() async def test_async_communication(): """Test async protocol over streams.""" print("\n" + "=" * 60) print("Testing Async Communication") print("=" * 60) # Use a pipe for async testing read_fd, write_fd = os.pipe() try: # Create message request = make_request( request_id=200001, app_path="async:App", action="process", args=("data",), kwargs={"async": True} ) # Write to pipe print("\n1. 
Writing async message...") encoded = BinaryProtocol._encode_from_dict(request) os.write(write_fd, encoded) os.close(write_fd) write_fd = None print(f" Wrote {len(encoded)} bytes") # Read from pipe using async reader print("\n2. Reading async message...") reader = asyncio.StreamReader() data = os.read(read_fd, len(encoded)) reader.feed_data(data) reader.feed_eof() received = await DirtyProtocol.read_message_async(reader) print(f" Received: {received}") print(f" Request ID: {received['id']}") finally: if write_fd is not None: os.close(write_fd) os.close(read_fd) def test_error_serialization(): """Test error serialization and deserialization.""" print("\n" + "=" * 60) print("Testing Error Serialization") print("=" * 60) # Create various errors errors = [ DirtyError("Generic error", {"code": 500}), DirtyTimeoutError("Timeout!", timeout=60), ] for error in errors: print(f"\n1. Original error: {error}") print(f" Type: {type(error).__name__}") # Serialize error_dict = error.to_dict() print(f"2. Serialized: {error_dict}") # Deserialize restored = DirtyError.from_dict(error_dict) print(f"3. Restored: {restored}") print(f" Type: {type(restored).__name__}") print(f" Match type: {type(restored) == type(error)}") if __name__ == "__main__": print("\n" + "#" * 60) print("# Dirty Binary Protocol Demonstration") print("#" * 60) test_protocol_encode_decode() test_binary_data_handling() test_protocol_response() test_socket_communication() asyncio.run(test_async_communication()) test_error_serialization() print("\n" + "#" * 60) print("# All protocol tests passed!") print("#" * 60 + "\n") benoitc-gunicorn-f5fb19e/examples/dirty_example/test_stash_integration.py000066400000000000000000000150711514360242400273110ustar00rootroot00000000000000#!/usr/bin/env python3 # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Integration tests for stash (shared state) functionality. 
These tests verify that stash works correctly across multiple dirty workers, demonstrating that state is truly shared. Run with Docker: docker-compose up --build docker-compose exec app python test_stash_integration.py """ import json import os import sys import urllib.request import urllib.error BASE_URL = os.environ.get("TEST_BASE_URL", "http://localhost:8000") def request(path): """Make HTTP request and return JSON response.""" url = f"{BASE_URL}{path}" try: with urllib.request.urlopen(url, timeout=10) as resp: return json.loads(resp.read().decode()) except urllib.error.HTTPError as e: return {"error": str(e), "code": e.code} except urllib.error.URLError as e: return {"error": str(e)} def test_stash_shared_state(): """Test that stash state is shared across workers.""" print("\n=== Test: Stash Shared State ===") # Clear any existing state result = request("/session/clear") print(f"Clear: {result}") # Login a user result = request("/session/login?user_id=100&name=Alice") print(f"Login Alice: {result}") assert result.get("status") == "ok", f"Login failed: {result}" worker1 = result.get("session", {}).get("worker_pid") print(f" -> Handled by worker: {worker1}") # Make multiple requests to potentially hit different workers # and verify they all see the same session workers_seen = set() for i in range(5): result = request("/session/get?user_id=100") worker = result.get("served_by_worker") workers_seen.add(worker) session = result.get("session") assert session is not None, f"Session not found on request {i+1}" assert session.get("data", {}).get("name") == "Alice", f"Wrong session data" print(f" -> Session visible from workers: {workers_seen}") print("PASSED: State is shared across workers") return True def test_stash_counter(): """Test that global counter increments correctly.""" print("\n=== Test: Global Counter ===") # Clear state request("/session/clear") # Get initial stats result = request("/session/stats") initial = result.get("total_requests", 0) print(f"Initial 
count: {initial}") # Make several requests for i in range(5): request(f"/session/login?user_id={i}&name=User{i}") # Check counter increased result = request("/session/stats") final = result.get("total_requests", 0) print(f"Final count: {final}") # Each login increments counter by 1 assert final >= initial + 5, f"Counter didn't increment enough: {initial} -> {final}" print("PASSED: Global counter works across workers") return True def test_stash_list_sessions(): """Test listing all sessions.""" print("\n=== Test: List Sessions ===") # Clear and create some sessions request("/session/clear") request("/session/login?user_id=1&name=Alice") request("/session/login?user_id=2&name=Bob") request("/session/login?user_id=3&name=Charlie") # List all sessions result = request("/session/list") sessions = result.get("sessions", []) count = result.get("count", 0) print(f"Sessions: {count}") for s in sessions: print(f" - user:{s.get('user_id')} = {s.get('data', {}).get('name')}") assert count == 3, f"Expected 3 sessions, got {count}" print("PASSED: List sessions works") return True def test_stash_logout(): """Test session deletion.""" print("\n=== Test: Logout (Delete) ===") # Clear and create a session request("/session/clear") request("/session/login?user_id=999&name=TestUser") # Verify it exists result = request("/session/get?user_id=999") assert result.get("session") is not None, "Session should exist" # Logout result = request("/session/logout?user_id=999") print(f"Logout: {result}") assert result.get("status") == "logged_out", f"Logout failed: {result}" # Verify it's gone result = request("/session/get?user_id=999") assert result.get("session") is None, "Session should be deleted" print("PASSED: Logout deletes session") return True def test_multiple_workers_see_updates(): """Test that updates from one worker are visible to others.""" print("\n=== Test: Cross-Worker Updates ===") request("/session/clear") # Create sessions and track which workers handled them workers = {} for 
i in range(10): result = request(f"/session/login?user_id={i}&name=User{i}") worker = result.get("session", {}).get("worker_pid") workers[i] = worker unique_workers = set(workers.values()) print(f"Sessions created by workers: {unique_workers}") # Now read all sessions and verify all workers can see all data result = request("/session/list") count = result.get("count", 0) served_by = result.get("served_by_worker") print(f"List returned {count} sessions, served by worker {served_by}") assert count == 10, f"Expected 10 sessions, got {count}" print("PASSED: All workers see all updates") return True def main(): """Run all tests.""" print("=" * 60) print("Stash Integration Tests") print("=" * 60) # Check server is running try: result = request("/") if "error" in result and "Connection refused" in str(result.get("error", "")): print("ERROR: Server not running. Start with: docker-compose up") return 1 if not result.get("dirty_enabled"): print("ERROR: Dirty workers not enabled") return 1 print(f"Server running, dirty workers enabled") except Exception as e: print(f"ERROR: Cannot connect to server: {e}") return 1 # Run tests tests = [ test_stash_shared_state, test_stash_counter, test_stash_list_sessions, test_stash_logout, test_multiple_workers_see_updates, ] passed = 0 failed = 0 for test in tests: try: if test(): passed += 1 else: failed += 1 except AssertionError as e: print(f"FAILED: {e}") failed += 1 except Exception as e: print(f"ERROR: {e}") failed += 1 print("\n" + "=" * 60) print(f"Results: {passed} passed, {failed} failed") print("=" * 60) return 0 if failed == 0 else 1 if __name__ == "__main__": sys.exit(main()) benoitc-gunicorn-f5fb19e/examples/dirty_example/test_worker_integration.py000066400000000000000000000207371514360242400275050ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. #!/usr/bin/env python """ Integration test demonstrating DirtyWorker execution. 
This test demonstrates how the DirtyWorker loads apps and handles requests without actually forking processes (suitable for a quick test). Run with: python examples/dirty_example/test_worker_integration.py """ import sys import os import asyncio import tempfile sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) from gunicorn.config import Config from gunicorn.dirty.worker import DirtyWorker from gunicorn.dirty.protocol import DirtyProtocol, BinaryProtocol, make_request, HEADER_SIZE class MockLog: """Mock logger for testing.""" def debug(self, msg, *args): print(f"[DEBUG] {msg % args if args else msg}") def info(self, msg, *args): print(f"[INFO] {msg % args if args else msg}") def warning(self, msg, *args): print(f"[WARN] {msg % args if args else msg}") def error(self, msg, *args): print(f"[ERROR] {msg % args if args else msg}") def close_on_exec(self): pass def reopen_files(self): pass class MockWriter: """Mock StreamWriter that captures written responses.""" def __init__(self): self.messages = [] self._buffer = b"" def write(self, data): self._buffer += data async def drain(self): # Decode messages from buffer using binary protocol while len(self._buffer) >= HEADER_SIZE: _, _, length = BinaryProtocol.decode_header(self._buffer[:HEADER_SIZE]) total_size = HEADER_SIZE + length if len(self._buffer) >= total_size: msg_data = self._buffer[:total_size] self._buffer = self._buffer[total_size:] msg_type_str, request_id, payload_dict = BinaryProtocol.decode_message(msg_data) result = {"type": msg_type_str, "id": request_id} result.update(payload_dict) self.messages.append(result) else: break def get_last_response(self): """Get the last response message.""" return self.messages[-1] if self.messages else None async def test_worker_request_handling(): """Test that a worker can load apps and handle requests.""" print("=" * 60) print("Testing DirtyWorker Request Handling") print("=" * 60) # Create config and worker cfg = Config() 
log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=["examples.dirty_example.dirty_app:MLApp"], cfg=cfg, log=log, socket_path=socket_path ) # Load apps (normally done in init_process after fork) print("\n1. Loading apps...") worker.load_apps() print(f" Loaded apps: {list(worker.apps.keys())}") # Test execute directly print("\n2. Testing execute() - list_models...") result = await worker.execute( "examples.dirty_example.dirty_app:MLApp", "list_models", [], {} ) print(f" Result: {result}") # Test handle_request with a proper request message print("\n3. Testing handle_request() - load_model...") request = make_request( request_id=1001, app_path="examples.dirty_example.dirty_app:MLApp", action="load_model", args=("gpt-4",), kwargs={} ) writer = MockWriter() await worker.handle_request(request, writer) response = writer.get_last_response() print(f" Response type: {response['type']}") print(f" Result: {response.get('result', response.get('error'))}") # Test inference print("\n4. Testing handle_request() - inference...") request = make_request( request_id=1002, app_path="examples.dirty_example.dirty_app:MLApp", action="inference", args=("default", "Hello AI!"), kwargs={} ) writer = MockWriter() await worker.handle_request(request, writer) response = writer.get_last_response() print(f" Response type: {response['type']}") print(f" Result: {response.get('result', response.get('error'))}") # Test error handling print("\n5. Testing error handling - unknown action...") request = make_request( request_id=1003, app_path="examples.dirty_example.dirty_app:MLApp", action="nonexistent_action", args=(), kwargs={} ) writer = MockWriter() await worker.handle_request(request, writer) response = writer.get_last_response() print(f" Response type: {response['type']}") print(f" Error: {response.get('error', {}).get('message')}") # Test app not found print("\n6. 
Testing error handling - app not found...") request = make_request( request_id=1004, app_path="nonexistent:App", action="test", args=(), kwargs={} ) writer = MockWriter() await worker.handle_request(request, writer) response = writer.get_last_response() print(f" Response type: {response['type']}") print(f" Error type: {response.get('error', {}).get('error_type')}") # Cleanup print("\n7. Cleanup...") worker._cleanup() print(" Done!") async def test_worker_with_compute_app(): """Test worker with ComputeApp.""" print("\n" + "=" * 60) print("Testing DirtyWorker with ComputeApp") print("=" * 60) cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=["examples.dirty_example.dirty_app:ComputeApp"], cfg=cfg, log=log, socket_path=socket_path ) worker.load_apps() # Fibonacci print("\n1. Computing Fibonacci(30)...") result = await worker.execute( "examples.dirty_example.dirty_app:ComputeApp", "fibonacci", [30], {} ) print(f" Result: {result}") # Prime check print("\n2. Checking if 997 is prime...") result = await worker.execute( "examples.dirty_example.dirty_app:ComputeApp", "prime_check", [997], {} ) print(f" Result: {result}") worker._cleanup() async def test_multiple_apps(): """Test worker with multiple apps loaded.""" print("\n" + "=" * 60) print("Testing DirtyWorker with Multiple Apps") print("=" * 60) cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[ "examples.dirty_example.dirty_app:MLApp", "examples.dirty_example.dirty_app:ComputeApp", ], cfg=cfg, log=log, socket_path=socket_path ) worker.load_apps() print(f"\n1. Loaded {len(worker.apps)} apps: {list(worker.apps.keys())}") # Use both apps print("\n2. 
Using MLApp for inference...") result = await worker.execute( "examples.dirty_example.dirty_app:MLApp", "inference", ["default", "test input"], {} ) print(f" MLApp result: {result['prediction']}") print("\n3. Using ComputeApp for fibonacci...") result = await worker.execute( "examples.dirty_example.dirty_app:ComputeApp", "fibonacci", [15], {} ) print(f" ComputeApp result: fib(15) = {result['result']}") worker._cleanup() if __name__ == "__main__": print("\n" + "#" * 60) print("# DirtyWorker Integration Demonstration") print("#" * 60) asyncio.run(test_worker_request_handling()) asyncio.run(test_worker_with_compute_app()) asyncio.run(test_multiple_apps()) print("\n" + "#" * 60) print("# All integration tests passed!") print("#" * 60 + "\n") benoitc-gunicorn-f5fb19e/examples/dirty_example/wsgi_app.py000066400000000000000000000174641514360242400243460ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Example WSGI Application that uses Dirty Workers This demonstrates how HTTP workers can call dirty workers for heavy operations like ML inference. 
Run with: cd examples/dirty_example gunicorn wsgi_app:app -c gunicorn_conf.py """ import json import os from urllib.parse import parse_qs def get_dirty_client(): """Get the dirty client, with fallback for when dirty workers aren't enabled.""" try: from gunicorn.dirty import get_dirty_client as _get_dirty_client return _get_dirty_client() except Exception as e: return None def app(environ, start_response): """WSGI application that demonstrates dirty worker integration.""" path = environ.get('PATH_INFO', '/') method = environ.get('REQUEST_METHOD', 'GET') # Parse query string query = parse_qs(environ.get('QUERY_STRING', '')) # Get dirty client client = get_dirty_client() try: if path == '/': result = { "message": "Dirty Workers Demo", "dirty_enabled": client is not None, "pid": os.getpid(), "endpoints": { "/models": "List loaded models", "/load?name=MODEL": "Load a model", "/inference?model=NAME&data=INPUT": "Run inference", "/unload?name=MODEL": "Unload a model", "/fibonacci?n=NUMBER": "Compute fibonacci", "/prime?n=NUMBER": "Check if prime", "/stats": "Get dirty worker stats", "/session/login?user_id=ID&name=NAME": "Login user (stash demo)", "/session/get?user_id=ID": "Get session (stash demo)", "/session/list": "List all sessions (stash demo)", "/session/logout?user_id=ID": "Logout user (stash demo)", "/session/stats": "Get stash stats (stash demo)", } } elif path == '/models': if client is None: result = {"error": "Dirty workers not enabled"} else: result = client.execute( "examples.dirty_example.dirty_app:MLApp", "list_models" ) elif path == '/load': name = query.get('name', ['model1'])[0] if client is None: result = {"error": "Dirty workers not enabled"} else: result = client.execute( "examples.dirty_example.dirty_app:MLApp", "load_model", name ) elif path == '/inference': model = query.get('model', ['default'])[0] data = query.get('data', ['test input'])[0] if client is None: result = {"error": "Dirty workers not enabled"} else: result = client.execute( 
"examples.dirty_example.dirty_app:MLApp", "inference", model, data ) elif path == '/unload': name = query.get('name', ['model1'])[0] if client is None: result = {"error": "Dirty workers not enabled"} else: result = client.execute( "examples.dirty_example.dirty_app:MLApp", "unload_model", name ) elif path == '/fibonacci': n = int(query.get('n', ['10'])[0]) if client is None: result = {"error": "Dirty workers not enabled"} else: result = client.execute( "examples.dirty_example.dirty_app:ComputeApp", "fibonacci", n ) elif path == '/prime': n = int(query.get('n', ['17'])[0]) if client is None: result = {"error": "Dirty workers not enabled"} else: result = client.execute( "examples.dirty_example.dirty_app:ComputeApp", "prime_check", n ) elif path == '/stats': if client is None: result = {"error": "Dirty workers not enabled"} else: ml_stats = client.execute( "examples.dirty_example.dirty_app:MLApp", "list_models" ) compute_stats = client.execute( "examples.dirty_example.dirty_app:ComputeApp", "stats" ) result = { "ml_app": ml_stats, "compute_app": compute_stats, "http_worker_pid": os.getpid(), } # ===================================================================== # Session endpoints (stash demo) # ===================================================================== elif path == '/session/login': user_id = query.get('user_id', ['1'])[0] name = query.get('name', ['Anonymous'])[0] if client is None: result = {"error": "Dirty workers not enabled"} else: result = client.execute( "examples.dirty_example.dirty_app:SessionApp", "login", user_id=user_id, user_data={"name": name} ) elif path == '/session/get': user_id = query.get('user_id', ['1'])[0] if client is None: result = {"error": "Dirty workers not enabled"} else: result = client.execute( "examples.dirty_example.dirty_app:SessionApp", "get_session", user_id=user_id ) elif path == '/session/list': if client is None: result = {"error": "Dirty workers not enabled"} else: result = client.execute( 
"examples.dirty_example.dirty_app:SessionApp", "list_sessions" ) elif path == '/session/logout': user_id = query.get('user_id', ['1'])[0] if client is None: result = {"error": "Dirty workers not enabled"} else: result = client.execute( "examples.dirty_example.dirty_app:SessionApp", "logout", user_id=user_id ) elif path == '/session/stats': if client is None: result = {"error": "Dirty workers not enabled"} else: result = client.execute( "examples.dirty_example.dirty_app:SessionApp", "get_stats" ) elif path == '/session/clear': if client is None: result = {"error": "Dirty workers not enabled"} else: result = client.execute( "examples.dirty_example.dirty_app:SessionApp", "clear_all" ) else: start_response('404 Not Found', [('Content-Type', 'application/json')]) return [json.dumps({"error": "Not found"}).encode()] # Success response start_response('200 OK', [('Content-Type', 'application/json')]) return [json.dumps(result, indent=2).encode()] except Exception as e: start_response('500 Internal Server Error', [('Content-Type', 'application/json')]) return [json.dumps({ "error": str(e), "type": type(e).__name__ }).encode()] benoitc-gunicorn-f5fb19e/examples/echo.py000066400000000000000000000012161514360242400205710ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
# # Example code from Eventlet sources from gunicorn import __version__ def app(environ, start_response): """Simplest possible application object""" if environ['REQUEST_METHOD'].upper() != 'POST': data = b'Hello, World!\n' else: data = environ['wsgi.input'].read() status = '200 OK' response_headers = [ ('Content-type', 'text/plain'), ('Content-Length', str(len(data))), ('X-Gunicorn-Version', __version__) ] start_response(status, response_headers) return iter([data]) benoitc-gunicorn-f5fb19e/examples/embedding_service/000077500000000000000000000000001514360242400227375ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/examples/embedding_service/Dockerfile000066400000000000000000000006471514360242400247400ustar00rootroot00000000000000FROM python:3.12-slim WORKDIR /app # Install dependencies RUN pip install --no-cache-dir \ sentence-transformers \ fastapi \ pydantic # Copy gunicorn source COPY . /app/gunicorn-src RUN pip install /app/gunicorn-src # Copy app COPY examples/embedding_service /app/embedding_service ENV PYTHONPATH=/app EXPOSE 8000 CMD ["gunicorn", "embedding_service.main:app", "-c", "embedding_service/gunicorn_conf.py"] benoitc-gunicorn-f5fb19e/examples/embedding_service/README.md000066400000000000000000000066371514360242400242320ustar00rootroot00000000000000# Embedding Service Example A FastAPI-based text embedding service using sentence-transformers, powered by gunicorn's dirty workers for efficient ML model management. 
## Overview This example demonstrates how to build a production-ready embedding API that: - Keeps ML models loaded in memory across requests (dirty workers) - Handles HTTP efficiently with async FastAPI (ASGI workers) - Provides batch embedding for multiple texts - Includes Docker-based deployment and testing ## Architecture ``` ┌─────────────────┐ ┌──────────────────┐ ┌─────────────────────┐ │ HTTP Clients │────►│ FastAPI (ASGI) │────►│ DirtyWorker │ │ │ │ - /embed │ │ - sentence- │ │ │◄────│ - /health │◄────│ transformers │ └─────────────────┘ └──────────────────┘ │ - Model in memory │ └─────────────────────┘ ``` **Why dirty workers?** - ML models are expensive to load (several seconds) - Dirty workers load the model once at startup - HTTP workers remain lightweight and responsive - Model stays in memory, serving many requests ## Quick Start ### With Docker (recommended) ```bash cd examples/embedding_service docker compose up --build ``` ### Local Development ```bash # Install dependencies pip install sentence-transformers fastapi pydantic # Run with gunicorn gunicorn examples.embedding_service.main:app \ -c examples/embedding_service/gunicorn_conf.py ``` ## API Reference ### POST /embed Generate embeddings for a list of texts. **Request:** ```json { "texts": ["Hello world", "Another sentence"] } ``` **Response:** ```json { "embeddings": [ [0.123, -0.456, ...], [0.789, -0.012, ...] ] } ``` **Example:** ```bash curl -X POST http://localhost:8000/embed \ -H "Content-Type: application/json" \ -d '{"texts": ["Hello world"]}' ``` ### GET /health Health check endpoint. 
**Response:** ```json {"status": "ok"} ``` ## Configuration Edit `gunicorn_conf.py` to adjust: | Setting | Default | Description | |---------|---------|-------------| | `workers` | 2 | Number of HTTP workers | | `dirty_workers` | 1 | Number of ML model workers | | `dirty_timeout` | 60 | Max seconds per inference | | `bind` | 0.0.0.0:8000 | Listen address | ## Model Uses [all-MiniLM-L6-v2](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2): - 384-dimensional embeddings - Fast inference (~14K sentences/sec on GPU) - Good quality for semantic search - ~90MB download To use a different model, edit `embedding_app.py`: ```python self.model = SentenceTransformer('your-model-name') ``` ## Testing Run the integration tests: ```bash # Start the service first docker compose up -d # Run tests pip install requests numpy python test_embedding.py ``` ## Production Considerations 1. **GPU Support**: Add CUDA to the Dockerfile for faster inference 2. **Scaling**: Increase `dirty_workers` for more concurrent embeddings 3. **Caching**: Add Redis caching for repeated texts 4. **Rate Limiting**: Add FastAPI middleware for rate limiting 5. **Monitoring**: Add Prometheus metrics endpoint benoitc-gunicorn-f5fb19e/examples/embedding_service/__init__.py000066400000000000000000000002061514360242400250460ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # Embedding service package benoitc-gunicorn-f5fb19e/examples/embedding_service/docker-compose.yml000066400000000000000000000006141514360242400263750ustar00rootroot00000000000000services: embedding-service: build: context: ../.. 
dockerfile: examples/embedding_service/Dockerfile ports: - "8000:8000" healthcheck: test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://127.0.0.1:8000/health', timeout=5)"] interval: 10s timeout: 5s retries: 5 start_period: 30s # Model loading time benoitc-gunicorn-f5fb19e/examples/embedding_service/embedding_app.py000066400000000000000000000007341514360242400260730ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.dirty.app import DirtyApp class EmbeddingApp(DirtyApp): def init(self): from sentence_transformers import SentenceTransformer self.model = SentenceTransformer('all-MiniLM-L6-v2') def embed(self, texts): embeddings = self.model.encode(texts) return embeddings.tolist() def close(self): del self.model benoitc-gunicorn-f5fb19e/examples/embedding_service/gunicorn_conf.py000066400000000000000000000004341514360242400261430ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. bind = "0.0.0.0:8000" workers = 2 worker_class = "asgi" # Dirty worker config dirty_apps = ["embedding_service.embedding_app:EmbeddingApp"] dirty_workers = 1 dirty_timeout = 60 benoitc-gunicorn-f5fb19e/examples/embedding_service/main.py000066400000000000000000000013231514360242400242340ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from fastapi import FastAPI from pydantic import BaseModel from gunicorn.dirty.client import get_dirty_client app = FastAPI() class EmbedRequest(BaseModel): texts: list[str] class EmbedResponse(BaseModel): embeddings: list[list[float]] @app.post("/embed", response_model=EmbedResponse) async def embed(request: EmbedRequest): client = get_dirty_client() result = client.execute( "embedding_service.embedding_app:EmbeddingApp", "embed", request.texts ) return EmbedResponse(embeddings=result) @app.get("/health") async def health(): return {"status": "ok"} benoitc-gunicorn-f5fb19e/examples/embedding_service/requirements.txt000066400000000000000000000000661514360242400262250ustar00rootroot00000000000000sentence-transformers fastapi pydantic requests numpy benoitc-gunicorn-f5fb19e/examples/embedding_service/test_embedding.py000066400000000000000000000021331514360242400262650ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
import os import requests import numpy as np def test_embedding_endpoint(): base_url = os.environ.get("EMBEDDING_SERVICE_URL", "http://127.0.0.1:8000") url = f"{base_url}/embed" # Test single text response = requests.post(url, json={"texts": ["Hello world"]}) assert response.status_code == 200 data = response.json() assert len(data["embeddings"]) == 1 assert len(data["embeddings"][0]) == 384 # MiniLM dimension # Test batch texts = ["First sentence", "Second sentence", "Third one"] response = requests.post(url, json={"texts": texts}) assert response.status_code == 200 data = response.json() assert len(data["embeddings"]) == 3 # Test similarity (same text = same embedding) response = requests.post(url, json={"texts": ["test", "test"]}) emb1, emb2 = response.json()["embeddings"] assert np.allclose(emb1, emb2, rtol=1e-5, atol=1e-6) print("All tests passed!") if __name__ == "__main__": test_embedding_endpoint() benoitc-gunicorn-f5fb19e/examples/example_config.py000066400000000000000000000170231514360242400226360ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # Sample Gunicorn configuration file. # # Server socket # # bind - The socket to bind. # # A string of the form: 'HOST', 'HOST:PORT', 'unix:PATH'. # An IP is a valid HOST. # # backlog - The number of pending connections. This refers # to the number of clients that can be waiting to be # served. Exceeding this number results in the client # getting an error when attempting to connect. It should # only affect servers under significant load. # # Must be a positive integer. Generally set in the 64-2048 # range. # bind = '127.0.0.1:8000' backlog = 2048 # # Worker processes # # workers - The number of worker processes that this server # should keep alive for handling requests. # # A positive integer generally in the 2-4 x $(NUM_CORES) # range. You'll want to vary this a bit to find the best # for your particular application's work load. 
# # worker_class - The type of workers to use. The default # sync class should handle most 'normal' types of work # loads. You'll want to read # https://gunicorn.org/design/#choosing-a-worker-type # for information on when you might want to choose one # of the other worker classes. # # A string referring to a Python path to a subclass of # gunicorn.workers.base.Worker. The default provided values # can be seen at # https://gunicorn.org/reference/settings/#worker_class # # worker_connections - For the gevent and gthread worker classes # this limits the maximum number of simultaneous clients that # a single process can handle. # # A positive integer generally set to around 1000. # # timeout - If a worker does not notify the master process in this # number of seconds it is killed and a new worker is spawned # to replace it. # # Generally set to thirty seconds. Only set this noticeably # higher if you're sure of the repercussions for sync workers. # For the non sync workers it just means that the worker # process is still communicating and is not tied to the length # of time required to handle a single request. # # keepalive - The number of seconds to wait for the next request # on a Keep-Alive HTTP connection. # # A positive integer. Generally set in the 1-5 seconds range. # workers = 1 worker_class = 'sync' worker_connections = 1000 timeout = 30 keepalive = 2 # # spew - Install a trace function that spews every line of Python # that is executed when running the server. This is the # nuclear option. # # True or False # spew = False # # Server mechanics # # daemon - Detach the main Gunicorn process from the controlling # terminal with a standard fork/fork sequence. # # True or False # # raw_env - Pass environment variables to the execution environment. # # pidfile - The path to a pid file to write # # A path string or None to not write a pid file. # # user - Switch worker processes to run as this user. 
# # A valid user id (as an integer) or the name of a user that # can be retrieved with a call to pwd.getpwnam(value) or None # to not change the worker process user. # # group - Switch worker process to run as this group. # # A valid group id (as an integer) or the name of a user that # can be retrieved with a call to pwd.getgrnam(value) or None # to change the worker processes group. # # umask - A mask for file permissions written by Gunicorn. Note that # this affects unix socket permissions. # # A valid value for the os.umask(mode) call or a string # compatible with int(value, 0) (0 means Python guesses # the base, so values like "0", "0xFF", "0022" are valid # for decimal, hex, and octal representations) # # tmp_upload_dir - A directory to store temporary request data when # requests are read. This will most likely be disappearing soon. # # A path to a directory where the process owner can write. Or # None to signal that Python should choose one on its own. # daemon = False raw_env = [ 'DJANGO_SECRET_KEY=something', 'SPAM=eggs', ] pidfile = None umask = 0 user = None group = None tmp_upload_dir = None # # Logging # # logfile - The path to a log file to write to. # # A path string. "-" means log to stdout. # # loglevel - The granularity of log output # # A string of "debug", "info", "warning", "error", "critical" # errorlog = '-' loglevel = 'info' accesslog = '-' access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"' # # Process naming # # proc_name - A base to use with setproctitle to change the way # that Gunicorn processes are reported in the system process # table. This affects things like 'ps' and 'top'. If you're # going to be running more than one instance of Gunicorn you'll # probably want to set a name to tell them apart. This requires # that you install the setproctitle module. # # A string or None to choose a default of something like 'gunicorn'. 
# proc_name = None # # Server hooks # # post_fork - Called just after a worker has been forked. # # A callable that takes a server and worker instance # as arguments. # # pre_fork - Called just prior to forking the worker subprocess. # # A callable that accepts the same arguments as post_fork # # pre_exec - Called just prior to forking off a secondary # master process during things like config reloading. # # A callable that takes a server instance as the sole argument. # def post_fork(server, worker): server.log.info("Worker spawned (pid: %s)", worker.pid) def pre_fork(server, worker): pass def pre_exec(server): server.log.info("Forked child, re-executing.") def when_ready(server): server.log.info("Server is ready. Spawning workers") def worker_int(worker): worker.log.info("worker received INT or QUIT signal") ## get traceback info import threading, sys, traceback id2name = {th.ident: th.name for th in threading.enumerate()} code = [] for threadId, stack in sys._current_frames().items(): code.append("\n# Thread: %s(%d)" % (id2name.get(threadId,""), threadId)) for filename, lineno, name, line in traceback.extract_stack(stack): code.append('File: "%s", line %d, in %s' % (filename, lineno, name)) if line: code.append(" %s" % (line.strip())) worker.log.debug("\n".join(code)) def worker_abort(worker): worker.log.info("worker received SIGABRT signal") def ssl_context(conf, default_ssl_context_factory): import ssl # The default SSLContext returned by the factory function is initialized # with the TLS parameters from config, including TLS certificates and other # parameters. context = default_ssl_context_factory() # The SSLContext can be further customized, for example by enforcing # minimum TLS version. context.minimum_version = ssl.TLSVersion.TLSv1_3 # Server can also return different server certificate depending which # hostname the client uses. Requires Python 3.7 or later. 
def sni_callback(socket, server_hostname, context): if server_hostname == "foo.127.0.0.1.nip.io": new_context = default_ssl_context_factory() new_context.load_cert_chain(certfile="foo.pem", keyfile="foo-key.pem") socket.context = new_context context.sni_callback = sni_callback return context benoitc-gunicorn-f5fb19e/examples/frameworks/000077500000000000000000000000001514360242400214615ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/examples/frameworks/cherryapp.py000066400000000000000000000004501514360242400240270ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import cherrypy class Root: @cherrypy.expose def index(self): return 'Hello World!' cherrypy.config.update({'environment': 'embedded'}) app = cherrypy.tree.mount(Root()) benoitc-gunicorn-f5fb19e/examples/frameworks/django/000077500000000000000000000000001514360242400227235ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/examples/frameworks/django/README000066400000000000000000000000741514360242400236040ustar00rootroot00000000000000Applications to test Django support: testing -> Django 1.4 benoitc-gunicorn-f5fb19e/examples/frameworks/django/testing/000077500000000000000000000000001514360242400244005ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/examples/frameworks/django/testing/manage.py000066400000000000000000000005361514360242400262060ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
#!/usr/bin/env python import os, sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testing.settings") from django.core.management import execute_from_command_line execute_from_command_line(sys.argv) benoitc-gunicorn-f5fb19e/examples/frameworks/django/testing/testing/000077500000000000000000000000001514360242400260555ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/examples/frameworks/django/testing/testing/__init__.py000066400000000000000000000001521514360242400301640ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. benoitc-gunicorn-f5fb19e/examples/frameworks/django/testing/testing/apps/000077500000000000000000000000001514360242400270205ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/examples/frameworks/django/testing/testing/apps/__init__.py000066400000000000000000000001521514360242400311270ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. benoitc-gunicorn-f5fb19e/examples/frameworks/django/testing/testing/apps/someapp/000077500000000000000000000000001514360242400304645ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/examples/frameworks/django/testing/testing/apps/someapp/__init__.py000066400000000000000000000001521514360242400325730ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. benoitc-gunicorn-f5fb19e/examples/frameworks/django/testing/testing/apps/someapp/middleware.py000066400000000000000000000013251514360242400331540ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from multiprocessing import Process, Queue import requests def child_process(queue): while True: print(queue.get()) requests.get('http://requestb.in/15s95oz1') class GunicornSubProcessTestMiddleware: def __init__(self): super().__init__() self.queue = Queue() self.process = Process(target=child_process, args=(self.queue,)) self.process.start() def process_request(self, request): self.queue.put(('REQUEST',)) def process_response(self, request, response): self.queue.put(('RESPONSE', response.status_code)) return response benoitc-gunicorn-f5fb19e/examples/frameworks/django/testing/testing/apps/someapp/models.py000066400000000000000000000001521514360242400323170ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. benoitc-gunicorn-f5fb19e/examples/frameworks/django/testing/testing/apps/someapp/templates/000077500000000000000000000000001514360242400324625ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/examples/frameworks/django/testing/testing/apps/someapp/templates/base.html000066400000000000000000000013671514360242400342710ustar00rootroot00000000000000 gunicorn django example app

test app

{% block content %}{% endblock %}
benoitc-gunicorn-f5fb19e/examples/frameworks/django/testing/testing/apps/someapp/templates/home.html000066400000000000000000000007271514360242400343060ustar00rootroot00000000000000{% extends "base.html" %} {% block content %}
{% csrf_token %} {{ form.as_table }}

Got

{% if subject %}

subject:
{{ subject}}

message:
{{ message }}

size:
{{ size }}

{% endif %} {% endblock content %} benoitc-gunicorn-f5fb19e/examples/frameworks/django/testing/testing/apps/someapp/tests.py000066400000000000000000000011471514360242400322030ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ This file demonstrates two different styles of tests (one doctest and one unittest). These will both pass when you run "manage.py test". Replace these with more appropriate tests for your application. """ from django.test import TestCase class SimpleTest(TestCase): def test_basic_addition(self): """ Tests that 1 + 1 always equals 2. """ self.assertEqual(1 + 1, 2) __test__ = {"doctest": """ Another way to test that 1 + 1 is equal to 2. >>> 1 + 1 == 2 True """} benoitc-gunicorn-f5fb19e/examples/frameworks/django/testing/testing/apps/someapp/urls.py000066400000000000000000000003571514360242400320300ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from django.conf.urls import url from . import views urlpatterns = [ url(r'^acsv$', views.acsv), url(r'^$', views.home), ] benoitc-gunicorn-f5fb19e/examples/frameworks/django/testing/testing/apps/someapp/views.py000066400000000000000000000031661514360242400322010ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
import csv import io import os from django import forms from django.http import HttpResponse from django.shortcuts import render from django.template import RequestContext class MsgForm(forms.Form): subject = forms.CharField(max_length=100) message = forms.CharField() f = forms.FileField() def home(request): from django.conf import settings print(settings.SOME_VALUE) subject = None message = None size = 0 print(request.META) if request.POST: form = MsgForm(request.POST, request.FILES) print(request.FILES) if form.is_valid(): subject = form.cleaned_data['subject'] message = form.cleaned_data['message'] f = request.FILES['f'] if not hasattr(f, "fileno"): size = len(f.read()) else: try: size = int(os.fstat(f.fileno())[6]) except io.UnsupportedOperation: size = len(f.read()) else: form = MsgForm() return render(request, 'home.html', { 'form': form, 'subject': subject, 'message': message, 'size': size }) def acsv(request): rows = [ {'a': 1, 'b': 2}, {'a': 3, 'b': 3} ] response = HttpResponse(mimetype='text/csv') response['Content-Disposition'] = 'attachment; filename=report.csv' writer = csv.writer(response) writer.writerow(['a', 'b']) for r in rows: writer.writerow([r['a'], r['b']]) return response benoitc-gunicorn-f5fb19e/examples/frameworks/django/testing/testing/settings.py000066400000000000000000000133141514360242400302710ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # Django settings for testing project. DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('Your Name', 'your_email@example.com'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. 'NAME': 'testdb.sql', # Or path to database file if using sqlite3. 'USER': '', # Not used with sqlite3. 'PASSWORD': '', # Not used with sqlite3. 'HOST': '', # Set to empty string for localhost. Not used with sqlite3. 
'PORT': '', # Set to empty string for default. Not used with sqlite3. } } # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/media/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = '' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = '' # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. 
) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = 'what' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', # Uncomment the next line for simple clickjacking protection: # 'django.middleware.clickjacking.XFrameOptionsMiddleware', # uncomment the next line to test multiprocessing #'testing.apps.someapp.middleware.GunicornSubProcessTestMiddleware', ) ROOT_URLCONF = 'testing.urls' # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'testing.wsgi.application' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { # ... some options here ... }, }, ] TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. 
) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', # Uncomment the next line to enable the admin: 'django.contrib.admin', # Uncomment the next line to enable admin documentation: # 'django.contrib.admindocs', 'testing.apps.someapp', 'gunicorn' ) # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } SOME_VALUE = "test on reload" benoitc-gunicorn-f5fb19e/examples/frameworks/django/testing/testing/urls.py000066400000000000000000000012531514360242400274150ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from django.conf.urls import include, url # Uncomment the next two lines to enable the admin: from django.contrib import admin admin.autodiscover() urlpatterns = [ # Examples: # url(r'^$', 'testing.views.home', name='home'), # url(r'^testing/', include('testing.foo.urls')), # Uncomment the admin/doc line below to enable admin documentation: # url(r'^admin/doc/', include('django.contrib.admindocs.urls')), # Uncomment the next line to enable the admin: url(r'^admin/', admin.site.urls), url(r'^', include("testing.apps.someapp.urls")), ] benoitc-gunicorn-f5fb19e/examples/frameworks/django/testing/testing/wsgi.py000066400000000000000000000026711514360242400274060ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ WSGI config for testing project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os import sys # make sure the current project is in PYTHONPATH sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) # set the environment settings os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testing.settings") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. 
from django.core.wsgi import get_wsgi_application application = get_wsgi_application() # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application) benoitc-gunicorn-f5fb19e/examples/frameworks/flask_sendfile.py000066400000000000000000000006151514360242400250060ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import io from flask import Flask, send_file app = Flask(__name__) @app.route('/') def index(): buf = io.BytesIO() buf.write(b'hello world') buf.seek(0) return send_file(buf, attachment_filename="testing.txt", as_attachment=True) benoitc-gunicorn-f5fb19e/examples/frameworks/flaskapp.py000066400000000000000000000003751514360242400236410ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # Run with: # # $ gunicorn flaskapp:app # from flask import Flask app = Flask(__name__) @app.route("/") def hello(): return "Hello World!" benoitc-gunicorn-f5fb19e/examples/frameworks/flaskapp_aiohttp_wsgi.py000066400000000000000000000011371514360242400264170ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # Example command to run the example: # # $ gunicorn flaskapp_aiohttp_wsgi:aioapp -k aiohttp.worker.GunicornWebWorker # from aiohttp import web from aiohttp_wsgi import WSGIHandler from flask import Flask app = Flask(__name__) @app.route('/') def hello(): return 'Hello, world!' 
def make_aiohttp_app(app): wsgi_handler = WSGIHandler(app) aioapp = web.Application() aioapp.router.add_route('*', '/{path_info:.*}', wsgi_handler) return aioapp aioapp = make_aiohttp_app(app) benoitc-gunicorn-f5fb19e/examples/frameworks/pyramidapp.py000066400000000000000000000006741514360242400242100ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from pyramid.config import Configurator from pyramid.response import Response def hello_world(request): return Response('Hello world!') def goodbye_world(request): return Response('Goodbye world!') config = Configurator() config.add_view(hello_world) config.add_view(goodbye_world, name='goodbye') app = config.make_wsgi_app() benoitc-gunicorn-f5fb19e/examples/frameworks/requirements.txt000066400000000000000000000002261514360242400247450ustar00rootroot00000000000000-r requirements_flaskapp.txt -r requirements_cherryapp.txt -r requirements_pyramidapp.txt -r requirements_tornadoapp.txt -r requirements_webpyapp.txt benoitc-gunicorn-f5fb19e/examples/frameworks/requirements_cherryapp.txt000066400000000000000000000000111514360242400270120ustar00rootroot00000000000000cherrypy benoitc-gunicorn-f5fb19e/examples/frameworks/requirements_flaskapp.txt000066400000000000000000000000061514360242400266220ustar00rootroot00000000000000flask benoitc-gunicorn-f5fb19e/examples/frameworks/requirements_pyramidapp.txt000066400000000000000000000000101514360242400271620ustar00rootroot00000000000000pyramid benoitc-gunicorn-f5fb19e/examples/frameworks/requirements_tornadoapp.txt000066400000000000000000000000121514360242400271650ustar00rootroot00000000000000tornado<6 benoitc-gunicorn-f5fb19e/examples/frameworks/requirements_webpyapp.txt000066400000000000000000000000071514360242400266510ustar00rootroot00000000000000web-py benoitc-gunicorn-f5fb19e/examples/frameworks/tornadoapp.py000066400000000000000000000012041514360242400241770ustar00rootroot00000000000000# # This 
file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # # Run with: # # $ gunicorn -k tornado tornadoapp:app # import asyncio import tornado.ioloop import tornado.web class MainHandler(tornado.web.RequestHandler): async def get(self): # Your asynchronous code here await asyncio.sleep(1) # Example of an asynchronous operation self.write("Hello, World!") def make_app(): return tornado.web.Application([ (r"/", MainHandler), ]) app = make_app() if __name__ == "__main__": app.listen(8888) tornado.ioloop.IOLoop.current().start() benoitc-gunicorn-f5fb19e/examples/frameworks/webpyapp.py000066400000000000000000000004571514360242400236700ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # Run with # # $ gunicorn webpyapp:app # import web urls = ( '/', 'index' ) class index: def GET(self): return "Hello, world!" app = web.application(urls, globals()).wsgifunc() benoitc-gunicorn-f5fb19e/examples/gunicorn_rc000077500000000000000000000003271514360242400215410ustar00rootroot00000000000000#!/bin/sh GUNICORN=/usr/local/bin/gunicorn ROOT=/path/to/project PID=/var/run/gunicorn.pid APP=main:application if [ -f $PID ]; then rm $PID; fi cd $ROOT exec $GUNICORN -c $ROOT/gunicorn.conf.py --pid=$PID $APP benoitc-gunicorn-f5fb19e/examples/hello.txt000066400000000000000000000000151514360242400211410ustar00rootroot00000000000000Hello world! benoitc-gunicorn-f5fb19e/examples/http2_features/000077500000000000000000000000001514360242400222405ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/examples/http2_features/Dockerfile000066400000000000000000000010321514360242400242260ustar00rootroot00000000000000FROM python:3.12-slim WORKDIR /app # Install h2 for HTTP/2 support and httpx for testing RUN pip install --no-cache-dir h2 httpx # Copy gunicorn source and install COPY . 
/app/gunicorn-src RUN pip install /app/gunicorn-src # Copy example app COPY examples/http2_features /app/http2_features # Copy SSL certificates COPY examples/server.crt /app/certs/server.crt COPY examples/server.key /app/certs/server.key ENV PYTHONPATH=/app EXPOSE 8443 CMD ["gunicorn", "http2_features.http2_app:app", "-c", "http2_features/gunicorn_conf.py"] benoitc-gunicorn-f5fb19e/examples/http2_features/__init__.py000066400000000000000000000001511514360242400243460ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. benoitc-gunicorn-f5fb19e/examples/http2_features/docker-compose.yml000066400000000000000000000005351514360242400257000ustar00rootroot00000000000000services: http2-features: build: context: ../.. dockerfile: examples/http2_features/Dockerfile ports: - "8443:8443" healthcheck: test: ["CMD", "python", "-c", "import httpx; httpx.get('https://127.0.0.1:8443/health', verify=False)"] interval: 10s timeout: 5s retries: 5 start_period: 5s benoitc-gunicorn-f5fb19e/examples/http2_features/gunicorn_conf.py000066400000000000000000000010271514360242400254430ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # Gunicorn configuration for HTTP/2 features example bind = "0.0.0.0:8443" workers = 2 worker_class = "asgi" # SSL configuration (required for HTTP/2) certfile = "/app/certs/server.crt" keyfile = "/app/certs/server.key" # HTTP/2 configuration http_protocols = "h2,h1" http2_max_concurrent_streams = 100 http2_initial_window_size = 65535 http2_max_frame_size = 16384 # Logging accesslog = "-" errorlog = "-" loglevel = "info" benoitc-gunicorn-f5fb19e/examples/http2_features/http2_app.py000066400000000000000000000207241514360242400245200ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
""" HTTP/2 ASGI application demonstrating priority and trailers. This example shows how to: - Access stream priority information from HTTP/2 requests - Send response trailers (useful for gRPC, checksums, etc.) Run with: cd examples/http2_features docker compose up --build Test with: python test_http2.py Or manually: curl -k --http2 https://localhost:8443/ curl -k --http2 https://localhost:8443/priority curl -k --http2 https://localhost:8443/trailers """ import json import hashlib async def app(scope, receive, send): """ASGI application demonstrating HTTP/2 priority and trailers.""" if scope["type"] == "lifespan": await handle_lifespan(scope, receive, send) elif scope["type"] == "http": await handle_http(scope, receive, send) else: raise ValueError(f"Unknown scope type: {scope['type']}") async def handle_lifespan(scope, receive, send): """Handle lifespan events (startup/shutdown).""" while True: message = await receive() if message["type"] == "lifespan.startup": print("HTTP/2 features app starting...") await send({"type": "lifespan.startup.complete"}) elif message["type"] == "lifespan.shutdown": print("HTTP/2 features app shutting down...") await send({"type": "lifespan.shutdown.complete"}) return async def handle_http(scope, receive, send): """Route HTTP requests to handlers.""" path = scope["path"] method = scope["method"] if path == "/" and method == "GET": await handle_index(scope, receive, send) elif path == "/priority" and method == "GET": await handle_priority(scope, receive, send) elif path == "/trailers" and method in ("GET", "POST"): await handle_trailers(scope, receive, send) elif path == "/combined" and method in ("GET", "POST"): await handle_combined(scope, receive, send) elif path == "/health" and method == "GET": await send_response(send, 200, b"OK") else: await send_response(send, 404, b"Not Found\n") async def handle_index(scope, receive, send): """Show available endpoints and HTTP/2 features.""" extensions = scope.get("extensions", {}) http_version 
= scope.get("http_version", "1.1") info = { "message": "HTTP/2 Features Demo", "http_version": http_version, "endpoints": { "/": "This info page", "/priority": "Shows stream priority information", "/trailers": "Demonstrates response trailers with checksum", "/combined": "Shows both priority and trailers", "/health": "Health check endpoint", }, "extensions": list(extensions.keys()), } body = json.dumps(info, indent=2).encode() + b"\n" await send_response(send, 200, body, content_type=b"application/json") async def handle_priority(scope, receive, send): """Return stream priority information. HTTP/2 allows clients to indicate relative importance of requests. Gunicorn exposes this through the http.response.priority extension. """ extensions = scope.get("extensions", {}) priority_info = extensions.get("http.response.priority") if priority_info: response = { "http_version": scope.get("http_version", "1.1"), "priority": { "weight": priority_info["weight"], "depends_on": priority_info["depends_on"], "description": ( f"Weight {priority_info['weight']}/256 - " f"{'high' if priority_info['weight'] > 128 else 'normal' if priority_info['weight'] > 64 else 'low'} priority" ), }, "note": "Priority is advisory - use for scheduling hints", } else: response = { "http_version": scope.get("http_version", "1.1"), "priority": None, "note": "Priority information only available for HTTP/2 requests", } body = json.dumps(response, indent=2).encode() + b"\n" await send_response(send, 200, body, content_type=b"application/json") async def handle_trailers(scope, receive, send): """Demonstrate response trailers. Trailers are headers sent after the response body. Common uses: gRPC status codes, checksums, timing info. 
""" extensions = scope.get("extensions", {}) supports_trailers = "http.response.trailers" in extensions # Read request body if POST body_data = b"" if scope["method"] == "POST": body_data = await read_body(receive) # Generate response response_body = body_data if body_data else b"Hello from HTTP/2 with trailers!\n" # Calculate checksum for trailer checksum = hashlib.md5(response_body).hexdigest() if supports_trailers: # Send response announcing trailers await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"application/octet-stream"), (b"trailer", b"content-md5, x-processing-time"), ], }) # Send body await send({ "type": "http.response.body", "body": response_body, "more_body": False, }) # Send trailers await send({ "type": "http.response.trailers", "headers": [ (b"content-md5", checksum.encode()), (b"x-processing-time", b"42ms"), ], }) else: # HTTP/1.1 fallback - include checksum in regular headers response = { "message": "Trailers not supported (HTTP/1.1)", "data": response_body.decode("utf-8", errors="replace"), "checksum_in_header": checksum, } body = json.dumps(response, indent=2).encode() + b"\n" await send_response( send, 200, body, content_type=b"application/json", extra_headers=[(b"x-checksum", checksum.encode())] ) async def handle_combined(scope, receive, send): """Show both priority and trailers in one response. This demonstrates a realistic scenario like gRPC where priority affects scheduling and trailers carry status. 
""" extensions = scope.get("extensions", {}) priority_info = extensions.get("http.response.priority") supports_trailers = "http.response.trailers" in extensions # Build response showing all HTTP/2 features response = { "http_version": scope.get("http_version", "1.1"), "priority": None, "trailers_supported": supports_trailers, } if priority_info: response["priority"] = { "weight": priority_info["weight"], "depends_on": priority_info["depends_on"], } response_body = json.dumps(response, indent=2).encode() + b"\n" checksum = hashlib.md5(response_body).hexdigest() if supports_trailers: # Full HTTP/2 response with trailers await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"application/json"), (b"trailer", b"content-md5, x-status"), ], }) await send({ "type": "http.response.body", "body": response_body, "more_body": False, }) await send({ "type": "http.response.trailers", "headers": [ (b"content-md5", checksum.encode()), (b"x-status", b"success"), ], }) else: await send_response(send, 200, response_body, content_type=b"application/json") async def send_response(send, status, body, content_type=b"text/plain", extra_headers=None): """Send a simple HTTP response.""" headers = [ (b"content-type", content_type), (b"content-length", str(len(body)).encode()), ] if extra_headers: headers.extend(extra_headers) await send({ "type": "http.response.start", "status": status, "headers": headers, }) await send({ "type": "http.response.body", "body": body, }) async def read_body(receive): """Read the full request body.""" body = b"" while True: message = await receive() body += message.get("body", b"") if not message.get("more_body", False): break return body benoitc-gunicorn-f5fb19e/examples/http2_features/requirements.txt000066400000000000000000000000711514360242400255220ustar00rootroot00000000000000# Requirements for testing HTTP/2 features httpx>=0.24.0 
benoitc-gunicorn-f5fb19e/examples/http2_features/test_http2.py000066400000000000000000000212131514360242400247110ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. #!/usr/bin/env python """ Test script for HTTP/2 features example. This script tests: - HTTP/2 connection establishment - Stream priority access - Response trailers Run the server first: docker compose up --build Then run tests: python test_http2.py Or run directly against local server: python test_http2.py --url https://localhost:8443 """ import argparse import json import ssl import socket import sys from urllib.parse import urlparse def create_h2_connection(host, port): """Create an HTTP/2 connection using the h2 library.""" try: import h2.connection import h2.config except ImportError: print("Please install h2: pip install h2") sys.exit(1) # Create socket with SSL sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) ctx = ssl.create_default_context() ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE ctx.set_alpn_protocols(['h2']) sock = ctx.wrap_socket(sock, server_hostname=host) sock.connect((host, port)) sock.settimeout(10.0) # Verify ALPN alpn = sock.selected_alpn_protocol() if alpn != 'h2': raise RuntimeError(f"HTTP/2 not negotiated, got: {alpn}") # Create h2 connection config = h2.config.H2Configuration(client_side=True) h2_conn = h2.connection.H2Connection(config=config) h2_conn.initiate_connection() sock.sendall(h2_conn.data_to_send()) # Receive server settings data = sock.recv(65536) h2_conn.receive_data(data) sock.sendall(h2_conn.data_to_send()) return sock, h2_conn def h2_request(sock, h2_conn, stream_id, method, path, authority): """Make an HTTP/2 request and return the response.""" import h2.events # Send request h2_conn.send_headers(stream_id, [ (':method', method), (':path', path), (':authority', authority), (':scheme', 'https'), ], end_stream=True) sock.sendall(h2_conn.data_to_send()) # 
Collect response status = None headers = {} body = b'' trailers = {} while True: data = sock.recv(65536) if not data: break events = h2_conn.receive_data(data) to_send = h2_conn.data_to_send() if to_send: sock.sendall(to_send) for event in events: if isinstance(event, h2.events.ResponseReceived): if event.stream_id == stream_id: for name, value in event.headers: if name == b':status': status = int(value.decode()) else: headers[name.decode()] = value.decode() elif isinstance(event, h2.events.DataReceived): if event.stream_id == stream_id: body += event.data elif isinstance(event, h2.events.TrailersReceived): if event.stream_id == stream_id: for name, value in event.headers: trailers[name.decode()] = value.decode() elif isinstance(event, h2.events.StreamEnded): if event.stream_id == stream_id: return { 'status': status, 'headers': headers, 'body': body, 'trailers': trailers, } elif isinstance(event, h2.events.ConnectionTerminated): raise RuntimeError(f"Connection terminated: {event.error_code}") return None def test_http2_connection(host, port): """Test that HTTP/2 is negotiated.""" print("\n=== Testing HTTP/2 Connection ===") try: sock, h2_conn = create_h2_connection(host, port) print("HTTP/2 connection established successfully!") response = h2_request(sock, h2_conn, 1, 'GET', '/', f'{host}:{port}') print(f"Status: {response['status']}") data = json.loads(response['body'].decode()) print(f"Extensions available: {data.get('extensions', [])}") sock.close() return response['status'] == 200 except Exception as e: print(f"ERROR: {e}") return False def test_priority(host, port): """Test stream priority endpoint.""" print("\n=== Testing Stream Priority ===") try: sock, h2_conn = create_h2_connection(host, port) response = h2_request(sock, h2_conn, 1, 'GET', '/priority', f'{host}:{port}') print(f"Status: {response['status']}") data = json.loads(response['body'].decode()) print(f"Priority info: {data.get('priority')}") if data.get("priority"): print(f" Weight: 
{data['priority']['weight']}") print(f" Depends on: {data['priority']['depends_on']}") sock.close() return response['status'] == 200 and data.get("priority") is not None except Exception as e: print(f"ERROR: {e}") return False def test_trailers(host, port): """Test response trailers.""" print("\n=== Testing Response Trailers ===") try: sock, h2_conn = create_h2_connection(host, port) response = h2_request(sock, h2_conn, 1, 'GET', '/trailers', f'{host}:{port}') print(f"Status: {response['status']}") print(f"Headers: {response['headers']}") if response['trailers']: print(f"Trailers received: {response['trailers']}") if 'content-md5' in response['trailers']: print(f" Content-MD5: {response['trailers']['content-md5']}") else: print("Note: No trailers received (client may not have advertised support)") sock.close() return response['status'] == 200 except Exception as e: print(f"ERROR: {e}") return False def test_combined(host, port): """Test combined priority and trailers.""" print("\n=== Testing Combined Features ===") try: sock, h2_conn = create_h2_connection(host, port) response = h2_request(sock, h2_conn, 1, 'GET', '/combined', f'{host}:{port}') print(f"Status: {response['status']}") data = json.loads(response['body'].decode()) print(f"Response: {json.dumps(data, indent=2)}") if response['trailers']: print(f"Trailers: {response['trailers']}") sock.close() return response['status'] == 200 except Exception as e: print(f"ERROR: {e}") return False def test_multiple_streams(host, port): """Test multiple requests on the same connection.""" print("\n=== Testing Multiple Streams ===") try: sock, h2_conn = create_h2_connection(host, port) # Make multiple requests on the same connection paths = ['/', '/priority', '/trailers', '/combined'] for i, path in enumerate(paths): stream_id = i * 2 + 1 # Odd numbers for client-initiated streams response = h2_request(sock, h2_conn, stream_id, 'GET', path, f'{host}:{port}') print(f" {path}: {response['status']}") sock.close() return True 
except Exception as e: print(f"ERROR: {e}") return False def main(): parser = argparse.ArgumentParser(description="Test HTTP/2 features") parser.add_argument( "--url", default="https://localhost:8443", help="Base URL of the server (default: https://localhost:8443)" ) args = parser.parse_args() parsed = urlparse(args.url) host = parsed.hostname or 'localhost' port = parsed.port or 8443 print(f"Testing against: {host}:{port}") results = [] try: results.append(("HTTP/2 Connection", test_http2_connection(host, port))) results.append(("Stream Priority", test_priority(host, port))) results.append(("Response Trailers", test_trailers(host, port))) results.append(("Combined Features", test_combined(host, port))) results.append(("Multiple Streams", test_multiple_streams(host, port))) except ConnectionRefusedError: print(f"\nConnection refused to {host}:{port}") print("Make sure the server is running: docker compose up --build") return 1 except Exception as e: print(f"\nUnexpected error: {e}") return 1 print("\n=== Test Results ===") all_passed = True for name, passed in results: status = "PASS" if passed else "FAIL" print(f" {name}: {status}") if not passed: all_passed = False if all_passed: print("\nAll tests passed!") return 0 else: print("\nSome tests failed.") return 1 if __name__ == "__main__": sys.exit(main()) benoitc-gunicorn-f5fb19e/examples/http2_gevent/000077500000000000000000000000001514360242400217125ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/examples/http2_gevent/.gitignore000066400000000000000000000001041514360242400236750ustar00rootroot00000000000000# Generated certificates - run ./generate_certs.sh to create certs/ benoitc-gunicorn-f5fb19e/examples/http2_gevent/Dockerfile000066400000000000000000000024521514360242400237070ustar00rootroot00000000000000# HTTP/2 with Gevent Example # # Build: docker build -t gunicorn-http2-gevent . 
# Run: docker run -p 8443:8443 -v $(pwd)/certs:/certs:ro gunicorn-http2-gevent FROM python:3.12-slim # Install build dependencies for gevent and h2 RUN apt-get update && apt-get install -y --no-install-recommends \ gcc \ libc-dev \ libffi-dev \ && rm -rf /var/lib/apt/lists/* WORKDIR /app # Copy gunicorn source and install with gevent and http2 support # For production, use: pip install gunicorn[gevent,http2] COPY --chown=root:root . /gunicorn-src/ RUN pip install --no-cache-dir /gunicorn-src/[gevent,http2] # Copy application files COPY examples/http2_gevent/app.py /app/ COPY examples/http2_gevent/gunicorn_conf.py /app/ # Create non-root user for security RUN useradd -m -u 1000 gunicorn && \ chown -R gunicorn:gunicorn /app USER gunicorn EXPOSE 8443 # Health check HEALTHCHECK --interval=10s --timeout=5s --start-period=5s --retries=3 \ CMD python -c "import ssl,socket; s=socket.socket(); s.settimeout(2); ctx=ssl.create_default_context(); ctx.check_hostname=False; ctx.verify_mode=ssl.CERT_NONE; ss=ctx.wrap_socket(s,server_hostname='localhost'); ss.connect(('localhost',8443)); ss.close()" || exit 1 # Run gunicorn with the config file CMD ["gunicorn", "--config", "gunicorn_conf.py", "app:app"] benoitc-gunicorn-f5fb19e/examples/http2_gevent/README.md000066400000000000000000000072171514360242400232000ustar00rootroot00000000000000# HTTP/2 with Gevent Worker Example This example demonstrates how to run Gunicorn with HTTP/2 support using the gevent async worker. ## Features - HTTP/2 protocol with ALPN negotiation - Gevent-based async worker for high concurrency - Connection multiplexing (multiple streams per connection) - Flow control for large transfers - SSL/TLS encryption (required for HTTP/2) ## Quick Start ### 1. Generate SSL Certificates HTTP/2 requires TLS. Generate self-signed certificates for testing: ```bash chmod +x generate_certs.sh ./generate_certs.sh ``` ### 2. Start with Docker Compose ```bash docker compose up -d ``` ### 3. 
Test the Server Using curl with HTTP/2: ```bash # Basic request curl -k --http2 https://localhost:8443/ # Check HTTP version curl -k --http2 -w "HTTP Version: %{http_version}\n" https://localhost:8443/ # Test echo endpoint curl -k --http2 -X POST -d "Hello HTTP/2" https://localhost:8443/echo # Get server info curl -k --http2 https://localhost:8443/info | jq ``` ### 4. Run Tests ```bash # Install test dependencies pip install httpx[http2] pytest pytest-asyncio # Run tests python test_http2_gevent.py # Or with pytest for more detail pytest test_http2_gevent.py -v ``` ## Running Locally (Without Docker) ### Prerequisites ```bash pip install gunicorn[gevent,http2] ``` ### Generate Certificates ```bash ./generate_certs.sh ``` ### Start Server ```bash gunicorn --config gunicorn_conf.py app:app ``` Or with command-line options: ```bash gunicorn app:app \ --bind 0.0.0.0:8443 \ --worker-class gevent \ --workers 4 \ --worker-connections 1000 \ --http-protocols h2,h1 \ --certfile certs/server.crt \ --keyfile certs/server.key ``` ## Configuration Options ### HTTP/2 Settings | Setting | Default | Description | |---------|---------|-------------| | `http_protocols` | `['h1']` | Enable protocols: `['h2', 'h1']` for HTTP/2 | | `http2_max_concurrent_streams` | 100 | Max streams per connection | | `http2_initial_window_size` | 65535 | Flow control window size (bytes) | | `http2_max_frame_size` | 16384 | Max frame size (bytes) | | `http2_max_header_list_size` | 65536 | Max header list size (bytes) | ### Gevent Worker Settings | Setting | Default | Description | |---------|---------|-------------| | `worker_class` | `sync` | Set to `gevent` for async | | `workers` | 1 | Number of worker processes | | `worker_connections` | 1000 | Max clients per worker | ## Endpoints | Path | Method | Description | |------|--------|-------------| | `/` | GET | Hello message | | `/health` | GET | Health check | | `/echo` | POST | Echo request body | | `/info` | GET | Server/request info as JSON | | 
`/large` | GET | 1MB response (test streaming) | | `/stream` | GET | Server-sent events stream | | `/delay?seconds=N` | GET | Delayed response | | `/priority` | GET | HTTP/2 priority info | ## Performance Tips 1. **Worker Count**: Use `2 * CPU cores + 1` workers for I/O-bound apps 2. **Connections**: Increase `worker_connections` for high concurrency 3. **Window Size**: Larger `http2_initial_window_size` improves throughput for large transfers 4. **Streams**: Increase `http2_max_concurrent_streams` for many parallel requests ## Troubleshooting ### Certificate Issues ```bash # Regenerate certificates rm -rf certs/ ./generate_certs.sh ``` ### Connection Refused ```bash # Check if server is running docker compose ps # View logs docker compose logs -f ``` ### HTTP/2 Not Negotiated Ensure: - SSL/TLS is configured (certfile and keyfile) - `http_protocols` includes `'h2'` - Client supports HTTP/2 over TLS (curl with `--http2`, not `--http2-prior-knowledge`) ## License MIT License - See the main Gunicorn repository for details. benoitc-gunicorn-f5fb19e/examples/http2_gevent/app.py000066400000000000000000000101021514360242400230360ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Example WSGI application demonstrating HTTP/2 with gevent worker. This application showcases various HTTP/2 features including: - Basic request/response handling - Large file transfers (streaming) - Concurrent requests (multiplexing) - Server push simulation """ import json import time def app(environ, start_response): """WSGI application for HTTP/2 demonstration.""" path = environ.get('PATH_INFO', '/') method = environ.get('REQUEST_METHOD', 'GET') # Root endpoint if path == '/': body = b'Hello from HTTP/2 with Gevent!' 
status = '200 OK' content_type = 'text/plain; charset=utf-8' # Health check elif path == '/health': body = b'OK' status = '200 OK' content_type = 'text/plain' # Echo endpoint - returns the request body elif path == '/echo': content_length = int(environ.get('CONTENT_LENGTH', 0) or 0) body = environ['wsgi.input'].read(content_length) status = '200 OK' content_type = 'application/octet-stream' # JSON endpoint - returns request info as JSON elif path == '/info': info = { 'method': method, 'path': path, 'protocol': environ.get('SERVER_PROTOCOL', 'unknown'), 'http_version': environ.get('HTTP_VERSION', '1.1'), 'server': 'gunicorn with gevent + HTTP/2', 'headers': { k: v for k, v in environ.items() if k.startswith('HTTP_') } } body = json.dumps(info, indent=2).encode('utf-8') status = '200 OK' content_type = 'application/json' # Large response for testing streaming/flow control elif path == '/large': # Return 1MB of data size = 1024 * 1024 body = b'X' * size status = '200 OK' content_type = 'application/octet-stream' # Streaming response using generator elif path == '/stream': def generate(): for i in range(10): yield f'data: chunk {i}\n\n'.encode('utf-8') # Small delay to simulate streaming time.sleep(0.1) start_response('200 OK', [ ('Content-Type', 'text/event-stream'), ('Cache-Control', 'no-cache'), ]) return generate() # Concurrent test endpoint with configurable delay elif path.startswith('/delay'): query = environ.get('QUERY_STRING', '') try: delay = float(query.split('=')[1]) if '=' in query else 0.5 delay = min(delay, 5.0) # Cap at 5 seconds except (ValueError, IndexError): delay = 0.5 # Use gevent sleep for cooperative yielding try: import gevent gevent.sleep(delay) except ImportError: time.sleep(delay) body = f'Delayed response after {delay}s'.encode('utf-8') status = '200 OK' content_type = 'text/plain' # HTTP/2 priority information (if available) elif path == '/priority': priority_info = { 'weight': environ.get('HTTP2_PRIORITY_WEIGHT', 'N/A'), 'depends_on': 
environ.get('HTTP2_PRIORITY_DEPENDS_ON', 'N/A'), 'exclusive': environ.get('HTTP2_PRIORITY_EXCLUSIVE', 'N/A'), } body = json.dumps(priority_info, indent=2).encode('utf-8') status = '200 OK' content_type = 'application/json' # 404 for unknown paths else: body = b'Not Found' status = '404 Not Found' content_type = 'text/plain' response_headers = [ ('Content-Type', content_type), ('Content-Length', str(len(body))), ('X-Worker-Type', 'gevent'), ] start_response(status, response_headers) return [body] # Allow running directly for testing if __name__ == '__main__': from wsgiref.simple_server import make_server server = make_server('localhost', 8000, app) print('Test server running on http://localhost:8000') server.serve_forever() benoitc-gunicorn-f5fb19e/examples/http2_gevent/docker-compose.yml000066400000000000000000000021751514360242400253540ustar00rootroot00000000000000# HTTP/2 with Gevent Docker Compose # # Usage: # # Generate certificates first (or use your own) # ./generate_certs.sh # # # Start services # docker compose up -d # # # Test with curl (requires curl with HTTP/2 support) # curl -k --http2 https://localhost:8443/ # # # View logs # docker compose logs -f # # # Stop services # docker compose down services: gunicorn: build: context: ../.. 
dockerfile: examples/http2_gevent/Dockerfile ports: - "8443:8443" volumes: - ./certs:/certs:ro environment: - GUNICORN_WORKERS=4 - GUNICORN_LOG_LEVEL=info healthcheck: test: ["CMD", "python", "-c", "import ssl,socket; s=socket.socket(); s.settimeout(2); ctx=ssl.create_default_context(); ctx.check_hostname=False; ctx.verify_mode=ssl.CERT_NONE; ss=ctx.wrap_socket(s,server_hostname='localhost'); ss.connect(('localhost',8443)); ss.close()"] interval: 5s timeout: 5s retries: 10 start_period: 10s restart: unless-stopped deploy: resources: limits: cpus: '2' memory: 512M networks: default: driver: bridge benoitc-gunicorn-f5fb19e/examples/http2_gevent/generate_certs.sh000077500000000000000000000021431514360242400252430ustar00rootroot00000000000000#!/bin/bash # # Generate self-signed certificates for HTTP/2 testing. # # Usage: ./generate_certs.sh # set -e CERTS_DIR="./certs" CERT_FILE="$CERTS_DIR/server.crt" KEY_FILE="$CERTS_DIR/server.key" # Create certs directory if it doesn't exist mkdir -p "$CERTS_DIR" # Check if certificates already exist if [ -f "$CERT_FILE" ] && [ -f "$KEY_FILE" ]; then echo "Certificates already exist in $CERTS_DIR" echo "Delete them first if you want to regenerate." exit 0 fi echo "Generating self-signed certificate..." 
openssl req -x509 -newkey rsa:2048 \ -keyout "$KEY_FILE" \ -out "$CERT_FILE" \ -days 365 \ -nodes \ -subj "/CN=localhost/O=Gunicorn HTTP2 Example/C=US" \ -addext "subjectAltName=DNS:localhost,DNS:gunicorn,IP:127.0.0.1" # Set appropriate permissions chmod 644 "$CERT_FILE" chmod 600 "$KEY_FILE" echo "Certificates generated successfully:" echo " Certificate: $CERT_FILE" echo " Private Key: $KEY_FILE" echo "" echo "You can now start the server with:" echo " docker compose up -d" echo "" echo "Or run locally with:" echo " gunicorn --config gunicorn_conf.py app:app" benoitc-gunicorn-f5fb19e/examples/http2_gevent/gunicorn_conf.py000066400000000000000000000051731514360242400251230ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Gunicorn configuration for HTTP/2 with gevent worker. This configuration demonstrates: - HTTP/2 protocol support with ALPN - Gevent async worker for high concurrency - SSL/TLS configuration - HTTP/2 specific tuning options """ import os import multiprocessing # Server socket bind = os.environ.get('GUNICORN_BIND', '0.0.0.0:8443') # Worker configuration worker_class = 'gevent' workers = int(os.environ.get('GUNICORN_WORKERS', multiprocessing.cpu_count() * 2 + 1)) worker_connections = 1000 # Max simultaneous clients per worker # HTTP protocols - enable HTTP/2 with HTTP/1.1 fallback http_protocols = "h2,h1" # SSL/TLS configuration (required for HTTP/2) # Default paths work in Docker; override with env vars for local testing _default_cert = '/certs/server.crt' if os.path.exists('/certs/server.crt') else 'certs/server.crt' _default_key = '/certs/server.key' if os.path.exists('/certs/server.key') else 'certs/server.key' certfile = os.environ.get('GUNICORN_CERTFILE', _default_cert) keyfile = os.environ.get('GUNICORN_KEYFILE', _default_key) # HTTP/2 specific settings http2_max_concurrent_streams = 128 # Max streams per connection http2_initial_window_size = 262144 # 256KB 
initial flow control window http2_max_frame_size = 16384 # Default frame size (16KB) http2_max_header_list_size = 65536 # Max header size # Timeouts timeout = 30 # Worker timeout graceful_timeout = 30 # Graceful shutdown timeout keepalive = 5 # Keep-alive connections # Logging loglevel = os.environ.get('GUNICORN_LOG_LEVEL', 'info') accesslog = '-' # Log to stdout errorlog = '-' # Log to stderr access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s" %(L)s' # Process naming proc_name = 'gunicorn-http2-gevent' # Server mechanics daemon = False pidfile = None umask = 0 user = None group = None tmp_upload_dir = None def on_starting(server): """Called just before the master process is initialized.""" server.log.info("Starting HTTP/2 server with gevent worker...") server.log.info(f"Workers: {workers}, Connections per worker: {worker_connections}") server.log.info(f"HTTP/2 max streams: {http2_max_concurrent_streams}") def when_ready(server): """Called just after the server is started.""" server.log.info("HTTP/2 server is ready to accept connections") def worker_int(worker): """Called when a worker receives SIGINT or SIGQUIT.""" worker.log.info("Worker received interrupt signal") def worker_abort(worker): """Called when a worker receives SIGABRT.""" worker.log.warning("Worker aborted") benoitc-gunicorn-f5fb19e/examples/http2_gevent/test_http2_gevent.py000066400000000000000000000240741514360242400257430ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. #!/usr/bin/env python """ Tests for HTTP/2 with gevent example. 
Run with: # Start the server first docker compose up -d # Run tests python test_http2_gevent.py # Or with pytest pytest test_http2_gevent.py -v Requirements: pip install httpx[http2] pytest pytest-asyncio """ import asyncio import sys import ssl import socket import time def check_server_available(host='localhost', port=8443, timeout=30): """Wait for server to become available.""" start = time.time() while time.time() - start < timeout: try: ctx = ssl.create_default_context() ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE with socket.create_connection((host, port), timeout=2) as sock: with ctx.wrap_socket(sock, server_hostname=host): return True except (socket.error, ssl.SSLError, OSError): time.sleep(1) return False class TestHTTP2Gevent: """Test HTTP/2 functionality with gevent worker.""" BASE_URL = "https://localhost:8443" @classmethod def setup_class(cls): """Check server is available before running tests.""" if not check_server_available(): raise RuntimeError( "Server not available. Start it with: docker compose up -d" ) def get_client(self): """Create HTTP/2 client.""" import httpx return httpx.Client(http2=True, verify=False, timeout=30.0) def test_root_endpoint(self): """Test basic GET request returns HTTP/2.""" with self.get_client() as client: response = client.get(f"{self.BASE_URL}/") assert response.status_code == 200 assert response.http_version == "HTTP/2" assert b"HTTP/2" in response.content or b"Gevent" in response.content def test_health_endpoint(self): """Test health check endpoint.""" with self.get_client() as client: response = client.get(f"{self.BASE_URL}/health") assert response.status_code == 200 assert response.text == "OK" def test_echo_post(self): """Test POST echo endpoint.""" with self.get_client() as client: data = b"Hello HTTP/2 with Gevent!" 
response = client.post(f"{self.BASE_URL}/echo", content=data) assert response.status_code == 200 assert response.content == data def test_echo_large_body(self): """Test POST with large body (tests flow control).""" with self.get_client() as client: # 100KB of data data = b"X" * (100 * 1024) response = client.post(f"{self.BASE_URL}/echo", content=data) assert response.status_code == 200 assert len(response.content) == len(data) assert response.content == data def test_info_endpoint(self): """Test JSON info endpoint.""" with self.get_client() as client: response = client.get(f"{self.BASE_URL}/info") assert response.status_code == 200 info = response.json() assert info['method'] == 'GET' assert info['path'] == '/info' assert 'gevent' in info['server'].lower() def test_large_response(self): """Test large response (1MB) - tests streaming and flow control.""" with self.get_client() as client: response = client.get(f"{self.BASE_URL}/large") assert response.status_code == 200 assert len(response.content) == 1024 * 1024 assert response.content == b"X" * (1024 * 1024) def test_streaming_response(self): """Test server-sent events style streaming.""" with self.get_client() as client: response = client.get(f"{self.BASE_URL}/stream") assert response.status_code == 200 assert b"chunk 0" in response.content assert b"chunk 9" in response.content def test_delay_endpoint(self): """Test delayed response.""" with self.get_client() as client: start = time.time() response = client.get(f"{self.BASE_URL}/delay?seconds=0.5") elapsed = time.time() - start assert response.status_code == 200 assert elapsed >= 0.4 # Allow some tolerance assert b"Delayed" in response.content def test_not_found(self): """Test 404 response.""" with self.get_client() as client: response = client.get(f"{self.BASE_URL}/nonexistent") assert response.status_code == 404 def test_gevent_worker_header(self): """Test that gevent worker header is present.""" with self.get_client() as client: response = 
client.get(f"{self.BASE_URL}/") assert response.status_code == 200 assert response.headers.get('x-worker-type') == 'gevent' class TestHTTP2Concurrency: """Test HTTP/2 multiplexing with concurrent requests.""" BASE_URL = "https://localhost:8443" @classmethod def setup_class(cls): """Check server is available.""" if not check_server_available(): raise RuntimeError("Server not available") def test_concurrent_requests_sync(self): """Test multiple concurrent requests using threads.""" import httpx from concurrent.futures import ThreadPoolExecutor, as_completed def make_request(i): with httpx.Client(http2=True, verify=False, timeout=30.0) as client: response = client.get(f"{self.BASE_URL}/delay?seconds=0.2") return i, response.status_code num_requests = 10 with ThreadPoolExecutor(max_workers=10) as executor: futures = [executor.submit(make_request, i) for i in range(num_requests)] results = [f.result() for f in as_completed(futures)] assert len(results) == num_requests assert all(status == 200 for _, status in results) class TestHTTP2ConcurrencyAsync: """Async tests for HTTP/2 multiplexing.""" BASE_URL = "https://localhost:8443" @classmethod def setup_class(cls): """Check server is available.""" if not check_server_available(): raise RuntimeError("Server not available") def test_async_concurrent_requests(self): """Test concurrent requests with asyncio.""" import httpx async def run_concurrent(): async with httpx.AsyncClient(http2=True, verify=False, timeout=30.0) as client: # Make 10 concurrent requests tasks = [ client.get(f"{self.BASE_URL}/delay?seconds=0.2") for _ in range(10) ] responses = await asyncio.gather(*tasks) return responses responses = asyncio.run(run_concurrent()) assert len(responses) == 10 assert all(r.status_code == 200 for r in responses) assert all(r.http_version == "HTTP/2" for r in responses) def test_async_multiple_streams(self): """Test that multiple concurrent streams work over single HTTP/2 connection. 
This test verifies that HTTP/2 can handle multiple concurrent requests, which is the foundation of multiplexing. Performance benefits depend on client library implementation and network conditions. """ import httpx async def run_test(): async with httpx.AsyncClient(http2=True, verify=False, timeout=30.0) as client: # Send multiple concurrent requests tasks = [ client.get(f"{self.BASE_URL}/info") for _ in range(10) ] responses = await asyncio.gather(*tasks) return responses responses = asyncio.run(run_test()) # Verify all requests succeeded with HTTP/2 assert len(responses) == 10 assert all(r.status_code == 200 for r in responses) assert all(r.http_version == "HTTP/2" for r in responses) def run_basic_test(): """Run a basic test without pytest.""" print("Running basic HTTP/2 gevent test...") if not check_server_available(): print("ERROR: Server not available at https://localhost:8443") print("Start it with: docker compose up -d") return False try: import httpx except ImportError: print("ERROR: httpx not installed. 
Run: pip install httpx[http2]") return False try: with httpx.Client(http2=True, verify=False, timeout=30.0) as client: # Test basic request print(" Testing root endpoint...", end=" ") response = client.get("https://localhost:8443/") assert response.status_code == 200 assert response.http_version == "HTTP/2" print("OK") # Test echo print(" Testing echo endpoint...", end=" ") data = b"test data" response = client.post("https://localhost:8443/echo", content=data) assert response.content == data print("OK") # Test large response print(" Testing large response...", end=" ") response = client.get("https://localhost:8443/large") assert len(response.content) == 1024 * 1024 print("OK") # Test worker header print(" Testing gevent worker...", end=" ") response = client.get("https://localhost:8443/") assert response.headers.get('x-worker-type') == 'gevent' print("OK") print("\nAll basic tests passed!") return True except Exception as e: print(f"\nERROR: {e}") return False if __name__ == '__main__': # Check if pytest is available try: import pytest # Run with pytest if available sys.exit(pytest.main([__file__, '-v'])) except ImportError: # Run basic tests without pytest success = run_basic_test() sys.exit(0 if success else 1) benoitc-gunicorn-f5fb19e/examples/log_app.ini000066400000000000000000000007001514360242400214200ustar00rootroot00000000000000[app:main] paste.app_factory = log_app:app_factory [server:main] use = egg:gunicorn#main host = 127.0.0.1 port = 8080 workers = 3 [loggers] keys=root [handlers] keys=console [formatters] keys=default [logger_root] level=INFO qualname=root handlers=console [handler_console] class=StreamHandler formatter=default args=(sys.stdout, ) [formatter_default] format=[%(asctime)s] [%(levelname)-7s] - %(process)d:%(name)s:%(funcName)s - %(message)s benoitc-gunicorn-f5fb19e/examples/log_app.py000066400000000000000000000007361514360242400213020ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. 
# See the NOTICE for more information. import logging log = logging.getLogger(__name__) log.addHandler(logging.StreamHandler()) def app_factory(global_options, **local_options): return app def app(environ, start_response): start_response("200 OK", []) log.debug("Hello Debug!") log.info("Hello Info!") log.warn("Hello Warn!") log.error("Hello Error!") return [b"Hello World!\n"] benoitc-gunicorn-f5fb19e/examples/logging.conf000066400000000000000000000015111514360242400215740ustar00rootroot00000000000000[loggers] keys=root, gunicorn.error, gunicorn.access [handlers] keys=console, error_file, access_file [formatters] keys=generic, access [logger_root] level=INFO handlers=console [logger_gunicorn.error] level=INFO handlers=error_file propagate=1 qualname=gunicorn.error [logger_gunicorn.access] level=INFO handlers=access_file propagate=0 qualname=gunicorn.access [handler_console] class=StreamHandler formatter=generic args=(sys.stdout, ) [handler_error_file] class=logging.FileHandler formatter=generic args=('/tmp/gunicorn.error.log',) [handler_access_file] class=logging.FileHandler formatter=access args=('/tmp/gunicorn.access.log',) [formatter_generic] format=%(asctime)s [%(process)d] [%(levelname)s] %(message)s datefmt=%Y-%m-%d %H:%M:%S class=logging.Formatter [formatter_access] format=%(message)s class=logging.Formatter benoitc-gunicorn-f5fb19e/examples/longpoll.py000066400000000000000000000013031514360242400214760ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
import sys import time class TestIter: def __iter__(self): lines = [b'line 1\n', b'line 2\n'] for line in lines: yield line time.sleep(20) def app(environ, start_response): """Application which cooperatively pauses 20 seconds (needed to surpass normal timeouts) before responding""" status = '200 OK' response_headers = [ ('Content-type', 'text/plain'), ('Transfer-Encoding', "chunked"), ] sys.stdout.write('request received') sys.stdout.flush() start_response(status, response_headers) return TestIter() benoitc-gunicorn-f5fb19e/examples/multiapp.py000066400000000000000000000026401514360242400215100ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # # Run this application with: # # $ gunicorn multiapp:app # # And then visit: # # http://127.0.0.1:8000/app1url # http://127.0.0.1:8000/app2url # http://127.0.0.1:8000/this_is_a_404 # try: from routes import Mapper except ImportError: print("This example requires Routes to be installed") # Obviously you'd import your app callables # from different places... from test import app as app1 from test import app as app2 class Application: def __init__(self): self.map = Mapper() self.map.connect('app1', '/app1url', app=app1) self.map.connect('app2', '/app2url', app=app2) def __call__(self, environ, start_response): match = self.map.routematch(environ=environ) if not match: return self.error404(environ, start_response) return match[0]['app'](environ, start_response) def error404(self, environ, start_response): html = b"""\ 404 - Not Found

404 - Not Found

""" headers = [ ('Content-Type', 'text/html'), ('Content-Length', str(len(html))) ] start_response('404 Not Found', headers) return [html] app = Application() benoitc-gunicorn-f5fb19e/examples/multidomainapp.py000066400000000000000000000017131514360242400227000ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import re class SubDomainApp: """WSGI application to delegate requests based on domain name. """ def __init__(self, mapping): self.mapping = mapping def __call__(self, environ, start_response): host = environ.get("HTTP_HOST", "") host = host.split(":")[0] # strip port for pattern, app in self.mapping: if re.match("^" + pattern + "$", host): return app(environ, start_response) else: start_response("404 Not Found", []) return [b""] def hello(environ, start_response): start_response("200 OK", [("Content-Type", "text/plain")]) return [b"Hello, world\n"] def bye(environ, start_response): start_response("200 OK", [("Content-Type", "text/plain")]) return [b"Goodbye!\n"] app = SubDomainApp([ ("localhost", hello), (".*", bye) ]) benoitc-gunicorn-f5fb19e/examples/nginx.conf000066400000000000000000000036741514360242400213050ustar00rootroot00000000000000worker_processes 1; user nobody nogroup; # 'user nobody nobody;' for systems with 'nobody' as a group instead error_log /var/log/nginx/error.log warn; pid /var/run/nginx.pid; events { worker_connections 1024; # increase if you have lots of clients accept_mutex off; # set to 'on' if nginx worker_processes > 1 # 'use epoll;' to enable for Linux 2.6+ # 'use kqueue;' to enable for FreeBSD, OSX } http { include mime.types; # fallback in case we can't determine a type default_type application/octet-stream; access_log /var/log/nginx/access.log combined; sendfile on; upstream app_server { # fail_timeout=0 means we always retry an upstream even if it failed # to return a good HTTP response # for UNIX domain socket setups server 
unix:/tmp/gunicorn.sock fail_timeout=0; # for a TCP configuration # server 192.168.0.7:8000 fail_timeout=0; } server { # if no Host match, close the connection to prevent host spoofing listen 80 default_server; return 444; } server { # use 'listen 80 deferred;' for Linux # use 'listen 80 accept_filter=httpready;' for FreeBSD listen 80; client_max_body_size 4G; # set the correct host(s) for your site server_name example.com www.example.com; keepalive_timeout 5; # path for static files root /path/to/app/current/public; location / { # checks for static file, if not found proxy to app try_files $uri @proxy_to_app; } location @proxy_to_app { proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header Host $http_host; # we don't want nginx trying to do something clever with # redirects, we set the Host: header above already. proxy_redirect off; proxy_pass http://app_server; } error_page 500 502 503 504 /500.html; location = /500.html { root /path/to/app/current/public; } } } benoitc-gunicorn-f5fb19e/examples/read_django_settings.py000066400000000000000000000007751514360242400240410ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Use this config file in your script like this: $ gunicorn project_name.wsgi:application -c read_django_settings.py """ settings_dict = {} with open('frameworks/django/testing/testing/settings.py') as f: exec(f.read(), settings_dict) loglevel = 'warning' proc_name = 'web-project' workers = 1 if settings_dict['DEBUG']: loglevel = 'debug' reload = True proc_name += '_debug' benoitc-gunicorn-f5fb19e/examples/readline_app.py000066400000000000000000000017411514360242400223010ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
# # Simple example of readline, reading from a stream then echoing the response # # Usage: # # Launch a server with the app in a terminal # # $ gunicorn -w3 readline_app:app # # Then in another terminal launch the following command: # # $ curl -XPOST -d'test\r\ntest2\r\n' -H"Transfer-Encoding: Chunked" http://localhost:8000 from gunicorn import __version__ def app(environ, start_response): """Simplest possible application object""" status = '200 OK' response_headers = [ ('Content-type', 'text/plain'), ('Transfer-Encoding', "chunked"), ('X-Gunicorn-Version', __version__) ] start_response(status, response_headers) body = environ['wsgi.input'] lines = [] while True: line = body.readline() if line == b"": break print(line) lines.append(line) return iter(lines) benoitc-gunicorn-f5fb19e/examples/sendfile.py000066400000000000000000000011071514360242400214430ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # # Example code from Eventlet sources import os from wsgiref.validate import validator # @validator # breaks sendfile def app(environ, start_response): """Simplest possible application object""" status = '200 OK' fname = os.path.join(os.path.dirname(__file__), "hello.txt") f = open(fname, 'rb') response_headers = [ ('Content-type', 'text/plain'), ] start_response(status, response_headers) return environ['wsgi.file_wrapper'](f) benoitc-gunicorn-f5fb19e/examples/server.crt000066400000000000000000000023511514360242400213220ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDdDCCAlwCCQC3MfdcOMwt6DANBgkqhkiG9w0BAQUFADB8MQswCQYDVQQGEwJG UjERMA8GA1UECBMIUGljYXJkaWUxDjAMBgNVBAcTBUNyZWlsMREwDwYDVQQKEwhn dW5pY29ybjEVMBMGA1UEAxMMZ3VuaWNvcm4ub3JnMSAwHgYJKoZIhvcNAQkBFhF1 c2VyQGd1bmljb3JuLm9yZzAeFw0xMjEyMTQwODI2MDJaFw0xMzEyMTQwODI2MDJa MHwxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhQaWNhcmRpZTEOMAwGA1UEBxMFQ3Jl aWwxETAPBgNVBAoTCGd1bmljb3JuMRUwEwYDVQQDEwxndW5pY29ybi5vcmcxIDAe 
BgkqhkiG9w0BCQEWEXVzZXJAZ3VuaWNvcm4ub3JnMIIBIjANBgkqhkiG9w0BAQEF AAOCAQ8AMIIBCgKCAQEAy9RQSiGpB+HyjMpRCEfV9M/4g7gXq/qRizxDspJujoBz SW0d4FqMHaSRX2QOA+euhtlOYTgsvWZcyv5cvDfL1CtrNWSVBrlo7wIy5tg60Z3A JnWT/Zxj4WIqkPwdglB1sRBsI1Fn0o6nJu4HekZedXDK6fua4lOPfsQG84EhRQKS Mz2o7Nesk8/UMjb+5WoRmG7mxrpe0/OYlnydqzqwHUQ+I5CHl1kOhePo9ZBTFMA5 Ece8kGQs37rFCEy92xCYHDgp+CjjyYbeBskF3o0/a88K2bt8J7uXkn4h14HjtFHq fYnqn60cwyIx3T/uMUh6EmhKQezaw60xyIivmjH8tQIDAQABMA0GCSqGSIb3DQEB BQUAA4IBAQAKu7kzTAqONFI1qC6mnwAixSd7ml6RtyQRiIWjg4FyTJmS2NMlqUSI CiV1g1+pv2cy9amld8hoO17ISYFZqMoRxJgD5GuN4y1lUefFe95GHI9loubIJZlR 5KlZEvCiaAQoGvYiacf4BNkljyrwgPVM5e71dGon7jyghmV6yUaUL6+1J8BU/KYg jz8RtMtptqkwKPKQVfuDcr/eoH6uZwPRbyfqSui8SuMz3Df6Dnx1hOtlQRJC6eNo U9L3jkmCsbbMNBAz6iQjyFHFa9iqzwL7nvqZTryjmI5Dpn+BnT7Q5cduK+N5vt4+ RjNVrz/l6+nR68B5GO96zUTV3/KrEmFr -----END CERTIFICATE----- benoitc-gunicorn-f5fb19e/examples/server.key000066400000000000000000000032171514360242400213240ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIEpQIBAAKCAQEAy9RQSiGpB+HyjMpRCEfV9M/4g7gXq/qRizxDspJujoBzSW0d 4FqMHaSRX2QOA+euhtlOYTgsvWZcyv5cvDfL1CtrNWSVBrlo7wIy5tg60Z3AJnWT /Zxj4WIqkPwdglB1sRBsI1Fn0o6nJu4HekZedXDK6fua4lOPfsQG84EhRQKSMz2o 7Nesk8/UMjb+5WoRmG7mxrpe0/OYlnydqzqwHUQ+I5CHl1kOhePo9ZBTFMA5Ece8 kGQs37rFCEy92xCYHDgp+CjjyYbeBskF3o0/a88K2bt8J7uXkn4h14HjtFHqfYnq n60cwyIx3T/uMUh6EmhKQezaw60xyIivmjH8tQIDAQABAoIBAQDFzhTc3C2daLhp yS06S/xmyCz0JwNR8qir5qAL++8ue5ll+G61+yle2wX4/LBdOck1NE3MKye/5kbG +HImdj9od3pjJmk5TVV4HToorE7ofZ6rtA8aX1rOruWALiq0/EA6xSUsYSPQQoAU V4sKLqAceIly6Kk2WsE21CWqyfXvcQOtfBYmFwmPCImWecJLypeheEpz1U2EYl65 u6b0NsXeODrLXEAEFjdb6UBJtzRtTJ/OnbDvghu9xMjlT0Pj+inoAv/ePZB8bmvH XGhZo7dzgsDZ+eys7XnbeggUOhFImzCjO1f3pIVXWThGDgKIrpc9Evac2Q3AjTFY NV9HBV9BAoGBAOyWq7HDgeCEu54orPAmdkO4j/HFX+262BTQoVCg4OX3Iv9A/lH4 lpVGrFlK0qJF9jb7mjDmXP2LW0fwzyHe42DGFbZkKdfiMBuE+qoPeAV9s+SjE4H3 l3tHoAOFUt2wITcHK4EYjoLMAgrbRNiv9t/gqiMm1oIb3fkUbpOoGG3LAoGBANyN kLop3JfN1Kzto7gJq/tLS21joexTU+s4EJ+a4Q8KH1d47DLI+4119Ot+NWwi9X3S 
sbOKZOjfrGw9+HPI64i7hD9HPSK58IUbsfVR9vPlPei45inRfi6s7+EUzKifOKZU o1ecpOSPYQHZtDToGcQCTS0IFwMXHgrkP380We9/AoGBAI1wljyz8RVUxQWMs7bu h4187TFRGkR5i20GPSqCw3E4CkgnhuNihkO/+JF5VeuFf+jnCgtp7PX3Nh8QLATH x4+3XIup3goeQzxwh5rbnJlLyRxLEgKFDp6490SjlCLMhU7sjmmjUK+JXz82TzZs HF9DZPOW6G7oUg/y0xibSd95AoGAXpDEcU3pq50xh0QNYqei+gh6uthxYScJYF2V oxmBTjWE4riSbeQHF8xvy1k+BrOmluB0GQtJ4R+minK3yM1pUCM2vPsKl40qN6h8 UTdnr4OnW9WLunp8o/66i8OjTNmYLJk1wCcF/IoNigGSZuztv0FNXfWOCGEtHHZp U11bAnkCgYEAoU0sdFL3IfmxnNQ9CDmgXdJM0SpUm4ECd2jM/fRdgLelL+WislCF gHjZw+3mplIzqQ9DMwRkjbaIxP0H92OopOBIqmShWUuzWw/Dj0L8PGe/7skcwsGD /VLEkGzrxJwP4kokUu1kvLOqHM429JXsb8wO16iMQAB93yUZ+X8PGfQ= -----END RSA PRIVATE KEY----- benoitc-gunicorn-f5fb19e/examples/slowclient.py000066400000000000000000000011061514360242400220340ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import sys import time def app(environ, start_response): """Application which cooperatively pauses 10 seconds before responding""" data = b'Hello, World!\n' status = '200 OK' response_headers = [ ('Content-type', 'text/plain'), ('Content-Length', str(len(data))), ] sys.stdout.write('request received, pausing 10 seconds') sys.stdout.flush() time.sleep(10) start_response(status, response_headers) return iter([data]) benoitc-gunicorn-f5fb19e/examples/standalone_app.py000066400000000000000000000026151514360242400226470ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. #!/usr/bin/env python # # An example of a standalone application using the internal API of Gunicorn. # # $ python standalone_app.py # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
import multiprocessing import gunicorn.app.base def number_of_workers(): return (multiprocessing.cpu_count() * 2) + 1 def handler_app(environ, start_response): response_body = b'Works fine' status = '200 OK' response_headers = [ ('Content-Type', 'text/plain'), ] start_response(status, response_headers) return [response_body] class StandaloneApplication(gunicorn.app.base.BaseApplication): def __init__(self, app, options=None): self.options = options or {} self.application = app super().__init__() def load_config(self): config = {key: value for key, value in self.options.items() if key in self.cfg.settings and value is not None} for key, value in config.items(): self.cfg.set(key.lower(), value) def load(self): return self.application if __name__ == '__main__': options = { 'bind': '%s:%s' % ('127.0.0.1', '8080'), 'workers': number_of_workers(), } StandaloneApplication(handler_app, options).run() benoitc-gunicorn-f5fb19e/examples/streaming_chat/000077500000000000000000000000001514360242400222715ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/examples/streaming_chat/Dockerfile000066400000000000000000000005771514360242400242740ustar00rootroot00000000000000FROM python:3.12-slim WORKDIR /app # Install dependencies RUN pip install --no-cache-dir \ fastapi \ pydantic # Copy gunicorn source COPY . /app/gunicorn-src RUN pip install /app/gunicorn-src # Copy app COPY examples/streaming_chat /app/streaming_chat ENV PYTHONPATH=/app EXPOSE 8000 CMD ["gunicorn", "streaming_chat.main:app", "-c", "streaming_chat/gunicorn_conf.py"] benoitc-gunicorn-f5fb19e/examples/streaming_chat/README.md000066400000000000000000000124751514360242400235610ustar00rootroot00000000000000# Streaming Chat Example A FastAPI-based chat demo that simulates LLM token-by-token streaming, powered by Gunicorn's dirty workers for efficient long-running operations. 
## Overview This example demonstrates how to build a streaming chat API that: - Streams tokens word-by-word like ChatGPT (Server-Sent Events) - Uses dirty workers for the "inference" workload - Includes a browser-based chat UI for testing - Requires no ML dependencies (simulated responses) ## Architecture ``` ┌─────────────────┐ ┌──────────────────┐ ┌─────────────────────┐ │ Browser/curl │────►│ FastAPI (ASGI) │────►│ DirtyWorker │ │ SSE stream │ │ - /chat (SSE) │ │ - ChatApp │ │ │◄────│ - /chat/sync │◄────│ - Token generator │ └─────────────────┘ └──────────────────┘ └─────────────────────┘ │ ▼ text/event-stream data: {"token": "Hello"} data: {"token": " "} data: {"token": "world"} data: [DONE] ``` **Why streaming with dirty workers?** - Real LLM inference is slow (seconds to minutes) - Users expect to see responses appear gradually - Dirty workers keep the "model" loaded between requests - HTTP workers remain responsive during streaming ## Quick Start ### With Docker (recommended) ```bash cd examples/streaming_chat docker compose up --build ``` Then open http://localhost:8000 in your browser. ### Local Development ```bash # Install dependencies pip install fastapi pydantic # Run with gunicorn gunicorn examples.streaming_chat.main:app \ -c examples/streaming_chat/gunicorn_conf.py ``` ## API Reference ### POST /chat Stream a chat response using Server-Sent Events. **Request:** ```json { "prompt": "hello", "thinking": false } ``` **Response:** `text/event-stream` ``` data: {"token": "Hello"} data: {"token": "!"} data: {"token": " "} data: {"token": "I'm"} ... data: [DONE] ``` **Example with curl:** ```bash curl -N http://localhost:8000/chat \ -H "Content-Type: application/json" \ -d '{"prompt": "hello"}' ``` ### POST /chat/sync Non-streaming version that returns the complete response. **Request:** ```json { "prompt": "hello" } ``` **Response:** ```json { "response": "Hello! I'm a simulated AI assistant..." } ``` ### GET /health Health check endpoint. 
**Response:** ```json {"status": "ok"} ``` ### GET / Browser-based chat UI for testing. ## Configuration Edit `gunicorn_conf.py` to adjust: | Setting | Default | Description | |---------|---------|-------------| | `workers` | 2 | Number of HTTP workers | | `dirty_workers` | 1 | Number of dirty workers | | `dirty_timeout` | 60 | Max seconds per request | | `bind` | 0.0.0.0:8000 | Listen address | ## Prompts The simulated chat app responds to these keywords: | Keyword | Response | |---------|----------| | `hello`, `hi`, `hey` | Greeting message | | `explain` | Explanation of dirty workers | | `streaming` | How streaming works | | `code` | Example code snippet | | (default) | Generic thoughtful response | ## Features Demonstrated 1. **Token streaming** - Word-by-word output via generators 2. **SSE protocol** - Browser-compatible event streaming 3. **Async generators** - Using `stream_async()` from dirty client 4. **Thinking mode** - Multi-phase streaming with visible "thinking" 5. **Browser UI** - Interactive chat with cursor animation ## Testing Run the integration tests: ```bash # Start the service first docker compose up -d # Run tests pip install requests python test_streaming.py ``` ## Adapting for Real LLMs To use a real LLM instead of simulated responses: ```python # chat_app.py from gunicorn.dirty.app import DirtyApp class ChatApp(DirtyApp): def init(self): from transformers import pipeline self.generator = pipeline("text-generation", model="gpt2") def generate(self, prompt): for output in self.generator(prompt, max_new_tokens=100, do_sample=True): # Yield tokens as they're generated yield output["generated_text"] def close(self): del self.generator ``` Or with an API-based LLM: ```python class ChatApp(DirtyApp): def init(self): import openai self.client = openai.OpenAI() async def generate(self, prompt): stream = self.client.chat.completions.create( model="gpt-4", messages=[{"role": "user", "content": prompt}], stream=True ) for chunk in stream: if 
chunk.choices[0].delta.content: yield chunk.choices[0].delta.content ``` ## Production Considerations 1. **Real LLM**: Replace `ChatApp` with actual model inference 2. **GPU Support**: Add CUDA to Dockerfile for faster inference 3. **Rate Limiting**: Add FastAPI middleware for rate limiting 4. **Authentication**: Add API key validation 5. **Monitoring**: Add Prometheus metrics endpoint 6. **Timeouts**: Adjust `dirty_timeout` based on max response length benoitc-gunicorn-f5fb19e/examples/streaming_chat/__init__.py000066400000000000000000000003151514360242400244010ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # Streaming Chat Example # Demonstrates dirty worker streaming with simulated LLM token generation benoitc-gunicorn-f5fb19e/examples/streaming_chat/chat_app.py000066400000000000000000000114571514360242400244320ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import time import random from gunicorn.dirty.app import DirtyApp class ChatApp(DirtyApp): """Simulated LLM chat application demonstrating streaming responses. This app mimics LLM token-by-token generation without requiring heavy ML dependencies. Each response is streamed word-by-word with realistic timing delays. """ def init(self): """Initialize canned responses for different prompts.""" self.responses = { "hello": ( "Hello! I'm a simulated AI assistant running on Gunicorn's " "dirty workers. I can demonstrate streaming responses just " "like a real LLM, but without the heavy ML dependencies. " "How can I help you today?" ), "explain": ( "Dirty workers are separate processes that handle long-running " "tasks like ML inference. They keep models loaded in memory " "across requests, avoiding expensive reload times. HTTP workers " "remain lightweight and responsive while dirty workers handle " "the heavy computation. 
This architecture is inspired by " "Erlang's dirty schedulers." ), "streaming": ( "Streaming works by yielding chunks from a generator function. " "Each yield sends a chunk message through the IPC socket. The " "client receives chunks as they're produced, enabling real-time " "token-by-token display. This is perfect for LLM applications " "where users expect to see responses appear gradually." ), "code": ( "Here's a simple example:\n\n" "```python\n" "from gunicorn.dirty import get_dirty_client\n\n" "client = get_dirty_client()\n" "for token in client.stream('app:ChatApp', 'generate', prompt):\n" " print(token, end='', flush=True)\n" "```\n\n" "This streams tokens directly to the console as they arrive." ), "default": ( "I understand your question. Let me think about that for a " "moment. The key insight here is that streaming responses " "provide a much better user experience for long-running " "operations. Instead of waiting for the complete response, " "users see content appearing in real-time, which feels more " "interactive and responsive." ), } self.min_delay = 0.03 # Minimum delay between tokens (30ms) self.max_delay = 0.08 # Maximum delay between tokens (80ms) def generate(self, prompt): """Generate a streaming response for the given prompt. Yields tokens (words) one at a time with realistic delays to simulate LLM inference. Args: prompt: User's input prompt Yields: str: Individual tokens (words with trailing space) """ response = self._get_response(prompt) words = response.split() for i, word in enumerate(words): # Simulate variable inference time delay = random.uniform(self.min_delay, self.max_delay) time.sleep(delay) # Add space after word (except last word) if i < len(words) - 1: yield word + " " else: yield word def generate_with_thinking(self, prompt): """Generate response with visible 'thinking' phase. First yields thinking indicators, then streams the response. Demonstrates multi-phase streaming. 
Args: prompt: User's input prompt Yields: str: Thinking indicators followed by response tokens """ # Thinking phase yield "[thinking" for _ in range(3): time.sleep(0.3) yield "." yield "]\n\n" # Response phase yield from self.generate(prompt) def _get_response(self, prompt): """Match prompt to a canned response. Args: prompt: User's input prompt Returns: str: Matched response text """ prompt_lower = prompt.lower().strip() # Check for keyword matches for key, response in self.responses.items(): if key in prompt_lower: return response # Greeting patterns if any(g in prompt_lower for g in ["hi", "hey", "greetings"]): return self.responses["hello"] return self.responses["default"] def close(self): """Cleanup on shutdown.""" pass benoitc-gunicorn-f5fb19e/examples/streaming_chat/demo_capture.txt000066400000000000000000000107071514360242400255060ustar00rootroot00000000000000================================================================================ STREAMING CHAT DEMO CAPTURE Gunicorn Dirty Workers + FastAPI SSE ================================================================================ $ curl -s http://127.0.0.1:8000/health {"status":"ok"} ================================================================================ TEST 1: Hello Prompt ================================================================================ $ curl -N http://127.0.0.1:8000/chat -d '{"prompt": "hello"}' data: {"token": "Hello! "} data: {"token": "I'm "} data: {"token": "a "} data: {"token": "simulated "} data: {"token": "AI "} data: {"token": "assistant "} data: {"token": "running "} data: {"token": "on "} data: {"token": "Gunicorn's "} data: {"token": "dirty "} data: {"token": "workers. 
"} data: {"token": "I "} data: {"token": "can "} data: {"token": "demonstrate "} data: {"token": "streaming "} data: {"token": "responses "} data: {"token": "just "} data: {"token": "like "} data: {"token": "a "} data: {"token": "real "} data: {"token": "LLM, "} data: {"token": "but "} data: {"token": "without "} data: {"token": "the "} data: {"token": "heavy "} data: {"token": "ML "} data: {"token": "dependencies. "} data: {"token": "How "} data: {"token": "can "} data: {"token": "I "} data: {"token": "help "} data: {"token": "you "} data: {"token": "today?"} data: [DONE] ================================================================================ TEST 2: Explain Dirty Workers ================================================================================ $ curl -N http://127.0.0.1:8000/chat -d '{"prompt": "explain dirty workers"}' data: {"token": "Dirty "} data: {"token": "workers "} data: {"token": "are "} data: {"token": "separate "} data: {"token": "processes "} data: {"token": "that "} data: {"token": "handle "} data: {"token": "long-running "} data: {"token": "tasks "} data: {"token": "like "} data: {"token": "ML "} data: {"token": "inference. "} data: {"token": "They "} data: {"token": "keep "} data: {"token": "models "} data: {"token": "loaded "} data: {"token": "in "} data: {"token": "memory "} data: {"token": "across "} data: {"token": "requests, "} data: {"token": "avoiding "} data: {"token": "expensive "} data: {"token": "reload "} data: {"token": "times. "} data: {"token": "HTTP "} data: {"token": "workers "} data: {"token": "remain "} data: {"token": "lightweight "} data: {"token": "and "} data: {"token": "responsive "} data: {"token": "while "} data: {"token": "dirty "} data: {"token": "workers "} data: {"token": "handle "} data: {"token": "the "} data: {"token": "heavy "} data: {"token": "computation. 
"} data: {"token": "This "} data: {"token": "architecture "} data: {"token": "is "} data: {"token": "inspired "} data: {"token": "by "} data: {"token": "Erlang's "} data: {"token": "dirty "} data: {"token": "schedulers."} data: [DONE] ================================================================================ TEST 3: Sync Endpoint ================================================================================ $ curl -s http://127.0.0.1:8000/chat/sync -d '{"prompt": "hello"}' {"response":"Hello! I'm a simulated AI assistant running on Gunicorn's dirty workers. I can demonstrate streaming responses just like a real LLM, but without the heavy ML dependencies. How can I help you today?"} ================================================================================ DEMO COMPLETE ================================================================================ Browser UI available at: http://localhost:8000/ Features demonstrated: - Token-by-token SSE streaming - Async generators via dirty workers - Different responses based on keywords - Sync endpoint for comparison - Health check endpoint Server Logs: [INFO] Starting gunicorn 24.1.0 [INFO] Listening at: http://0.0.0.0:8000 (1) [INFO] Using worker: asgi [INFO] Spawned dirty arbiter (pid: 7) [INFO] Dirty arbiter starting (pid: 7) [INFO] Booting worker with pid: 8 [INFO] Dirty arbiter listening on /tmp/gunicorn-dirty-.../arbiter.sock [INFO] Spawned dirty worker (pid: 9) [INFO] Initialized dirty app: streaming_chat.chat_app:ChatApp [INFO] Dirty worker 9 listening on /tmp/gunicorn-dirty-.../worker-1.sock [INFO] ASGI server listening on http://0.0.0.0:8000 benoitc-gunicorn-f5fb19e/examples/streaming_chat/docker-compose.yml000066400000000000000000000005571514360242400257350ustar00rootroot00000000000000services: streaming-chat: build: context: ../.. 
dockerfile: examples/streaming_chat/Dockerfile ports: - "8000:8000" healthcheck: test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://127.0.0.1:8000/health', timeout=5)"] interval: 10s timeout: 5s retries: 5 start_period: 5s benoitc-gunicorn-f5fb19e/examples/streaming_chat/gunicorn_conf.py000066400000000000000000000004171514360242400254760ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. bind = "0.0.0.0:8000" workers = 2 worker_class = "asgi" # Dirty worker config dirty_apps = ["streaming_chat.chat_app:ChatApp"] dirty_workers = 1 dirty_timeout = 60 benoitc-gunicorn-f5fb19e/examples/streaming_chat/main.py000066400000000000000000000201551514360242400235720ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import json from fastapi import FastAPI from fastapi.responses import StreamingResponse, HTMLResponse from pydantic import BaseModel from gunicorn.dirty.client import get_dirty_client_async app = FastAPI( title="Streaming Chat Demo", description="Demonstrates dirty worker streaming with simulated LLM responses", ) class ChatRequest(BaseModel): prompt: str thinking: bool = False class ChatResponse(BaseModel): response: str @app.post("/chat") async def chat(request: ChatRequest): """Stream a chat response using Server-Sent Events. The response is streamed token-by-token, simulating LLM inference. Each token is sent as an SSE event with JSON data. 
Args: request: Chat request with prompt and optional thinking mode Returns: StreamingResponse with text/event-stream content type """ client = await get_dirty_client_async() action = "generate_with_thinking" if request.thinking else "generate" async def stream(): async for token in client.stream_async( "streaming_chat.chat_app:ChatApp", action, request.prompt ): data = json.dumps({"token": token}) yield f"data: {data}\n\n" yield "data: [DONE]\n\n" return StreamingResponse( stream(), media_type="text/event-stream", headers={ "Cache-Control": "no-cache", "Connection": "keep-alive", "X-Accel-Buffering": "no", # Disable nginx buffering } ) @app.post("/chat/sync", response_model=ChatResponse) async def chat_sync(request: ChatRequest): """Non-streaming chat endpoint for comparison. Waits for the complete response before returning. Useful for testing or when streaming isn't needed. Args: request: Chat request with prompt Returns: Complete response as JSON """ client = await get_dirty_client_async() action = "generate_with_thinking" if request.thinking else "generate" tokens = [] async for token in client.stream_async( "streaming_chat.chat_app:ChatApp", action, request.prompt ): tokens.append(token) return ChatResponse(response="".join(tokens)) @app.get("/health") async def health(): """Health check endpoint.""" return {"status": "ok"} @app.get("/", response_class=HTMLResponse) async def index(): """Simple chat UI for testing streaming.""" return """ Streaming Chat Demo

Streaming Chat Demo

This demo shows token-by-token streaming using Gunicorn's dirty workers.

hello explain streaming code
""" benoitc-gunicorn-f5fb19e/examples/streaming_chat/requirements.txt000066400000000000000000000000411514360242400255500ustar00rootroot00000000000000fastapi>=0.100.0 pydantic>=2.0.0 benoitc-gunicorn-f5fb19e/examples/streaming_chat/test_streaming.py000066400000000000000000000110131514360242400256670ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Integration tests for the streaming chat example.""" import json import os import requests def test_health_endpoint(): """Test the health check endpoint.""" base_url = os.environ.get("STREAMING_CHAT_URL", "http://127.0.0.1:8000") response = requests.get(f"{base_url}/health") assert response.status_code == 200 assert response.json() == {"status": "ok"} print("Health check: OK") def test_streaming_chat(): """Test that chat endpoint streams tokens via SSE.""" base_url = os.environ.get("STREAMING_CHAT_URL", "http://127.0.0.1:8000") response = requests.post( f"{base_url}/chat", json={"prompt": "hello"}, stream=True, headers={"Accept": "text/event-stream"} ) assert response.status_code == 200 assert response.headers.get("content-type") == "text/event-stream; charset=utf-8" tokens = [] for line in response.iter_lines(decode_unicode=True): if line.startswith("data: "): data = line[6:] if data == "[DONE]": break parsed = json.loads(data) tokens.append(parsed["token"]) # Verify we got multiple tokens (streaming worked) assert len(tokens) > 1, f"Expected multiple tokens, got {len(tokens)}" # Verify tokens form a coherent response full_response = "".join(tokens) assert len(full_response) > 10, "Response too short" assert "Hello" in full_response or "hello" in full_response.lower() print(f"Streaming chat: OK (received {len(tokens)} tokens)") def test_sync_chat(): """Test the non-streaming chat endpoint.""" base_url = os.environ.get("STREAMING_CHAT_URL", "http://127.0.0.1:8000") response = requests.post( f"{base_url}/chat/sync", json={"prompt": 
"hello"} ) assert response.status_code == 200 data = response.json() assert "response" in data assert len(data["response"]) > 10 print("Sync chat: OK") def test_thinking_mode(): """Test streaming with thinking phase enabled.""" base_url = os.environ.get("STREAMING_CHAT_URL", "http://127.0.0.1:8000") response = requests.post( f"{base_url}/chat", json={"prompt": "hello", "thinking": True}, stream=True ) assert response.status_code == 200 tokens = [] for line in response.iter_lines(decode_unicode=True): if line.startswith("data: "): data = line[6:] if data == "[DONE]": break parsed = json.loads(data) tokens.append(parsed["token"]) full_response = "".join(tokens) assert "[thinking" in full_response, "Thinking phase not present" assert "...]" in full_response or "..]\n" in full_response.replace(".", ""), \ "Thinking dots not present" print("Thinking mode: OK") def test_different_prompts(): """Test that different prompts get different responses.""" base_url = os.environ.get("STREAMING_CHAT_URL", "http://127.0.0.1:8000") prompts = ["hello", "explain dirty workers", "how does streaming work?"] responses = [] for prompt in prompts: response = requests.post( f"{base_url}/chat/sync", json={"prompt": prompt} ) assert response.status_code == 200 responses.append(response.json()["response"]) # Verify responses are different assert len(set(responses)) == len(responses), \ "Expected different responses for different prompts" print("Different prompts: OK") def test_sse_format(): """Test that SSE format is correct.""" base_url = os.environ.get("STREAMING_CHAT_URL", "http://127.0.0.1:8000") response = requests.post( f"{base_url}/chat", json={"prompt": "hello"}, stream=True ) raw_lines = [] for line in response.iter_lines(decode_unicode=True): raw_lines.append(line) # Check SSE format: lines should be "data: ..." 
or empty for line in raw_lines: assert line == "" or line.startswith("data: "), \ f"Invalid SSE line: {line}" # Should end with [DONE] data_lines = [line for line in raw_lines if line.startswith("data: ")] assert data_lines[-1] == "data: [DONE]", "Missing [DONE] terminator" print("SSE format: OK") if __name__ == "__main__": test_health_endpoint() test_streaming_chat() test_sync_chat() test_thinking_mode() test_different_prompts() test_sse_format() print("\nAll tests passed!") benoitc-gunicorn-f5fb19e/examples/supervisor.conf000066400000000000000000000002661514360242400223750ustar00rootroot00000000000000[program:gunicorn] command=/usr/local/bin/gunicorn main:application -c /path/to/project/gunicorn.conf.py directory=/path/to/project user=nobody autorestart=true redirect_stderr=true benoitc-gunicorn-f5fb19e/examples/test.py000066400000000000000000000011751514360242400206360ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # # Example code from Eventlet sources from wsgiref.validate import validator from gunicorn import __version__ @validator def app(environ, start_response): """Simplest possible application object""" data = b'Hello, World!\n' status = '200 OK' response_headers = [ ('Content-type', 'text/plain'), ('Content-Length', str(len(data))), ('X-Gunicorn-Version', __version__), ('Foo', 'B\u00e5r'), # Foo: Bår ] start_response(status, response_headers) return iter([data]) benoitc-gunicorn-f5fb19e/examples/timeout.py000066400000000000000000000011261514360242400213410ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import sys import time def app(environ, start_response): """Application which pauses 35 seconds before responding. 
the worker will timeout in default case.""" data = b'Hello, World!\n' status = '200 OK' response_headers = [ ('Content-type', 'text/plain'), ('Content-Length', str(len(data))), ] sys.stdout.write('request will timeout') sys.stdout.flush() time.sleep(35) start_response(status, response_headers) return iter([data]) benoitc-gunicorn-f5fb19e/examples/websocket/000077500000000000000000000000001514360242400212675ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/examples/websocket/gevent_websocket.py000066400000000000000000000366231514360242400252110ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import collections import errno import re import hashlib import base64 from base64 import b64encode, b64decode import socket import struct import logging from socket import error as SocketError import gevent from gunicorn.workers.base_async import ALREADY_HANDLED logger = logging.getLogger(__name__) WS_KEY = b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11" class WebSocketWSGI: def __init__(self, handler): self.handler = handler def verify_client(self, ws): pass def _get_key_value(self, key_value): if not key_value: return key_number = int(re.sub("\\D", "", key_value)) spaces = re.subn(" ", "", key_value)[1] if key_number % spaces != 0: return part = key_number / spaces return part def __call__(self, environ, start_response): if not (environ.get('HTTP_CONNECTION').find('Upgrade') != -1 and environ['HTTP_UPGRADE'].lower() == 'websocket'): # need to check a few more things here for true compliance start_response('400 Bad Request', [('Connection','close')]) return [] sock = environ['gunicorn.socket'] version = environ.get('HTTP_SEC_WEBSOCKET_VERSION') ws = WebSocket(sock, environ, version) handshake_reply = ("HTTP/1.1 101 Switching Protocols\r\n" "Upgrade: websocket\r\n" "Connection: Upgrade\r\n") key = environ.get('HTTP_SEC_WEBSOCKET_KEY') if key: ws_key = base64.b64decode(key) if len(ws_key) != 16: 
start_response('400 Bad Request', [('Connection','close')]) return [] protocols = [] subprotocols = environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL') ws_protocols = [] if subprotocols: for s in subprotocols.split(','): s = s.strip() if s in protocols: ws_protocols.append(s) if ws_protocols: handshake_reply += 'Sec-WebSocket-Protocol: %s\r\n' % ', '.join(ws_protocols) exts = [] extensions = environ.get('HTTP_SEC_WEBSOCKET_EXTENSIONS') ws_extensions = [] if extensions: for ext in extensions.split(','): ext = ext.strip() if ext in exts: ws_extensions.append(ext) if ws_extensions: handshake_reply += 'Sec-WebSocket-Extensions: %s\r\n' % ', '.join(ws_extensions) key_hash = hashlib.sha1() key_hash.update(key.encode()) key_hash.update(WS_KEY) handshake_reply += ( "Sec-WebSocket-Origin: %s\r\n" "Sec-WebSocket-Location: ws://%s%s\r\n" "Sec-WebSocket-Version: %s\r\n" "Sec-WebSocket-Accept: %s\r\n\r\n" % ( environ.get('HTTP_ORIGIN'), environ.get('HTTP_HOST'), ws.path, version, base64.b64encode(key_hash.digest()).decode() )) else: handshake_reply += ( "WebSocket-Origin: %s\r\n" "WebSocket-Location: ws://%s%s\r\n\r\n" % ( environ.get('HTTP_ORIGIN'), environ.get('HTTP_HOST'), ws.path)) sock.sendall(handshake_reply.encode()) try: self.handler(ws) except BrokenPipeError: pass else: raise # use this undocumented feature of grainbows to ensure that it # doesn't barf on the fact that we didn't call start_response return ALREADY_HANDLED class WebSocket: """A websocket object that handles the details of serialization/deserialization to the socket. The primary way to interact with a :class:`WebSocket` object is to call :meth:`send` and :meth:`wait` in order to pass messages back and forth with the browser. Also available are the following properties: path The path value of the request. This is the same as the WSGI PATH_INFO variable, but more convenient. protocol The value of the Websocket-Protocol header. origin The value of the 'Origin' header. 
environ The full WSGI environment for this request. """ def __init__(self, sock, environ, version=76): """ :param socket: The gevent socket :type socket: :class:`gevent.socket.socket` :param environ: The wsgi environment :param version: The WebSocket spec version to follow (default is 76) """ self.socket = sock self.origin = environ.get('HTTP_ORIGIN') self.protocol = environ.get('HTTP_WEBSOCKET_PROTOCOL') self.path = environ.get('PATH_INFO') self.environ = environ self.version = version self.websocket_closed = False self._buf = "" self._msgs = collections.deque() #self._sendlock = semaphore.Semaphore() @staticmethod def encode_hybi(buf, opcode, base64=False): """ Encode a HyBi style WebSocket frame. Optional opcode: 0x0 - continuation 0x1 - text frame (base64 encode buf) 0x2 - binary frame (use raw buf) 0x8 - connection close 0x9 - ping 0xA - pong """ if base64: buf = b64encode(buf) else: buf = buf.encode() b1 = 0x80 | (opcode & 0x0f) # FIN + opcode payload_len = len(buf) if payload_len <= 125: header = struct.pack('>BB', b1, payload_len) elif payload_len > 125 and payload_len < 65536: header = struct.pack('>BBH', b1, 126, payload_len) elif payload_len >= 65536: header = struct.pack('>BBQ', b1, 127, payload_len) #print("Encoded: %s" % repr(header + buf)) return header + buf, len(header), 0 @staticmethod def decode_hybi(buf, base64=False): """ Decode HyBi style WebSocket packets. 
Returns: {'fin' : 0_or_1, 'opcode' : number, 'mask' : 32_bit_number, 'hlen' : header_bytes_number, 'length' : payload_bytes_number, 'payload' : decoded_buffer, 'left' : bytes_left_number, 'close_code' : number, 'close_reason' : string} """ f = {'fin' : 0, 'opcode' : 0, 'mask' : 0, 'hlen' : 2, 'length' : 0, 'payload' : None, 'left' : 0, 'close_code' : None, 'close_reason' : None} blen = len(buf) f['left'] = blen if blen < f['hlen']: return f # Incomplete frame header b1, b2 = struct.unpack_from(">BB", buf) f['opcode'] = b1 & 0x0f f['fin'] = (b1 & 0x80) >> 7 has_mask = (b2 & 0x80) >> 7 f['length'] = b2 & 0x7f if f['length'] == 126: f['hlen'] = 4 if blen < f['hlen']: return f # Incomplete frame header (f['length'],) = struct.unpack_from('>xxH', buf) elif f['length'] == 127: f['hlen'] = 10 if blen < f['hlen']: return f # Incomplete frame header (f['length'],) = struct.unpack_from('>xxQ', buf) full_len = f['hlen'] + has_mask * 4 + f['length'] if blen < full_len: # Incomplete frame return f # Incomplete frame header # Number of bytes that are part of the next frame(s) f['left'] = blen - full_len # Process 1 frame if has_mask: # unmask payload f['mask'] = buf[f['hlen']:f['hlen']+4] b = c = '' if f['length'] >= 4: data = struct.unpack('= 2: f['close_code'] = struct.unpack_from(">H", f['payload']) if f['length'] > 3: f['close_reason'] = f['payload'][2:] return f @staticmethod def _pack_message(message): """Pack the message inside ``00`` and ``FF`` As per the dataframing section (5.3) for the websocket spec """ if isinstance(message, str): message = message.encode('utf-8') packed = "\x00%s\xFF" % message return packed def _parse_messages(self): """ Parses for messages in the buffer *buf*. It is assumed that the buffer contains the start character for a message, but that it may contain only part of the rest of the message. 
Returns an array of messages, and the buffer remainder that didn't contain any full messages.""" msgs = [] end_idx = 0 buf = self._buf while buf: if self.version in ['7', '8', '13']: frame = self.decode_hybi(buf, base64=False) #print("Received buf: %s, frame: %s" % (repr(buf), frame)) if frame['payload'] == None: break else: if frame['opcode'] == 0x8: # connection close self.websocket_closed = True break #elif frame['opcode'] == 0x1: else: msgs.append(frame['payload']); #msgs.append(frame['payload'].decode('utf-8', 'replace')); #buf = buf[-frame['left']:] if frame['left']: buf = buf[-frame['left']:] else: buf = '' else: frame_type = ord(buf[0]) if frame_type == 0: # Normal message. end_idx = buf.find("\xFF") if end_idx == -1: #pragma NO COVER break msgs.append(buf[1:end_idx].decode('utf-8', 'replace')) buf = buf[end_idx+1:] elif frame_type == 255: # Closing handshake. assert ord(buf[1]) == 0, "Unexpected closing handshake: %r" % buf self.websocket_closed = True break else: raise ValueError("Don't understand how to parse this type of message: %r" % buf) self._buf = buf return msgs def send(self, message): """Send a message to the browser. *message* should be convertible to a string; unicode objects should be encodable as utf-8. Raises socket.error with errno of 32 (broken pipe) if the socket has already been closed by the client.""" if self.version in ['7', '8', '13']: packed, lenhead, lentail = self.encode_hybi(message, opcode=0x01, base64=False) else: packed = self._pack_message(message) # if two greenthreads are trying to send at the same time # on the same socket, sendlock prevents interleaving and corruption #self._sendlock.acquire() try: self.socket.sendall(packed) finally: pass #self._sendlock.release() def wait(self): """Waits for and deserializes messages. Returns a single message; the oldest not yet processed. If the client has already closed the connection, returns None. 
This is different from normal socket behavior because the empty string is a valid websocket message.""" while not self._msgs: # Websocket might be closed already. if self.websocket_closed: return None # no parsed messages, must mean buf needs more data delta = self.socket.recv(8096) if delta == b'': return None self._buf += delta msgs = self._parse_messages() self._msgs.extend(msgs) return self._msgs.popleft() def _send_closing_frame(self, ignore_send_errors=False): """Sends the closing frame to the client, if required.""" if self.version in ['7', '8', '13'] and not self.websocket_closed: msg = '' #if code != None: # msg = struct.pack(">H%ds" % (len(reason)), code) buf, h, t = self.encode_hybi(msg, opcode=0x08, base64=False) self.socket.sendall(buf) self.websocket_closed = True elif self.version == 76 and not self.websocket_closed: try: self.socket.sendall(b"\xff\x00") except SocketError: # Sometimes, like when the remote side cuts off the connection, # we don't care about this. if not ignore_send_errors: #pragma NO COVER raise self.websocket_closed = True def close(self): """Forcibly close the websocket; generally it is preferable to return from the handler method.""" self._send_closing_frame() self.socket.shutdown(True) self.socket.close() # demo app import os import random def handle(ws): """ This is the websocket handler function. 
Note that we can dispatch based on path in here, too.""" if ws.path == '/echo': while True: m = ws.wait() if m is None: break ws.send(m) elif ws.path == '/data': for i in range(10000): ws.send("0 %s %s\n" % (i, random.random())) gevent.sleep(0.1) wsapp = WebSocketWSGI(handle) def app(environ, start_response): """ This resolves to the web page or the websocket depending on the path.""" if environ['PATH_INFO'] == '/' or environ['PATH_INFO'] == "": data = open(os.path.join( os.path.dirname(__file__), 'websocket.html')).read() data = data % environ start_response('200 OK', [('Content-Type', 'text/html'), ('Content-Length', str(len(data)))]) return [data.encode()] else: return wsapp(environ, start_response) benoitc-gunicorn-f5fb19e/examples/websocket/websocket.html000066400000000000000000000024501514360242400241440ustar00rootroot00000000000000

Plot

benoitc-gunicorn-f5fb19e/examples/when_ready.conf.py000066400000000000000000000021461514360242400227270ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import signal import commands import threading import time max_mem = 100000 class MemoryWatch(threading.Thread): def __init__(self, server, max_mem): super().__init__() self.daemon = True self.server = server self.max_mem = max_mem self.timeout = server.timeout / 2 def memory_usage(self, pid): try: out = commands.getoutput("ps -o rss -p %s" % pid) except OSError: return -1 used_mem = sum(int(x) for x in out.split('\n')[1:]) return used_mem def run(self): while True: for (pid, worker) in list(self.server.WORKERS.items()): if self.memory_usage(pid) > self.max_mem: self.server.log.info("Pid %s killed (memory usage > %s)", pid, self.max_mem) self.server.kill_worker(pid, signal.SIGTERM) time.sleep(self.timeout) def when_ready(server): mw = MemoryWatch(server, max_mem) mw.start() benoitc-gunicorn-f5fb19e/gunicorn/000077500000000000000000000000001514360242400173075ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/gunicorn/__init__.py000066400000000000000000000004011514360242400214130ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. version_info = (25, 1, 0) __version__ = ".".join([str(v) for v in version_info]) SERVER = "gunicorn" SERVER_SOFTWARE = "%s/%s" % (SERVER, __version__) benoitc-gunicorn-f5fb19e/gunicorn/__main__.py000066400000000000000000000005221514360242400214000ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.app.wsgiapp import run if __name__ == "__main__": # see config.py - argparse defaults to basename(argv[0]) == "__main__.py" # todo: let runpy.run_module take care of argv[0] rewriting run(prog="gunicorn") benoitc-gunicorn-f5fb19e/gunicorn/app/000077500000000000000000000000001514360242400200675ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/gunicorn/app/__init__.py000066400000000000000000000001511514360242400221750ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. benoitc-gunicorn-f5fb19e/gunicorn/app/base.py000066400000000000000000000163121514360242400213560ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import importlib.util import importlib.machinery import os import sys import traceback from gunicorn import util from gunicorn.arbiter import Arbiter from gunicorn.config import Config, get_default_config_file from gunicorn import debug class BaseApplication: """ An application interface for configuring and loading the various necessities for any given web framework. """ def __init__(self, usage=None, prog=None): self.usage = usage self.cfg = None self.callable = None self.prog = prog self.logger = None self.do_load_config() def do_load_config(self): """ Loads the configuration """ try: self.load_default_config() self.load_config() except Exception as e: print("\nError: %s" % str(e), file=sys.stderr) sys.stderr.flush() sys.exit(1) def load_default_config(self): # init configuration self.cfg = Config(self.usage, prog=self.prog) def init(self, parser, opts, args): raise NotImplementedError def load(self): raise NotImplementedError def load_config(self): """ This method is used to load the configuration from one or several input(s). Custom Command line, configuration file. You have to override this method in your class. 
""" raise NotImplementedError def reload(self): self.do_load_config() if self.cfg.spew: debug.spew() def wsgi(self): if self.callable is None: self.callable = self.load() return self.callable def run(self): try: Arbiter(self).run() except RuntimeError as e: print("\nError: %s\n" % e, file=sys.stderr) sys.stderr.flush() sys.exit(1) class Application(BaseApplication): # 'init' and 'load' methods are implemented by WSGIApplication. # pylint: disable=abstract-method def chdir(self): # chdir to the configured path before loading, # default is the current dir os.chdir(self.cfg.chdir) # add the path to sys.path if self.cfg.chdir not in sys.path: sys.path.insert(0, self.cfg.chdir) def get_config_from_filename(self, filename): if not os.path.exists(filename): raise RuntimeError("%r doesn't exist" % filename) ext = os.path.splitext(filename)[1] try: module_name = '__config__' if ext in [".py", ".pyc"]: spec = importlib.util.spec_from_file_location(module_name, filename) else: msg = "configuration file should have a valid Python extension.\n" util.warn(msg) loader_ = importlib.machinery.SourceFileLoader(module_name, filename) spec = importlib.util.spec_from_file_location(module_name, filename, loader=loader_) mod = importlib.util.module_from_spec(spec) sys.modules[module_name] = mod spec.loader.exec_module(mod) except Exception: print("Failed to read config file: %s" % filename, file=sys.stderr) traceback.print_exc() sys.stderr.flush() sys.exit(1) return vars(mod) def get_config_from_module_name(self, module_name): return vars(importlib.import_module(module_name)) def load_config_from_module_name_or_filename(self, location): """ Loads the configuration file: the file is a python file, otherwise raise an RuntimeError Exception or stop the process if the configuration file contains a syntax error. 
""" if location.startswith("python:"): module_name = location[len("python:"):] cfg = self.get_config_from_module_name(module_name) else: if location.startswith("file:"): filename = location[len("file:"):] else: filename = location cfg = self.get_config_from_filename(filename) for k, v in cfg.items(): # Ignore unknown names if k not in self.cfg.settings: continue try: self.cfg.set(k.lower(), v) except Exception: print("Invalid value for %s: %s\n" % (k, v), file=sys.stderr) sys.stderr.flush() raise return cfg def load_config_from_file(self, filename): return self.load_config_from_module_name_or_filename(location=filename) def load_config(self): # parse console args parser = self.cfg.parser() args = parser.parse_args() # optional settings from apps cfg = self.init(parser, args, args.args) # set up import paths and follow symlinks self.chdir() # Load up the any app specific configuration if cfg: for k, v in cfg.items(): self.cfg.set(k.lower(), v) env_args = parser.parse_args(self.cfg.get_cmd_args_from_env()) if args.config: self.load_config_from_file(args.config) elif env_args.config: self.load_config_from_file(env_args.config) else: default_config = get_default_config_file() if default_config is not None: self.load_config_from_file(default_config) # Load up environment configuration for k, v in vars(env_args).items(): if v is None: continue if k == "args": continue self.cfg.set(k.lower(), v) # Lastly, update the configuration with any command line settings. 
for k, v in vars(args).items(): if v is None: continue if k == "args": continue self.cfg.set(k.lower(), v) # current directory might be changed by the config now # set up import paths and follow symlinks self.chdir() def run(self): if self.cfg.print_config: print(self.cfg) if self.cfg.print_config or self.cfg.check_config: try: self.load() except Exception: msg = "\nError while loading the application:\n" print(msg, file=sys.stderr) traceback.print_exc() sys.stderr.flush() sys.exit(1) sys.exit(0) if self.cfg.spew: debug.spew() if self.cfg.daemon: if os.environ.get('NOTIFY_SOCKET'): msg = "Warning: you shouldn't specify `daemon = True`" \ " when launching by systemd with `Type = notify`" print(msg, file=sys.stderr, flush=True) util.daemonize(self.cfg.enable_stdio_inheritance) # set python paths if self.cfg.pythonpath: paths = self.cfg.pythonpath.split(",") for path in paths: pythonpath = os.path.abspath(path) if pythonpath not in sys.path: sys.path.insert(0, pythonpath) super().run() benoitc-gunicorn-f5fb19e/gunicorn/app/pasterapp.py000066400000000000000000000037401514360242400224440ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import configparser import os from paste.deploy import loadapp from gunicorn.app.wsgiapp import WSGIApplication from gunicorn.config import get_default_config_file def get_wsgi_app(config_uri, name=None, defaults=None): if ':' not in config_uri: config_uri = "config:%s" % config_uri return loadapp( config_uri, name=name, relative_to=os.getcwd(), global_conf=defaults, ) def has_logging_config(config_file): parser = configparser.ConfigParser() parser.read([config_file]) return parser.has_section('loggers') def serve(app, global_conf, **local_conf): """\ A Paste Deployment server runner. 
Example configuration: [server:main] use = egg:gunicorn#main host = 127.0.0.1 port = 5000 """ config_file = global_conf['__file__'] gunicorn_config_file = local_conf.pop('config', None) host = local_conf.pop('host', '') port = local_conf.pop('port', '') if host and port: local_conf['bind'] = '%s:%s' % (host, port) elif host: local_conf['bind'] = host.split(',') class PasterServerApplication(WSGIApplication): def load_config(self): self.cfg.set("default_proc_name", config_file) if has_logging_config(config_file): self.cfg.set("logconfig", config_file) if gunicorn_config_file: self.load_config_from_file(gunicorn_config_file) else: default_gunicorn_config_file = get_default_config_file() if default_gunicorn_config_file is not None: self.load_config_from_file(default_gunicorn_config_file) for k, v in local_conf.items(): if v is not None: self.cfg.set(k.lower(), v) def load(self): return app PasterServerApplication().run() benoitc-gunicorn-f5fb19e/gunicorn/app/wsgiapp.py000066400000000000000000000036041514360242400221160ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
import os from gunicorn.errors import ConfigError from gunicorn.app.base import Application from gunicorn import util class WSGIApplication(Application): def init(self, parser, opts, args): self.app_uri = None if opts.paste: from .pasterapp import has_logging_config config_uri = os.path.abspath(opts.paste) config_file = config_uri.split('#')[0] if not os.path.exists(config_file): raise ConfigError("%r not found" % config_file) self.cfg.set("default_proc_name", config_file) self.app_uri = config_uri if has_logging_config(config_file): self.cfg.set("logconfig", config_file) return if len(args) > 0: self.cfg.set("default_proc_name", args[0]) self.app_uri = args[0] def load_config(self): super().load_config() if self.app_uri is None: if self.cfg.wsgi_app is not None: self.app_uri = self.cfg.wsgi_app else: raise ConfigError("No application module specified.") def load_wsgiapp(self): return util.import_app(self.app_uri) def load_pasteapp(self): from .pasterapp import get_wsgi_app return get_wsgi_app(self.app_uri, defaults=self.cfg.paste_global_conf) def load(self): if self.cfg.paste is not None: return self.load_pasteapp() else: return self.load_wsgiapp() def run(prog=None): """\ The ``gunicorn`` command line runner for launching Gunicorn with generic WSGI applications. """ from gunicorn.app.wsgiapp import WSGIApplication WSGIApplication("%(prog)s [OPTIONS] [APP_MODULE]", prog=prog).run() if __name__ == '__main__': run() benoitc-gunicorn-f5fb19e/gunicorn/arbiter.py000066400000000000000000001004231514360242400213110ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
import errno import os import queue import random import signal import sys import time import traceback import socket from gunicorn.errors import HaltServer, AppImportError from gunicorn.pidfile import Pidfile from gunicorn import sock, systemd, util from gunicorn import __version__, SERVER_SOFTWARE # gunicorn.dirty is imported lazily in spawn_dirty_arbiter() for gevent compatibility class Arbiter: """ Arbiter maintain the workers processes alive. It launches or kills them if needed. It also manages application reloading via SIGHUP/USR2. """ # A flag indicating if a worker failed to # to boot. If a worker process exist with # this error code, the arbiter will terminate. WORKER_BOOT_ERROR = 3 # A flag indicating if an application failed to be loaded APP_LOAD_ERROR = 4 START_CTX = {} LISTENERS = [] WORKERS = {} # Sentinel value for non-signal wakeups WAKEUP_REQUEST = signal.NSIG SIGNALS = [getattr(signal, "SIG%s" % x) for x in "HUP QUIT INT TERM TTIN TTOU USR1 USR2 WINCH".split()] SIG_NAMES = dict( (getattr(signal, name), name[3:].lower()) for name in dir(signal) if name[:3] == "SIG" and name[3] != "_" ) def __init__(self, app): os.environ["SERVER_SOFTWARE"] = SERVER_SOFTWARE self._num_workers = None self._last_logged_active_worker_count = None self.log = None # Signal queue - SimpleQueue is reentrant-safe for signal handlers self.SIG_QUEUE = queue.SimpleQueue() self.setup(app) self.pidfile = None self.systemd = False self.worker_age = 0 self.reexec_pid = 0 self.master_pid = 0 self.master_name = "Master" # Dirty arbiter process self.dirty_arbiter_pid = 0 self.dirty_arbiter = None self.dirty_pidfile = None # Well-known location for orphan detection # Control socket server self._control_server = None # Stats tracking self._stats = { 'start_time': None, 'workers_spawned': 0, 'workers_killed': 0, 'reloads': 0, } cwd = util.getcwd() args = sys.argv[:] args.insert(0, sys.executable) # init start context self.START_CTX = { "args": args, "cwd": cwd, 0: sys.executable } def 
_get_num_workers(self): return self._num_workers def _set_num_workers(self, value): old_value = self._num_workers self._num_workers = value self.cfg.nworkers_changed(self, value, old_value) num_workers = property(_get_num_workers, _set_num_workers) def setup(self, app): self.app = app self.cfg = app.cfg if self.log is None: self.log = self.cfg.logger_class(app.cfg) # reopen files if 'GUNICORN_PID' in os.environ: self.log.reopen_files() self.worker_class = self.cfg.worker_class self.address = self.cfg.address self.num_workers = self.cfg.workers self.timeout = self.cfg.timeout self.proc_name = self.cfg.proc_name self.log.debug('Current configuration:\n{0}'.format( '\n'.join( ' {0}: {1}'.format(config, value.value) for config, value in sorted(self.cfg.settings.items(), key=lambda setting: setting[1])))) # set environment' variables if self.cfg.env: for k, v in self.cfg.env.items(): os.environ[k] = v if self.cfg.preload_app: self.app.wsgi() def start(self): """\ Initialize the arbiter. Start listening and set pidfile if needed. 
""" self.log.info("Starting gunicorn %s", __version__) # Initialize stats tracking self._stats['start_time'] = time.time() if 'GUNICORN_PID' in os.environ: self.master_pid = int(os.environ.get('GUNICORN_PID')) self.proc_name = self.proc_name + ".2" self.master_name = "Master.2" self.pid = os.getpid() if self.cfg.pidfile is not None: pidname = self.cfg.pidfile if self.master_pid != 0: pidname += ".2" self.pidfile = Pidfile(pidname) self.pidfile.create(self.pid) self.cfg.on_starting(self) self.init_signals() if not self.LISTENERS: fds = None listen_fds = systemd.listen_fds() if listen_fds: self.systemd = True fds = range(systemd.SD_LISTEN_FDS_START, systemd.SD_LISTEN_FDS_START + listen_fds) elif self.master_pid: fds = [] for fd in os.environ.pop('GUNICORN_FD').split(','): fds.append(int(fd)) if not (self.cfg.reuse_port and hasattr(socket, 'SO_REUSEPORT')): self.LISTENERS = sock.create_sockets(self.cfg, self.log, fds) listeners_str = ",".join([str(lnr) for lnr in self.LISTENERS]) self.log.debug("Arbiter booted") self.log.info("Listening at: %s (%s)", listeners_str, self.pid) self.log.info("Using worker: %s", self.cfg.worker_class_str) systemd.sd_notify("READY=1\nSTATUS=Gunicorn arbiter booted", self.log) # check worker class requirements if hasattr(self.worker_class, "check_config"): self.worker_class.check_config(self.cfg, self.log) # Start dirty arbiter if configured if self.cfg.dirty_workers > 0 and self.cfg.dirty_apps: self.spawn_dirty_arbiter() # Start control socket server self._start_control_server() self.cfg.when_ready(self) def init_signals(self): """\ Initialize master signal handling. Most of the signals are queued. Child signals only wake up the master. """ self.log.close_on_exec() # initialize all signals for s in self.SIGNALS: signal.signal(s, self.signal) signal.signal(signal.SIGCHLD, self.signal_chld) def signal(self, sig, frame): """Signal handler - NO LOGGING, just queue the signal.""" self.SIG_QUEUE.put_nowait(sig) def run(self): "Main master loop." 
self.start() util._setproctitle("master [%s]" % self.proc_name) try: self.manage_workers() while True: self.maybe_promote_master() # Wait for and process signals for sig in self.wait_for_signals(timeout=1.0): if sig not in self.SIG_NAMES: self.log.info("Ignoring unknown signal: %s", sig) continue signame = self.SIG_NAMES.get(sig) handler = getattr(self, "handle_%s" % signame, None) if not handler: self.log.error("Unhandled signal: %s", signame) continue # Log SIGCHLD at debug level since it's frequent log_level = self.log.debug if sig == signal.SIGCHLD else self.log.info log_level("Handling signal: %s", signame) handler() self.murder_workers() self.manage_workers() self.manage_dirty_arbiter() except (StopIteration, KeyboardInterrupt): self.halt() except HaltServer as inst: self.halt(reason=inst.reason, exit_status=inst.exit_status) except SystemExit: raise except Exception: self.log.error("Unhandled exception in main loop", exc_info=True) self.stop(False) if self.pidfile is not None: self.pidfile.unlink() sys.exit(-1) def signal_chld(self, sig, frame): """SIGCHLD signal handler - NO LOGGING, just queue the signal.""" self.SIG_QUEUE.put_nowait(sig) def handle_chld(self): """SIGCHLD handling - called from main loop, safe to log.""" self.reap_workers() self.reap_dirty_arbiter() # SIGCLD is an alias for SIGCHLD on Linux. The SIG_NAMES dict may map # to either "chld" or "cld" depending on iteration order of dir(signal). handle_cld = handle_chld def handle_hup(self): """\ HUP handling. 
- Reload configuration - Start the new worker processes with a new configuration - Gracefully shutdown the old worker processes """ self.log.info("Hang up: %s", self.master_name) self.reload() # Forward to dirty arbiter if self.dirty_arbiter_pid: self.kill_dirty_arbiter(signal.SIGHUP) def handle_term(self): "SIGTERM handling" raise StopIteration def handle_int(self): "SIGINT handling" self.stop(False) raise StopIteration def handle_quit(self): "SIGQUIT handling" self.stop(False) raise StopIteration def handle_ttin(self): """\ SIGTTIN handling. Increases the number of workers by one. """ self.num_workers += 1 self.manage_workers() def handle_ttou(self): """\ SIGTTOU handling. Decreases the number of workers by one. """ if self.num_workers <= 1: return self.num_workers -= 1 self.manage_workers() def handle_usr1(self): """\ SIGUSR1 handling. Kill all workers by sending them a SIGUSR1 """ self.log.reopen_files() self.kill_workers(signal.SIGUSR1) # Forward to dirty arbiter if self.dirty_arbiter_pid: self.kill_dirty_arbiter(signal.SIGUSR1) def handle_usr2(self): """\ SIGUSR2 handling. Creates a new arbiter/worker set as a fork of the current arbiter without affecting old workers. Use this to do live deployment with the ability to backout a change. """ self.reexec() def handle_winch(self): """SIGWINCH handling""" if self.cfg.daemon: self.log.info("graceful stop of workers") self.num_workers = 0 self.kill_workers(signal.SIGTERM) else: self.log.debug("SIGWINCH ignored. 
Not daemonized") def maybe_promote_master(self): if self.master_pid == 0: return if self.master_pid != os.getppid(): self.log.info("Master has been promoted.") # reset master infos self.master_name = "Master" self.master_pid = 0 self.proc_name = self.cfg.proc_name del os.environ['GUNICORN_PID'] # rename the pidfile if self.pidfile is not None: self.pidfile.rename(self.cfg.pidfile) # reset proctitle util._setproctitle("master [%s]" % self.proc_name) def wakeup(self): """Wake up the arbiter's main loop.""" self.SIG_QUEUE.put_nowait(self.WAKEUP_REQUEST) def halt(self, reason=None, exit_status=0): """ halt arbiter """ # Stop control socket server first self._stop_control_server() self.stop() log_func = self.log.info if exit_status == 0 else self.log.error log_func("Shutting down: %s", self.master_name) if reason is not None: log_func("Reason: %s", reason) if self.pidfile is not None: self.pidfile.unlink() self.cfg.on_exit(self) sys.exit(exit_status) def wait_for_signals(self, timeout=1.0): """\ Wait for signals with timeout. Returns a list of signals that were received. """ signals = [] try: # Block until we get a signal or timeout sig = self.SIG_QUEUE.get(block=True, timeout=timeout) if sig != self.WAKEUP_REQUEST: signals.append(sig) # Drain any additional queued signals while True: try: sig = self.SIG_QUEUE.get_nowait() if sig != self.WAKEUP_REQUEST: signals.append(sig) except queue.Empty: break except queue.Empty: pass except KeyboardInterrupt: sys.exit() return signals def stop(self, graceful=True): """\ Stop workers :attr graceful: boolean, If True (the default) workers will be killed gracefully (ie. 
trying to wait for the current connection) """ unlink = ( self.reexec_pid == self.master_pid == 0 and not self.systemd and not self.cfg.reuse_port ) sock.close_sockets(self.LISTENERS, unlink) self.LISTENERS = [] sig = signal.SIGTERM if not graceful: sig = signal.SIGQUIT limit = time.time() + self.cfg.graceful_timeout # Stop dirty arbiter if self.dirty_arbiter_pid: self.kill_dirty_arbiter(sig) # instruct the workers to exit self.kill_workers(sig) # wait until the graceful timeout quick_shutdown = not graceful while (self.WORKERS or self.dirty_arbiter_pid) and time.time() < limit: # Check for SIGINT/SIGQUIT to trigger quick shutdown if not quick_shutdown: try: pending_sig = self.SIG_QUEUE.get_nowait() if pending_sig in (signal.SIGINT, signal.SIGQUIT): self.log.info("Quick shutdown requested") quick_shutdown = True self.kill_workers(signal.SIGQUIT) if self.dirty_arbiter_pid: self.kill_dirty_arbiter(signal.SIGQUIT) # Give workers a short time to exit cleanly limit = time.time() + 2.0 except Exception: pass self.reap_workers() self.reap_dirty_arbiter() time.sleep(0.1) self.kill_workers(signal.SIGKILL) if self.dirty_arbiter_pid: self.kill_dirty_arbiter(signal.SIGKILL) # Final reap to clean up any remaining zombies self.reap_workers() self.reap_dirty_arbiter() def reexec(self): """\ Relaunch the master and workers. """ if self.reexec_pid != 0: self.log.warning("USR2 signal ignored. Child exists.") return if self.master_pid != 0: self.log.warning("USR2 signal ignored. 
Parent exists.") return master_pid = os.getpid() self.reexec_pid = os.fork() if self.reexec_pid != 0: return self.cfg.pre_exec(self) environ = self.cfg.env_orig.copy() environ['GUNICORN_PID'] = str(master_pid) if self.systemd: environ['LISTEN_PID'] = str(os.getpid()) environ['LISTEN_FDS'] = str(len(self.LISTENERS)) else: environ['GUNICORN_FD'] = ','.join( str(lnr.fileno()) for lnr in self.LISTENERS) os.chdir(self.START_CTX['cwd']) # exec the process using the original environment os.execvpe(self.START_CTX[0], self.START_CTX['args'], environ) def reload(self): # Track reload stats self._stats['reloads'] += 1 old_address = self.cfg.address # reset old environment for k in self.cfg.env: if k in self.cfg.env_orig: # reset the key to the value it had before # we launched gunicorn os.environ[k] = self.cfg.env_orig[k] else: # delete the value set by gunicorn try: del os.environ[k] except KeyError: pass # reload conf self.app.reload() self.setup(self.app) # reopen log files self.log.reopen_files() # do we need to change listener ? 
if old_address != self.cfg.address: # close all listeners for lnr in self.LISTENERS: lnr.close() # init new listeners self.LISTENERS = sock.create_sockets(self.cfg, self.log) listeners_str = ",".join([str(lnr) for lnr in self.LISTENERS]) self.log.info("Listening at: %s", listeners_str) # do some actions on reload self.cfg.on_reload(self) # unlink pidfile if self.pidfile is not None: self.pidfile.unlink() # create new pidfile if self.cfg.pidfile is not None: self.pidfile = Pidfile(self.cfg.pidfile) self.pidfile.create(self.pid) # set new proc_name util._setproctitle("master [%s]" % self.proc_name) # Remember current worker age before spawning new workers last_worker_age = self.worker_age # spawn new workers for _ in range(self.cfg.workers): self.spawn_worker() # manage workers - this will kill old workers beyond num_workers self.manage_workers() # wait for old workers to terminate to prevent double SIGTERM deadline = time.monotonic() + self.cfg.graceful_timeout while time.monotonic() < deadline: if not self.WORKERS: break # Check if all remaining workers are newer than last_worker_age oldest = min(w.age for w in self.WORKERS.values()) if oldest > last_worker_age: break self.reap_workers() time.sleep(0.1) def murder_workers(self): """\ Kill unused/idle workers """ if not self.timeout: return workers = list(self.WORKERS.items()) for (pid, worker) in workers: try: if time.monotonic() - worker.tmp.last_update() <= self.timeout: continue except (OSError, ValueError): continue if not worker.aborted: self.log.critical("WORKER TIMEOUT (pid:%s)", pid) worker.aborted = True self.kill_worker(pid, signal.SIGABRT) else: self.kill_worker(pid, signal.SIGKILL) def reap_workers(self): """\ Reap workers to avoid zombie processes """ try: while True: wpid, status = os.waitpid(-1, os.WNOHANG) if not wpid: break if self.reexec_pid == wpid: self.reexec_pid = 0 else: # A worker was terminated. 
If the termination reason was # that it could not boot, we'll shut it down to avoid # infinite start/stop cycles. exitcode = None if os.WIFEXITED(status): exitcode = os.WEXITSTATUS(status) elif os.WIFSIGNALED(status): sig = os.WTERMSIG(status) try: sig_name = signal.Signals(sig).name except ValueError: sig_name = "signal {}".format(sig) msg = "Worker (pid:{}) was sent {}!".format( wpid, sig_name) # SIGKILL suggests OOM, log as error if sig == signal.SIGKILL: msg += " Perhaps out of memory?" self.log.error(msg) elif sig == signal.SIGTERM: # SIGTERM is expected during graceful shutdown self.log.info(msg) else: # Other signals are unexpected self.log.warning(msg) if exitcode is not None and exitcode != 0: self.log.error("Worker (pid:%s) exited with code %s.", wpid, exitcode) if exitcode == self.WORKER_BOOT_ERROR: reason = "Worker failed to boot." raise HaltServer(reason, self.WORKER_BOOT_ERROR) if exitcode == self.APP_LOAD_ERROR: reason = "App failed to load." raise HaltServer(reason, self.APP_LOAD_ERROR) worker = self.WORKERS.pop(wpid, None) if not worker: continue worker.tmp.close() self.cfg.child_exit(self, worker) except OSError as e: if e.errno != errno.ECHILD: raise def manage_workers(self): """\ Maintain the number of workers by spawning or killing as required. 
""" if len(self.WORKERS) < self.num_workers: self.spawn_workers() workers = self.WORKERS.items() workers = sorted(workers, key=lambda w: w[1].age) while len(workers) > self.num_workers: (pid, _) = workers.pop(0) self.kill_worker(pid, signal.SIGTERM) active_worker_count = len(workers) if self._last_logged_active_worker_count != active_worker_count: self._last_logged_active_worker_count = active_worker_count self.log.debug("{0} workers".format(active_worker_count), extra={"metric": "gunicorn.workers", "value": active_worker_count, "mtype": "gauge"}) if self.cfg.enable_backlog_metric: backlog = sum(sock.get_backlog() or 0 for sock in self.LISTENERS) if backlog >= 0: self.log.debug("socket backlog: {0}".format(backlog), extra={"metric": "gunicorn.backlog", "value": backlog, "mtype": "histogram"}) def spawn_worker(self): self.worker_age += 1 worker = self.worker_class(self.worker_age, self.pid, self.LISTENERS, self.app, self.timeout / 2.0, self.cfg, self.log) self.cfg.pre_fork(self, worker) pid = os.fork() if pid != 0: worker.pid = pid self.WORKERS[pid] = worker self._stats['workers_spawned'] += 1 return pid # Do not inherit the temporary files of other workers for sibling in self.WORKERS.values(): sibling.tmp.close() # Process Child worker.pid = os.getpid() try: util._setproctitle("worker [%s]" % self.proc_name) self.log.info("Booting worker with pid: %s", worker.pid) if self.cfg.reuse_port: worker.sockets = sock.create_sockets(self.cfg, self.log) self.cfg.post_fork(self, worker) worker.init_process() sys.exit(0) except SystemExit: raise except AppImportError as e: self.log.debug("Exception while loading the application", exc_info=True) print("%s" % e, file=sys.stderr) sys.stderr.flush() sys.exit(self.APP_LOAD_ERROR) except Exception as e: self.log.exception("Exception in worker process") print("%s" % e, file=sys.stderr) sys.stderr.flush() if not worker.booted: sys.exit(self.WORKER_BOOT_ERROR) sys.exit(-1) finally: self.log.info("Worker exiting (pid: %s)", worker.pid) 
try: worker.tmp.close() self.cfg.worker_exit(self, worker) except Exception: self.log.warning("Exception during worker exit:\n%s", traceback.format_exc()) def spawn_workers(self): """\ Spawn new workers as needed. This is where a worker process leaves the main loop of the master process. """ for _ in range(self.num_workers - len(self.WORKERS)): self.spawn_worker() time.sleep(0.1 * random.random()) def kill_workers(self, sig): """\ Kill all workers with the signal `sig` :attr sig: `signal.SIG*` value """ worker_pids = list(self.WORKERS.keys()) for pid in worker_pids: self.kill_worker(pid, sig) def kill_worker(self, pid, sig): """\ Kill a worker :attr pid: int, worker pid :attr sig: `signal.SIG*` value """ try: os.kill(pid, sig) # Track kills only on SIGTERM/SIGKILL (actual termination signals) if sig in (signal.SIGTERM, signal.SIGKILL): self._stats['workers_killed'] += 1 except OSError as e: if e.errno == errno.ESRCH: try: worker = self.WORKERS.pop(pid) worker.tmp.close() self.cfg.worker_exit(self, worker) return except (KeyError, OSError): return raise # ========================================================================= # Dirty Arbiter Management # ========================================================================= def _get_dirty_pidfile_path(self): """Get the well-known PID file path for orphan detection. Uses self.proc_name (not self.cfg.proc_name) so that during USR2 the new master gets a different PID file path ("myapp.2" vs "myapp"). This prevents the old dirty arbiter from removing the new one's PID file. """ import tempfile safe_name = self.proc_name.replace('/', '_').replace(' ', '_') return os.path.join(tempfile.gettempdir(), f"gunicorn-dirty-{safe_name}.pid") def _cleanup_orphaned_dirty_arbiter(self): """Kill any orphaned dirty arbiter from a previous crash. Only runs on fresh start (master_pid == 0), not during USR2. 
""" # During USR2, master_pid is set - don't cleanup old dirty arbiter if self.master_pid != 0: return pidfile = self._get_dirty_pidfile_path() if not os.path.exists(pidfile): return try: with open(pidfile) as f: old_pid = int(f.read().strip()) # Check if process exists os.kill(old_pid, 0) # Process exists - kill orphan self.log.warning("Killing orphaned dirty arbiter (pid: %s)", old_pid) os.kill(old_pid, signal.SIGTERM) # Wait briefly for graceful exit for _ in range(10): time.sleep(0.1) try: os.kill(old_pid, 0) except OSError: break else: os.kill(old_pid, signal.SIGKILL) except (ValueError, IOError, OSError): pass # Remove stale PID file try: os.unlink(pidfile) except OSError: pass def spawn_dirty_arbiter(self): """\ Spawn the dirty arbiter process. The dirty arbiter manages a separate pool of workers for long-running, blocking operations. """ # Lazy import for gevent compatibility (see #3482) from gunicorn.dirty import DirtyArbiter, set_dirty_socket_path if self.dirty_arbiter_pid: return # Already running # Cleanup any orphaned dirty arbiter from previous crash self._cleanup_orphaned_dirty_arbiter() # Get well-known PID file path self.dirty_pidfile = self._get_dirty_pidfile_path() self.dirty_arbiter = DirtyArbiter( self.cfg, self.log, pidfile=self.dirty_pidfile ) socket_path = self.dirty_arbiter.socket_path pid = os.fork() if pid != 0: # Parent process self.dirty_arbiter_pid = pid # Set socket path for HTTP workers to use set_dirty_socket_path(socket_path) os.environ['GUNICORN_DIRTY_SOCKET'] = socket_path self.log.info("Spawned dirty arbiter (pid: %s) at %s", pid, socket_path) return pid # Child process - run the dirty arbiter try: self.dirty_arbiter.run() sys.exit(0) except SystemExit: raise except Exception: self.log.exception("Exception in dirty arbiter process") sys.exit(-1) def kill_dirty_arbiter(self, sig): """\ Send a signal to the dirty arbiter. 
:attr sig: `signal.SIG*` value """ if not self.dirty_arbiter_pid: return try: os.kill(self.dirty_arbiter_pid, sig) except OSError as e: if e.errno == errno.ESRCH: self.dirty_arbiter_pid = 0 self.dirty_arbiter = None def reap_dirty_arbiter(self): """\ Reap the dirty arbiter process if it has exited. """ if not self.dirty_arbiter_pid: return try: wpid, status = os.waitpid(self.dirty_arbiter_pid, os.WNOHANG) if not wpid: return if os.WIFEXITED(status): exitcode = os.WEXITSTATUS(status) if exitcode != 0: self.log.error("Dirty arbiter (pid:%s) exited with code %s", wpid, exitcode) else: self.log.info("Dirty arbiter (pid:%s) exited", wpid) elif os.WIFSIGNALED(status): sig = os.WTERMSIG(status) self.log.warning("Dirty arbiter (pid:%s) killed by signal %s", wpid, sig) self.dirty_arbiter_pid = 0 self.dirty_arbiter = None except OSError as e: if e.errno == errno.ECHILD: self.dirty_arbiter_pid = 0 self.dirty_arbiter = None def manage_dirty_arbiter(self): """\ Maintain the dirty arbiter process by respawning if needed. """ if self.dirty_arbiter_pid: return # Already running if self.cfg.dirty_workers > 0 and self.cfg.dirty_apps: self.log.info("Spawning dirty arbiter...") self.spawn_dirty_arbiter() # ========================================================================= # Control Socket Management # ========================================================================= def _get_control_socket_path(self): """Get the control socket path, making relative paths absolute.""" socket_path = self.cfg.control_socket if not os.path.isabs(socket_path): socket_path = os.path.join(util.getcwd(), socket_path) return socket_path def _start_control_server(self): """\ Start the control socket server. The server runs in a background thread and accepts commands via Unix socket. 
""" if self.cfg.control_socket_disable: self.log.debug("Control socket disabled") return # Lazy import to avoid circular imports and gevent compatibility from gunicorn.ctl.server import ControlSocketServer socket_path = self._get_control_socket_path() socket_mode = self.cfg.control_socket_mode try: self._control_server = ControlSocketServer( self, socket_path, socket_mode ) self._control_server.start() except Exception as e: self.log.warning("Failed to start control socket: %s", e) self._control_server = None def _stop_control_server(self): """\ Stop the control socket server. """ if self._control_server: try: self._control_server.stop() except Exception as e: self.log.debug("Error stopping control server: %s", e) self._control_server = None benoitc-gunicorn-f5fb19e/gunicorn/asgi/000077500000000000000000000000001514360242400202325ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/gunicorn/asgi/__init__.py000066400000000000000000000014431514360242400223450ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ ASGI support for gunicorn. This module provides native ASGI worker support, using gunicorn's own HTTP parsing infrastructure adapted for async I/O. Components: - AsyncUnreader: Async socket reading with pushback buffer - AsyncRequest: Async HTTP request parser - ASGIProtocol: asyncio.Protocol implementation for HTTP handling - WebSocketProtocol: WebSocket protocol handler (RFC 6455) - LifespanManager: ASGI lifespan protocol support Usage: gunicorn -k asgi myapp:app """ from gunicorn.asgi.unreader import AsyncUnreader from gunicorn.asgi.message import AsyncRequest from gunicorn.asgi.lifespan import LifespanManager __all__ = ['AsyncUnreader', 'AsyncRequest', 'LifespanManager'] benoitc-gunicorn-f5fb19e/gunicorn/asgi/lifespan.py000066400000000000000000000142031514360242400224050ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. 
# See the NOTICE for more information. """ ASGI lifespan protocol manager. Manages startup and shutdown events for ASGI applications, enabling frameworks like FastAPI to run initialization and cleanup code. """ import asyncio class LifespanManager: """Manages ASGI lifespan events (startup/shutdown). The lifespan protocol allows ASGI applications to run code at startup and shutdown. This is essential for applications that need to initialize database connections, caches, or other resources. ASGI lifespan messages: - Server sends: {"type": "lifespan.startup"} - App responds: {"type": "lifespan.startup.complete"} or {"type": "lifespan.startup.failed", "message": "..."} - Server sends: {"type": "lifespan.shutdown"} - App responds: {"type": "lifespan.shutdown.complete"} """ def __init__(self, app, logger, state=None): """Initialize the lifespan manager. Args: app: ASGI application callable logger: Logger instance state: Shared state dict for the application """ self.app = app self.logger = logger self.state = state if state is not None else {} self._startup_complete = asyncio.Event() self._shutdown_complete = asyncio.Event() self._startup_failed = False self._startup_error = None self._shutdown_error = None self._receive_queue = asyncio.Queue() self._task = None self._app_finished = False async def startup(self): """Run lifespan startup and wait for completion. 
Raises: RuntimeError: If startup fails or app doesn't support lifespan """ scope = { "type": "lifespan", "asgi": {"version": "3.0", "spec_version": "2.4"}, "state": self.state, } # Send startup event await self._receive_queue.put({"type": "lifespan.startup"}) # Run lifespan in background task self._task = asyncio.create_task(self._run_lifespan(scope)) # Wait for startup with timeout try: await asyncio.wait_for( self._startup_complete.wait(), timeout=30.0 # Reasonable startup timeout ) except asyncio.TimeoutError: if self._task: self._task.cancel() raise RuntimeError("Lifespan startup timed out") if self._startup_failed: if self._task: self._task.cancel() msg = self._startup_error or "Unknown error" raise RuntimeError(f"Lifespan startup failed: {msg}") self.logger.debug("ASGI lifespan startup complete") async def shutdown(self): """Signal shutdown and wait for completion. This should be called during graceful shutdown. """ if self._app_finished: self.logger.debug("ASGI lifespan already finished") return # Send shutdown event await self._receive_queue.put({"type": "lifespan.shutdown"}) # Wait for shutdown with timeout try: await asyncio.wait_for( self._shutdown_complete.wait(), timeout=30.0 # Reasonable shutdown timeout ) except asyncio.TimeoutError: self.logger.warning("Lifespan shutdown timed out") if self._shutdown_error: self.logger.error("Lifespan shutdown error: %s", self._shutdown_error) # Cancel the task if still running if self._task and not self._task.done(): self._task.cancel() try: await self._task except asyncio.CancelledError: pass self.logger.debug("ASGI lifespan shutdown complete") async def _run_lifespan(self, scope): """Run the ASGI lifespan protocol.""" try: await self.app(scope, self._receive, self._send) except asyncio.CancelledError: raise except Exception as e: self.logger.debug("Lifespan application raised: %s", e) # If startup hasn't completed, mark it as failed if not self._startup_complete.is_set(): self._startup_failed = True 
self._startup_error = str(e) self._startup_complete.set() # If shutdown hasn't completed, mark error elif not self._shutdown_complete.is_set(): self._shutdown_error = str(e) self._shutdown_complete.set() finally: self._app_finished = True # Ensure events are set to unblock waiters if not self._startup_complete.is_set(): self._startup_failed = True self._startup_error = "Application exited before startup complete" self._startup_complete.set() if not self._shutdown_complete.is_set(): self._shutdown_complete.set() async def _receive(self): """ASGI receive callable for lifespan.""" return await self._receive_queue.get() async def _send(self, message): """ASGI send callable for lifespan.""" msg_type = message["type"] if msg_type == "lifespan.startup.complete": self._startup_complete.set() self.logger.debug("Received lifespan.startup.complete") elif msg_type == "lifespan.startup.failed": self._startup_failed = True self._startup_error = message.get("message", "") self._startup_complete.set() self.logger.debug("Received lifespan.startup.failed: %s", self._startup_error) elif msg_type == "lifespan.shutdown.complete": self._shutdown_complete.set() self.logger.debug("Received lifespan.shutdown.complete") elif msg_type == "lifespan.shutdown.failed": self._shutdown_error = message.get("message", "") self._shutdown_complete.set() self.logger.debug("Received lifespan.shutdown.failed: %s", self._shutdown_error) benoitc-gunicorn-f5fb19e/gunicorn/asgi/message.py000066400000000000000000000605101514360242400222320ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Async version of gunicorn/http/message.py for ASGI workers. Reuses the parsing logic from the sync version, adapted for async I/O. 
""" import io import ipaddress import re import socket import struct from gunicorn.http.errors import ( ExpectationFailed, InvalidHeader, InvalidHeaderName, NoMoreData, InvalidRequestLine, InvalidRequestMethod, InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders, UnsupportedTransferCoding, ObsoleteFolding, InvalidProxyLine, InvalidProxyHeader, ForbiddenProxyRequest, InvalidSchemeHeaders, ) from gunicorn.http.message import ( PP_V2_SIGNATURE, PPCommand, PPFamily, PPProtocol ) from gunicorn.util import bytes_to_str, split_request_uri MAX_REQUEST_LINE = 8190 MAX_HEADERS = 32768 DEFAULT_MAX_HEADERFIELD_SIZE = 8190 # Reuse regex patterns from sync version RFC9110_5_6_2_TOKEN_SPECIALS = r"!#$%&'*+-.^_`|~" TOKEN_RE = re.compile(r"[%s0-9a-zA-Z]+" % (re.escape(RFC9110_5_6_2_TOKEN_SPECIALS))) METHOD_BADCHAR_RE = re.compile("[a-z#]") VERSION_RE = re.compile(r"HTTP/(\d)\.(\d)") RFC9110_5_5_INVALID_AND_DANGEROUS = re.compile(r"[\0\r\n]") def _ip_in_allow_list(ip_str, allow_list, networks): """Check if IP address is in the allow list. Args: ip_str: The IP address string to check allow_list: The original allow list (strings, may contain "*") networks: Pre-computed ipaddress.ip_network objects from config """ if '*' in allow_list: return True try: ip = ipaddress.ip_address(ip_str) except ValueError: return False for network in networks: if ip in network: return True return False class AsyncRequest: """Async HTTP request parser. Parses HTTP/1.x requests using async I/O, reusing gunicorn's parsing logic where possible. 
""" def __init__(self, cfg, unreader, peer_addr, req_number=1): self.cfg = cfg self.unreader = unreader self.peer_addr = peer_addr self.remote_addr = peer_addr self.req_number = req_number self.version = None self.method = None self.uri = None self.path = None self.query = None self.fragment = None self.headers = [] self.trailers = [] self.scheme = "https" if cfg.is_ssl else "http" self.must_close = False self._expected_100_continue = False self.proxy_protocol_info = None # Request line limit self.limit_request_line = cfg.limit_request_line if (self.limit_request_line < 0 or self.limit_request_line >= MAX_REQUEST_LINE): self.limit_request_line = MAX_REQUEST_LINE # Headers limits self.limit_request_fields = cfg.limit_request_fields if (self.limit_request_fields <= 0 or self.limit_request_fields > MAX_HEADERS): self.limit_request_fields = MAX_HEADERS self.limit_request_field_size = cfg.limit_request_field_size if self.limit_request_field_size < 0: self.limit_request_field_size = DEFAULT_MAX_HEADERFIELD_SIZE # Max header buffer size max_header_field_size = self.limit_request_field_size or DEFAULT_MAX_HEADERFIELD_SIZE self.max_buffer_headers = self.limit_request_fields * \ (max_header_field_size + 2) + 4 # Body-related state self.content_length = None self.chunked = False self._body_reader = None self._body_remaining = 0 @classmethod async def parse(cls, cfg, unreader, peer_addr, req_number=1): """Parse an HTTP request from the stream. 
Args: cfg: gunicorn config object unreader: AsyncUnreader instance peer_addr: client address tuple req_number: request number on this connection (for keepalive) Returns: AsyncRequest: Parsed request object Raises: NoMoreData: If no data available Various parsing errors for malformed requests """ req = cls(cfg, unreader, peer_addr, req_number) await req._parse() return req async def _parse(self): """Parse the request from the unreader.""" buf = bytearray() await self._read_into(buf) # Handle proxy protocol if enabled and this is the first request mode = self.cfg.proxy_protocol if mode != "off" and self.req_number == 1: buf = await self._handle_proxy_protocol(buf, mode) # Get request line line, buf = await self._read_line(buf, self.limit_request_line) self._parse_request_line(line) # Headers data = bytes(buf) while True: idx = data.find(b"\r\n\r\n") done = data[:2] == b"\r\n" if idx < 0 and not done: await self._read_into(buf) data = bytes(buf) if len(data) > self.max_buffer_headers: raise LimitRequestHeaders("max buffer headers") else: break if done: self.unreader.unread(data[2:]) else: self.headers = self._parse_headers(data[:idx], from_trailer=False) self.unreader.unread(data[idx + 4:]) self._set_body_reader() async def _read_into(self, buf): """Read data from unreader and append to bytearray buffer.""" data = await self.unreader.read() if not data: raise NoMoreData(bytes(buf)) buf.extend(data) async def _read_line(self, buf, limit=0): """Read a line from buffer, returning (line, remaining_buffer).""" data = bytes(buf) while True: idx = data.find(b"\r\n") if idx >= 0: if idx > limit > 0: raise LimitRequestLine(idx, limit) break if len(data) - 2 > limit > 0: raise LimitRequestLine(len(data), limit) await self._read_into(buf) data = bytes(buf) return (data[:idx], bytearray(data[idx + 2:])) async def _handle_proxy_protocol(self, buf, mode): """Handle PROXY protocol detection and parsing. Returns the buffer with proxy protocol data consumed. 
""" # Ensure we have enough data to detect v2 signature (12 bytes) while len(buf) < 12: await self._read_into(buf) # Check for v2 signature first if mode in ("v2", "auto") and buf[:12] == PP_V2_SIGNATURE: self._proxy_protocol_access_check() return await self._parse_proxy_protocol_v2(buf) # Check for v1 prefix if mode in ("v1", "auto") and buf[:6] == b"PROXY ": self._proxy_protocol_access_check() return await self._parse_proxy_protocol_v1(buf) # Not proxy protocol - return buffer unchanged return buf def _proxy_protocol_access_check(self): """Check if proxy protocol is allowed from this peer.""" if (isinstance(self.peer_addr, tuple) and not _ip_in_allow_list(self.peer_addr[0], self.cfg.proxy_allow_ips, self.cfg.proxy_allow_networks())): raise ForbiddenProxyRequest(self.peer_addr[0]) async def _parse_proxy_protocol_v1(self, buf): """Parse PROXY protocol v1 (text format). Returns buffer with v1 header consumed. """ # Read until we find \r\n data = bytes(buf) while b"\r\n" not in data: await self._read_into(buf) data = bytes(buf) idx = data.find(b"\r\n") line = bytes_to_str(data[:idx]) remaining = bytearray(data[idx + 2:]) bits = line.split(" ") if len(bits) != 6: raise InvalidProxyLine(line) proto = bits[1] s_addr = bits[2] d_addr = bits[3] if proto not in ["TCP4", "TCP6"]: raise InvalidProxyLine("protocol '%s' not supported" % proto) if proto == "TCP4": try: socket.inet_pton(socket.AF_INET, s_addr) socket.inet_pton(socket.AF_INET, d_addr) except OSError: raise InvalidProxyLine(line) elif proto == "TCP6": try: socket.inet_pton(socket.AF_INET6, s_addr) socket.inet_pton(socket.AF_INET6, d_addr) except OSError: raise InvalidProxyLine(line) try: s_port = int(bits[4]) d_port = int(bits[5]) except ValueError: raise InvalidProxyLine("invalid port %s" % line) if not ((0 <= s_port <= 65535) and (0 <= d_port <= 65535)): raise InvalidProxyLine("invalid port %s" % line) self.proxy_protocol_info = { "proxy_protocol": proto, "client_addr": s_addr, "client_port": s_port, 
"proxy_addr": d_addr, "proxy_port": d_port } return remaining async def _parse_proxy_protocol_v2(self, buf): """Parse PROXY protocol v2 (binary format). Returns buffer with v2 header consumed. """ # We need at least 16 bytes for the header (12 signature + 4 header) while len(buf) < 16: await self._read_into(buf) # Parse header fields (after 12-byte signature) ver_cmd = buf[12] fam_proto = buf[13] length = struct.unpack(">H", bytes(buf[14:16]))[0] # Validate version (high nibble must be 0x2) version = (ver_cmd & 0xF0) >> 4 if version != 2: raise InvalidProxyHeader("unsupported version %d" % version) # Extract command (low nibble) command = ver_cmd & 0x0F if command not in (PPCommand.LOCAL, PPCommand.PROXY): raise InvalidProxyHeader("unsupported command %d" % command) # Ensure we have the complete header total_header_size = 16 + length while len(buf) < total_header_size: await self._read_into(buf) # For LOCAL command, no address info is provided if command == PPCommand.LOCAL: self.proxy_protocol_info = { "proxy_protocol": "LOCAL", "client_addr": None, "client_port": None, "proxy_addr": None, "proxy_port": None } return bytearray(buf[total_header_size:]) # Extract address family and protocol family = (fam_proto & 0xF0) >> 4 protocol = fam_proto & 0x0F # We only support TCP (STREAM) if protocol != PPProtocol.STREAM: raise InvalidProxyHeader("only TCP protocol is supported") addr_data = bytes(buf[16:16 + length]) if family == PPFamily.INET: # IPv4 if length < 12: # 4+4+2+2 raise InvalidProxyHeader("insufficient address data for IPv4") s_addr = socket.inet_ntop(socket.AF_INET, addr_data[0:4]) d_addr = socket.inet_ntop(socket.AF_INET, addr_data[4:8]) s_port = struct.unpack(">H", addr_data[8:10])[0] d_port = struct.unpack(">H", addr_data[10:12])[0] proto = "TCP4" elif family == PPFamily.INET6: # IPv6 if length < 36: # 16+16+2+2 raise InvalidProxyHeader("insufficient address data for IPv6") s_addr = socket.inet_ntop(socket.AF_INET6, addr_data[0:16]) d_addr = 
socket.inet_ntop(socket.AF_INET6, addr_data[16:32]) s_port = struct.unpack(">H", addr_data[32:34])[0] d_port = struct.unpack(">H", addr_data[34:36])[0] proto = "TCP6" elif family == PPFamily.UNSPEC: # No address info provided with PROXY command self.proxy_protocol_info = { "proxy_protocol": "UNSPEC", "client_addr": None, "client_port": None, "proxy_addr": None, "proxy_port": None } return bytearray(buf[total_header_size:]) else: raise InvalidProxyHeader("unsupported address family %d" % family) # Set data self.proxy_protocol_info = { "proxy_protocol": proto, "client_addr": s_addr, "client_port": s_port, "proxy_addr": d_addr, "proxy_port": d_port } return bytearray(buf[total_header_size:]) def _parse_request_line(self, line_bytes): """Parse the HTTP request line.""" bits = [bytes_to_str(bit) for bit in line_bytes.split(b" ", 2)] if len(bits) != 3: raise InvalidRequestLine(bytes_to_str(line_bytes)) # Method self.method = bits[0] if not self.cfg.permit_unconventional_http_method: if METHOD_BADCHAR_RE.search(self.method): raise InvalidRequestMethod(self.method) if not 3 <= len(bits[0]) <= 20: raise InvalidRequestMethod(self.method) if not TOKEN_RE.fullmatch(self.method): raise InvalidRequestMethod(self.method) if self.cfg.casefold_http_method: self.method = self.method.upper() # URI self.uri = bits[1] if len(self.uri) == 0: raise InvalidRequestLine(bytes_to_str(line_bytes)) try: parts = split_request_uri(self.uri) except ValueError: raise InvalidRequestLine(bytes_to_str(line_bytes)) self.path = parts.path or "" self.query = parts.query or "" self.fragment = parts.fragment or "" # Version match = VERSION_RE.fullmatch(bits[2]) if match is None: raise InvalidHTTPVersion(bits[2]) self.version = (int(match.group(1)), int(match.group(2))) if not (1, 0) <= self.version < (2, 0): if not self.cfg.permit_unconventional_http_version: raise InvalidHTTPVersion(self.version) def _parse_headers(self, data, from_trailer=False): """Parse HTTP headers from raw data.""" cfg = self.cfg 
headers = [] lines = [bytes_to_str(line) for line in data.split(b"\r\n")] # Handle scheme headers scheme_header = False secure_scheme_headers = {} forwarder_headers = [] if from_trailer: pass elif (not isinstance(self.peer_addr, tuple) or _ip_in_allow_list(self.peer_addr[0], cfg.forwarded_allow_ips, cfg.forwarded_allow_networks())): secure_scheme_headers = cfg.secure_scheme_headers forwarder_headers = cfg.forwarder_headers while lines: if len(headers) >= self.limit_request_fields: raise LimitRequestHeaders("limit request headers fields") curr = lines.pop(0) header_length = len(curr) + len("\r\n") if curr.find(":") <= 0: raise InvalidHeader(curr) name, value = curr.split(":", 1) if self.cfg.strip_header_spaces: name = name.rstrip(" \t") if not TOKEN_RE.fullmatch(name): raise InvalidHeaderName(name) name = name.upper() value = [value.strip(" \t")] # Consume value continuation lines while lines and lines[0].startswith((" ", "\t")): if not self.cfg.permit_obsolete_folding: raise ObsoleteFolding(name) curr = lines.pop(0) header_length += len(curr) + len("\r\n") if header_length > self.limit_request_field_size > 0: raise LimitRequestHeaders("limit request headers fields size") value.append(curr.strip("\t ")) value = " ".join(value) if RFC9110_5_5_INVALID_AND_DANGEROUS.search(value): raise InvalidHeader(name) if header_length > self.limit_request_field_size > 0: raise LimitRequestHeaders("limit request headers fields size") if not from_trailer and name == "EXPECT": # https://datatracker.ietf.org/doc/html/rfc9110#section-10.1.1 # "The Expect field value is case-insensitive." if value.lower() == "100-continue": if self.version < (1, 1): # https://datatracker.ietf.org/doc/html/rfc9110#section-10.1.1-12 # "A server that receives a 100-continue expectation # in an HTTP/1.0 request MUST ignore that expectation." pass else: self._expected_100_continue = True # N.B. 
understood but ignored expect header does not return 417 else: raise ExpectationFailed(value) if name in secure_scheme_headers: secure = value == secure_scheme_headers[name] scheme = "https" if secure else "http" if scheme_header: if scheme != self.scheme: raise InvalidSchemeHeaders() else: scheme_header = True self.scheme = scheme if "_" in name: if name in forwarder_headers or "*" in forwarder_headers: pass elif self.cfg.header_map == "dangerous": pass elif self.cfg.header_map == "drop": continue else: raise InvalidHeaderName(name) headers.append((name, value)) return headers def _set_body_reader(self): """Determine how to read the request body.""" chunked = False content_length = None for (name, value) in self.headers: if name == "CONTENT-LENGTH": if content_length is not None: raise InvalidHeader("CONTENT-LENGTH", req=self) content_length = value elif name == "TRANSFER-ENCODING": vals = [v.strip() for v in value.split(',')] for val in vals: if val.lower() == "chunked": if chunked: raise InvalidHeader("TRANSFER-ENCODING", req=self) chunked = True elif val.lower() == "identity": if chunked: raise InvalidHeader("TRANSFER-ENCODING", req=self) elif val.lower() in ('compress', 'deflate', 'gzip'): if chunked: raise InvalidHeader("TRANSFER-ENCODING", req=self) self.force_close() else: raise UnsupportedTransferCoding(value) if chunked: if self.version < (1, 1): raise InvalidHeader("TRANSFER-ENCODING", req=self) if content_length is not None: raise InvalidHeader("CONTENT-LENGTH", req=self) self.chunked = True self.content_length = None self._body_remaining = -1 elif content_length is not None: try: if str(content_length).isnumeric(): content_length = int(content_length) else: raise InvalidHeader("CONTENT-LENGTH", req=self) except ValueError: raise InvalidHeader("CONTENT-LENGTH", req=self) if content_length < 0: raise InvalidHeader("CONTENT-LENGTH", req=self) self.content_length = content_length self._body_remaining = content_length else: # No body for requests without 
Content-Length or Transfer-Encoding self.content_length = 0 self._body_remaining = 0 def force_close(self): """Mark connection for closing after this request.""" self.must_close = True def should_close(self): """Check if connection should be closed after this request.""" if self.must_close: return True for (h, v) in self.headers: if h == "CONNECTION": v = v.lower().strip(" \t") if v == "close": return True elif v == "keep-alive": return False break return self.version <= (1, 0) def get_header(self, name): """Get a header value by name (case-insensitive).""" name = name.upper() for (h, v) in self.headers: if h == name: return v return None async def read_body(self, size=8192): """Read a chunk of the request body. Args: size: Maximum bytes to read Returns: bytes: Body data, empty bytes when body is exhausted """ if self._body_remaining == 0: return b"" if self.chunked: return await self._read_chunked_body(size) else: return await self._read_length_body(size) async def _read_length_body(self, size): """Read from a length-delimited body.""" if self._body_remaining <= 0: return b"" to_read = min(size, self._body_remaining) data = await self.unreader.read(to_read) if data: self._body_remaining -= len(data) return data async def _read_chunked_body(self, size): """Read from a chunked body.""" if self._body_reader is None: self._body_reader = self._chunked_body_reader() try: return await anext(self._body_reader) except StopAsyncIteration: self._body_remaining = 0 return b"" async def _chunked_body_reader(self): """Async generator for reading chunked body.""" while True: # Read chunk size line size_line = await self._read_chunk_size_line() # Parse chunk size (handle extensions) chunk_size, *_ = size_line.split(b";", 1) if _: chunk_size = chunk_size.rstrip(b" \t") if any(n not in b"0123456789abcdefABCDEF" for n in chunk_size): raise InvalidHeader("Invalid chunk size") if len(chunk_size) == 0: raise InvalidHeader("Invalid chunk size") chunk_size = int(chunk_size, 16) if 
chunk_size == 0: # Final chunk - skip trailers and final CRLF await self._skip_trailers() return # Read chunk data remaining = chunk_size while remaining > 0: data = await self.unreader.read(min(remaining, 8192)) if not data: raise NoMoreData() remaining -= len(data) yield data # Skip chunk terminating CRLF crlf = await self.unreader.read(2) if crlf != b"\r\n": # May have partial read, try to get the rest while len(crlf) < 2: more = await self.unreader.read(2 - len(crlf)) if not more: break crlf += more if crlf != b"\r\n": raise InvalidHeader("Missing chunk terminator") async def _read_chunk_size_line(self): """Read a chunk size line.""" buf = io.BytesIO() while True: data = await self.unreader.read(1) if not data: raise NoMoreData() buf.write(data) if buf.getvalue().endswith(b"\r\n"): return buf.getvalue()[:-2] async def _skip_trailers(self): """Skip trailer headers after chunked body.""" buf = io.BytesIO() while True: data = await self.unreader.read(1) if not data: return buf.write(data) content = buf.getvalue() if content.endswith(b"\r\n\r\n"): # Could parse trailers here if needed return if content == b"\r\n": return async def drain_body(self): """Drain any unread body data. Should be called before reusing connection for keepalive. """ while True: data = await self.read_body(8192) if not data: break benoitc-gunicorn-f5fb19e/gunicorn/asgi/protocol.py000066400000000000000000001054671514360242400224620ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ ASGI protocol handler for gunicorn. Implements asyncio.Protocol to handle HTTP/1.x and HTTP/2 connections and dispatch to ASGI applications. 
""" import asyncio import errno from datetime import datetime from gunicorn.asgi.unreader import AsyncUnreader from gunicorn.asgi.message import AsyncRequest from gunicorn.asgi.uwsgi import AsyncUWSGIRequest from gunicorn.http.errors import NoMoreData from gunicorn.uwsgi.errors import UWSGIParseException def _normalize_sockaddr(sockaddr): """Normalize socket address to ASGI-compatible (host, port) tuple. ASGI spec requires server/client to be (host, port) tuples. IPv6 sockets return 4-tuples (host, port, flowinfo, scope_id), so we extract just the first two elements. """ return tuple(sockaddr[:2]) if sockaddr else None class ASGIResponseInfo: """Simple container for ASGI response info for access logging.""" def __init__(self, status, headers, sent): self.status = status self.sent = sent # Convert headers to list of string tuples for logging self.headers = [] for name, value in headers: if isinstance(name, bytes): name = name.decode("latin-1") if isinstance(value, bytes): value = value.decode("latin-1") self.headers.append((name, value)) class ASGIProtocol(asyncio.Protocol): """HTTP/1.1 protocol handler for ASGI applications. Handles connection lifecycle, request parsing, and ASGI app invocation. 
""" def __init__(self, worker): self.worker = worker self.cfg = worker.cfg self.log = worker.log self.app = worker.asgi self.transport = None self.reader = None self.writer = None self._task = None self.req_count = 0 # Connection state self._closed = False self._receive_queue = None # Set per-request for disconnect signaling def connection_made(self, transport): """Called when a connection is established.""" self.transport = transport self.worker.nr_conns += 1 # Check if HTTP/2 was negotiated via ALPN ssl_object = transport.get_extra_info('ssl_object') if ssl_object and hasattr(ssl_object, 'selected_alpn_protocol'): alpn = ssl_object.selected_alpn_protocol() if alpn == 'h2': # HTTP/2 connection - create reader immediately to avoid race condition # data_received may be called before _handle_http2_connection starts self.reader = asyncio.StreamReader() self._task = self.worker.loop.create_task( self._handle_http2_connection(transport, ssl_object) ) return # HTTP/1.x connection # Create stream reader/writer self.reader = asyncio.StreamReader() self.writer = transport # Start handling requests self._task = self.worker.loop.create_task(self._handle_connection()) def data_received(self, data): """Called when data is received on the connection.""" if self.reader: self.reader.feed_data(data) def connection_lost(self, exc): """Called when the connection is lost or closed. Instead of immediately cancelling the task, we signal a disconnect event and send an http.disconnect message to the receive queue. This allows the ASGI app to clean up resources (like database connections) gracefully before the task is cancelled. 
See: https://github.com/benoitc/gunicorn/issues/3484 """ # Guard against multiple calls (idempotent) if self._closed: return self._closed = True self.worker.nr_conns -= 1 if self.reader: self.reader.feed_eof() # Signal disconnect to the app via the receive queue if self._receive_queue is not None: self._receive_queue.put_nowait({"type": "http.disconnect"}) # Schedule task cancellation after grace period if task doesn't complete if self._task and not self._task.done(): grace_period = getattr(self.cfg, 'asgi_disconnect_grace_period', 3) if grace_period > 0: self.worker.loop.call_later( grace_period, self._cancel_task_if_pending ) else: # Grace period of 0 means cancel immediately self._task.cancel() def _cancel_task_if_pending(self): """Cancel the task if it's still pending after grace period.""" if self._task and not self._task.done(): self._task.cancel() def _safe_write(self, data): """Write data to transport, handling connection errors gracefully. Catches exceptions that occur when the client has disconnected: - OSError with errno EPIPE, ECONNRESET, ENOTCONN - RuntimeError when transport is closing/closed - AttributeError when transport is None These are silently ignored since the client is already gone. 
""" try: self.transport.write(data) except OSError as e: if e.errno not in (errno.EPIPE, errno.ECONNRESET, errno.ENOTCONN): self.log.exception("Socket error writing response.") except (RuntimeError, AttributeError): # Transport is closing/closed or None pass async def _handle_connection(self): """Main request handling loop for this connection.""" unreader = AsyncUnreader(self.reader) try: peername = self.transport.get_extra_info('peername') sockname = self.transport.get_extra_info('sockname') while not self._closed: self.req_count += 1 try: # Parse request based on protocol protocol = getattr(self.cfg, 'protocol', 'http') if protocol == 'uwsgi': request = await AsyncUWSGIRequest.parse( self.cfg, unreader, peername, self.req_count ) else: request = await AsyncRequest.parse( self.cfg, unreader, peername, self.req_count ) except NoMoreData: # Client disconnected break except UWSGIParseException as e: self.log.debug("uWSGI parse error: %s", e) break # Check for WebSocket upgrade if self._is_websocket_upgrade(request): await self._handle_websocket(request, sockname, peername) break # WebSocket takes over the connection # Handle HTTP request keepalive = await self._handle_http_request( request, sockname, peername ) # Increment worker request count self.worker.nr += 1 # Check max_requests if self.worker.nr >= self.worker.max_requests: self.log.info("Autorestarting worker after current request.") self.worker.alive = False keepalive = False if not keepalive or not self.worker.alive: break # Check connection limits for keepalive if not self.cfg.keepalive: break # Drain any unread body before next request await request.drain_body() except asyncio.CancelledError: pass except Exception as e: self.log.exception("Error handling connection: %s", e) finally: self._close_transport() def _is_websocket_upgrade(self, request): """Check if request is a WebSocket upgrade. 
Per RFC 6455 Section 4.1, the opening handshake requires: - HTTP method MUST be GET - Upgrade header MUST be "websocket" (case-insensitive) - Connection header MUST contain "Upgrade" """ # RFC 6455: The method of the request MUST be GET if request.method != "GET": return False upgrade = None connection = None for name, value in request.headers: if name == "UPGRADE": upgrade = value.lower() elif name == "CONNECTION": connection = value.lower() return upgrade == "websocket" and connection and "upgrade" in connection async def _handle_websocket(self, request, sockname, peername): """Handle WebSocket upgrade request.""" from gunicorn.asgi.websocket import WebSocketProtocol scope = self._build_websocket_scope(request, sockname, peername) ws_protocol = WebSocketProtocol( self.transport, self.reader, scope, self.app, self.log ) await ws_protocol.run() async def _handle_http_request(self, request, sockname, peername): """Handle a single HTTP request.""" scope = self._build_http_scope(request, sockname, peername) response_started = False response_complete = False exc_to_raise = None use_chunked = False # Response tracking for access logging response_status = 500 response_headers = [] response_sent = 0 # Receive queue for body - stored on self for disconnect signaling receive_queue = asyncio.Queue() self._receive_queue = receive_queue body_complete = False # Pre-populate with initial body state if request.content_length == 0 and not request.chunked: await receive_queue.put({ "type": "http.request", "body": b"", "more_body": False, }) body_complete = True else: # Start body reading task asyncio.create_task(self._read_body_to_queue(request, receive_queue)) async def receive(): nonlocal body_complete # Check if already disconnected before waiting if self._closed and body_complete: return {"type": "http.disconnect"} msg = await receive_queue.get() # Track when body is complete if msg.get("type") == "http.request" and not msg.get("more_body", True): body_complete = True return 
msg async def send(message): nonlocal response_started, response_complete, exc_to_raise nonlocal response_status, response_headers, response_sent, use_chunked # If client disconnected, silently ignore send attempts # This allows apps to finish cleanup without errors if self._closed: return msg_type = message["type"] if msg_type == "http.response.informational": # Handle informational responses (1xx) like 103 Early Hints info_status = message.get("status") info_headers = message.get("headers", []) await self._send_informational(info_status, info_headers, request) return if msg_type == "http.response.start": if response_started: exc_to_raise = RuntimeError("Response already started") return response_started = True response_status = message["status"] response_headers = message.get("headers", []) # Check if Content-Length is present has_content_length = any( (name.lower() if isinstance(name, str) else name.lower()) == b"content-length" or (name.lower() if isinstance(name, str) else name.lower()) == "content-length" for name, _ in response_headers ) # Use chunked encoding for HTTP/1.1 streaming responses without Content-Length if not has_content_length and request.version >= (1, 1): use_chunked = True response_headers = list(response_headers) + [(b"transfer-encoding", b"chunked")] await self._send_response_start(response_status, response_headers, request) elif msg_type == "http.response.body": if not response_started: exc_to_raise = RuntimeError("Response not started") return if response_complete: exc_to_raise = RuntimeError("Response already complete") return body = message.get("body", b"") more_body = message.get("more_body", False) if body: await self._send_body(body, chunked=use_chunked) response_sent += len(body) if not more_body: if use_chunked: # Send terminal chunk self._safe_write(b"0\r\n\r\n") response_complete = True # Build environ for logging environ = self._build_environ(request, sockname, peername) resp = None try: request_start = datetime.now() 
self.cfg.pre_request(self.worker, request) await self.app(scope, receive, send) if exc_to_raise is not None: raise exc_to_raise # Ensure response was sent if not response_started: await self._send_error_response(500, "Internal Server Error") response_status = 500 except asyncio.CancelledError: # Client disconnected - don't log as error, this is normal self.log.debug("Request cancelled (client disconnected)") return False except Exception: self.log.exception("Error in ASGI application") if not response_started: await self._send_error_response(500, "Internal Server Error") response_status = 500 return False finally: # Clear the receive queue reference self._receive_queue = None try: request_time = datetime.now() - request_start # Create response info for logging resp = ASGIResponseInfo(response_status, response_headers, response_sent) self.log.access(resp, request, environ, request_time) self.cfg.post_request(self.worker, request, environ, resp) except Exception: self.log.exception("Exception in post_request hook") # Determine keepalive if request.should_close(): return False return self.worker.alive and self.cfg.keepalive async def _read_body_to_queue(self, request, queue): """Read request body and put chunks on the queue.""" try: while True: chunk = await request.read_body(65536) if chunk: await queue.put({ "type": "http.request", "body": chunk, "more_body": True, }) else: await queue.put({ "type": "http.request", "body": b"", "more_body": False, }) break except Exception as e: self.log.debug("Error reading body: %s", e) await queue.put({ "type": "http.request", "body": b"", "more_body": False, }) def _build_http_scope(self, request, sockname, peername): """Build ASGI HTTP scope from parsed request.""" # Build headers list as bytes tuples headers = [] for name, value in request.headers: headers.append((name.lower().encode("latin-1"), value.encode("latin-1"))) server = _normalize_sockaddr(sockname) client = _normalize_sockaddr(peername) scope = { "type": "http", 
"asgi": {"version": "3.0", "spec_version": "2.4"}, "http_version": f"{request.version[0]}.{request.version[1]}", "method": request.method, "scheme": request.scheme, "path": request.path, "raw_path": request.path.encode("latin-1") if request.path else b"", "query_string": request.query.encode("latin-1") if request.query else b"", "root_path": self.cfg.root_path or "", "headers": headers, "server": server, "client": client, } # Add state dict for lifespan sharing if hasattr(self.worker, 'state'): scope["state"] = self.worker.state # Add HTTP/2 priority extension if available if hasattr(request, 'priority_weight'): scope["extensions"] = { "http.response.priority": { "weight": request.priority_weight, "depends_on": request.priority_depends_on, } } return scope def _build_environ(self, request, sockname, peername): """Build minimal WSGI-like environ dict for access logging.""" environ = { "REQUEST_METHOD": request.method, "RAW_URI": request.uri, "PATH_INFO": request.path, "QUERY_STRING": request.query or "", "SERVER_PROTOCOL": f"HTTP/{request.version[0]}.{request.version[1]}", "REMOTE_ADDR": peername[0] if peername else "-", } # Add HTTP headers as environ vars for name, value in request.headers: key = "HTTP_" + name.replace("-", "_") environ[key] = value return environ def _build_websocket_scope(self, request, sockname, peername): """Build ASGI WebSocket scope from parsed request.""" # Build headers list as bytes tuples headers = [] for name, value in request.headers: headers.append((name.lower().encode("latin-1"), value.encode("latin-1"))) # Extract subprotocols from Sec-WebSocket-Protocol header subprotocols = [] for name, value in request.headers: if name == "SEC-WEBSOCKET-PROTOCOL": subprotocols = [s.strip() for s in value.split(",")] break server = _normalize_sockaddr(sockname) client = _normalize_sockaddr(peername) scope = { "type": "websocket", "asgi": {"version": "3.0", "spec_version": "2.4"}, "http_version": f"{request.version[0]}.{request.version[1]}", 
"scheme": "wss" if request.scheme == "https" else "ws", "path": request.path, "raw_path": request.path.encode("latin-1") if request.path else b"", "query_string": request.query.encode("latin-1") if request.query else b"", "root_path": self.cfg.root_path or "", "headers": headers, "server": server, "client": client, "subprotocols": subprotocols, } # Add state dict for lifespan sharing if hasattr(self.worker, 'state'): scope["state"] = self.worker.state return scope async def _send_informational(self, status, headers, request): """Send an informational response (1xx) such as 103 Early Hints. Args: status: HTTP status code (100-199) headers: List of (name, value) header tuples request: The parsed request object Note: Informational responses are only sent for HTTP/1.1 or later. HTTP/1.0 clients do not support 1xx responses. """ # Don't send informational responses to HTTP/1.0 clients if request.version < (1, 1): return reason = self._get_reason_phrase(status) response = f"HTTP/{request.version[0]}.{request.version[1]} {status} {reason}\r\n" for name, value in headers: if isinstance(name, bytes): name = name.decode("latin-1") if isinstance(value, bytes): value = value.decode("latin-1") response += f"{name}: {value}\r\n" response += "\r\n" self._safe_write(response.encode("latin-1")) async def _send_response_start(self, status, headers, request): """Send HTTP response status and headers.""" # Build status line reason = self._get_reason_phrase(status) status_line = f"HTTP/{request.version[0]}.{request.version[1]} {status} {reason}\r\n" # Build headers header_lines = [] for name, value in headers: if isinstance(name, bytes): name = name.decode("latin-1") if isinstance(value, bytes): value = value.decode("latin-1") header_lines.append(f"{name}: {value}\r\n") # Add server header if not present header_lines.append("Server: gunicorn/asgi\r\n") response = status_line + "".join(header_lines) + "\r\n" self._safe_write(response.encode("latin-1")) async def _send_body(self, body, 
chunked=False): """Send response body chunk.""" if body: if chunked: # Chunked encoding: size in hex + CRLF + data + CRLF chunk = f"{len(body):x}\r\n".encode("latin-1") + body + b"\r\n" self._safe_write(chunk) else: self._safe_write(body) async def _send_error_response(self, status, message): """Send an error response.""" body = message.encode("utf-8") response = ( f"HTTP/1.1 {status} {message}\r\n" f"Content-Type: text/plain\r\n" f"Content-Length: {len(body)}\r\n" f"Connection: close\r\n" f"\r\n" ) self._safe_write(response.encode("latin-1")) self._safe_write(body) def _get_reason_phrase(self, status): """Get HTTP reason phrase for status code.""" reasons = { 100: "Continue", 101: "Switching Protocols", 103: "Early Hints", 200: "OK", 201: "Created", 202: "Accepted", 204: "No Content", 206: "Partial Content", 301: "Moved Permanently", 302: "Found", 303: "See Other", 304: "Not Modified", 307: "Temporary Redirect", 308: "Permanent Redirect", 400: "Bad Request", 401: "Unauthorized", 403: "Forbidden", 404: "Not Found", 405: "Method Not Allowed", 408: "Request Timeout", 409: "Conflict", 410: "Gone", 411: "Length Required", 413: "Payload Too Large", 414: "URI Too Long", 415: "Unsupported Media Type", 422: "Unprocessable Entity", 429: "Too Many Requests", 500: "Internal Server Error", 501: "Not Implemented", 502: "Bad Gateway", 503: "Service Unavailable", 504: "Gateway Timeout", } return reasons.get(status, "Unknown") def _close_transport(self): """Close the transport safely. Calls write_eof() first if supported to signal end of writing, which helps ensure buffered data is flushed before closing. 
""" if self.transport and not self._closed: try: # Signal end of writing to help flush buffers if self.transport.can_write_eof(): self.transport.write_eof() self.transport.close() except Exception: pass self._closed = True async def _handle_http2_connection(self, transport, ssl_object): """Handle an HTTP/2 connection.""" try: from gunicorn.http2.async_connection import AsyncHTTP2Connection peername = transport.get_extra_info('peername') sockname = transport.get_extra_info('sockname') # Use the reader created in connection_made # (data_received feeds data to self.reader) reader = self.reader protocol = asyncio.StreamReaderProtocol(reader) writer = asyncio.StreamWriter( transport, protocol, reader, self.worker.loop ) # Create HTTP/2 connection handler h2_conn = AsyncHTTP2Connection( self.cfg, reader, writer, peername ) await h2_conn.initiate_connection() self._h2_conn = h2_conn # Main loop - receive and handle requests while not h2_conn.is_closed and self.worker.alive: try: requests = await h2_conn.receive_data(timeout=1.0) except asyncio.TimeoutError: continue except Exception as e: self.log.debug("HTTP/2 receive error: %s", e) break for req in requests: try: await self._handle_http2_request( req, h2_conn, sockname, peername ) except Exception as e: self.log.exception("Error handling HTTP/2 request") try: await h2_conn.send_error( req.stream.stream_id, 500, str(e) ) except Exception: pass finally: h2_conn.cleanup_stream(req.stream.stream_id) # Increment worker request count self.worker.nr += len(requests) # Check max_requests if self.worker.nr >= self.worker.max_requests: self.log.info("Autorestarting worker after current request.") self.worker.alive = False break except asyncio.CancelledError: pass except Exception as e: self.log.exception("HTTP/2 connection error: %s", e) finally: if hasattr(self, '_h2_conn'): try: await self._h2_conn.close() except Exception: pass self._close_transport() async def _handle_http2_request(self, request, h2_conn, sockname, peername): 
"""Handle a single HTTP/2 request.""" stream_id = request.stream.stream_id scope = self._build_http2_scope(request, sockname, peername) response_started = False response_complete = False exc_to_raise = None response_status = 500 response_headers = [] response_body = b'' response_trailers = [] async def receive(): # For HTTP/2, the body is already buffered in the stream body = request.body.read() return { "type": "http.request", "body": body, "more_body": False, } async def send(message): nonlocal response_started, response_complete, exc_to_raise nonlocal response_status, response_headers, response_body msg_type = message["type"] if msg_type == "http.response.informational": # Handle informational responses (1xx) like 103 Early Hints over HTTP/2 info_status = message.get("status") info_headers = message.get("headers", []) # Convert headers to list of string tuples headers = [] for name, value in info_headers: if isinstance(name, bytes): name = name.decode("latin-1") if isinstance(value, bytes): value = value.decode("latin-1") headers.append((name, value)) await h2_conn.send_informational(stream_id, info_status, headers) return if msg_type == "http.response.start": if response_started: exc_to_raise = RuntimeError("Response already started") return response_started = True response_status = message["status"] response_headers = message.get("headers", []) elif msg_type == "http.response.body": if not response_started: exc_to_raise = RuntimeError("Response not started") return if response_complete: exc_to_raise = RuntimeError("Response already complete") return body = message.get("body", b"") more_body = message.get("more_body", False) if body: response_body += body if not more_body: response_complete = True elif msg_type == "http.response.trailers": if not response_complete: exc_to_raise = RuntimeError("Cannot send trailers before body complete") return trailer_headers = message.get("headers", []) # Convert to list of tuples with string values trailers = [] for name, 
value in trailer_headers: if isinstance(name, bytes): name = name.decode("latin-1") if isinstance(value, bytes): value = value.decode("latin-1") trailers.append((name, value)) response_trailers.extend(trailers) # Build environ for logging environ = self._build_http2_environ(request, sockname, peername) request_start = datetime.now() try: self.cfg.pre_request(self.worker, request) await self.app(scope, receive, send) if exc_to_raise is not None: raise exc_to_raise # Send response via HTTP/2 if response_started: # Convert headers to list of tuples headers = [] for name, value in response_headers: if isinstance(name, bytes): name = name.decode("latin-1") if isinstance(value, bytes): value = value.decode("latin-1") headers.append((name, value)) if response_trailers: # Send headers, body, then trailers separately response_hdrs = [(':status', str(response_status))] for name, value in headers: response_hdrs.append((name.lower(), str(value))) # Send headers without ending stream h2_conn.h2_conn.send_headers(stream_id, response_hdrs, end_stream=False) stream = h2_conn.streams[stream_id] stream.send_headers(response_hdrs, end_stream=False) await h2_conn._send_pending_data() # Send body without ending stream if response_body: h2_conn.h2_conn.send_data(stream_id, response_body, end_stream=False) stream.send_data(response_body, end_stream=False) await h2_conn._send_pending_data() # Send trailers (ends stream) await h2_conn.send_trailers(stream_id, response_trailers) else: await h2_conn.send_response( stream_id, response_status, headers, response_body ) else: await h2_conn.send_error(stream_id, 500, "Internal Server Error") response_status = 500 except Exception: self.log.exception("Error in ASGI application") if not response_started: await h2_conn.send_error(stream_id, 500, "Internal Server Error") response_status = 500 finally: try: request_time = datetime.now() - request_start resp = ASGIResponseInfo( response_status, response_headers, len(response_body) ) 
self.log.access(resp, request, environ, request_time) self.cfg.post_request(self.worker, request, environ, resp) except Exception: self.log.exception("Exception in post_request hook") def _build_http2_scope(self, request, sockname, peername): """Build ASGI HTTP scope from HTTP/2 request.""" headers = [] for name, value in request.headers: headers.append(( name.lower().encode("latin-1"), value.encode("latin-1") )) server = _normalize_sockaddr(sockname) client = _normalize_sockaddr(peername) scope = { "type": "http", "asgi": {"version": "3.0", "spec_version": "2.4"}, "http_version": "2", "method": request.method, "scheme": request.scheme, "path": request.path, "raw_path": request.path.encode("latin-1") if request.path else b"", "query_string": request.query.encode("latin-1") if request.query else b"", "root_path": self.cfg.root_path or "", "headers": headers, "server": server, "client": client, } if hasattr(self.worker, 'state'): scope["state"] = self.worker.state # Add HTTP/2 extensions extensions = {} if hasattr(request, 'priority_weight'): extensions["http.response.priority"] = { "weight": request.priority_weight, "depends_on": request.priority_depends_on, } # Add trailer support extension for HTTP/2 extensions["http.response.trailers"] = {} scope["extensions"] = extensions return scope def _build_http2_environ(self, request, sockname, peername): """Build minimal environ dict for access logging.""" environ = { "REQUEST_METHOD": request.method, "RAW_URI": request.uri, "PATH_INFO": request.path, "QUERY_STRING": request.query or "", "SERVER_PROTOCOL": "HTTP/2", "REMOTE_ADDR": peername[0] if peername else "-", } for name, value in request.headers: key = "HTTP_" + name.replace("-", "_") environ[key] = value return environ benoitc-gunicorn-f5fb19e/gunicorn/asgi/unreader.py000066400000000000000000000055011514360242400224120ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
""" Async version of gunicorn/http/unreader.py for ASGI workers. Provides async reading with pushback buffer support. """ import io class AsyncUnreader: """Async socket reader with pushback buffer support. This class wraps an asyncio StreamReader and provides the ability to "unread" data back into a buffer for re-parsing. """ def __init__(self, reader, max_chunk=8192): """Initialize the async unreader. Args: reader: asyncio.StreamReader instance max_chunk: Maximum bytes to read at once """ self.reader = reader self.buf = io.BytesIO() self.max_chunk = max_chunk async def read(self, size=None): """Read data from the stream, using buffered data first. Args: size: Number of bytes to read. If None, returns all buffered data or reads a single chunk. Returns: bytes: Data read from buffer or stream """ if size is not None and not isinstance(size, int): raise TypeError("size parameter must be an int or long.") if size is not None: if size == 0: return b"" if size < 0: size = None # Move to end to check buffer size self.buf.seek(0, io.SEEK_END) # If no size specified, return buffered data or read chunk if size is None and self.buf.tell(): ret = self.buf.getvalue() self.buf = io.BytesIO() return ret if size is None: chunk = await self._read_chunk() return chunk # Read until we have enough data while self.buf.tell() < size: chunk = await self._read_chunk() if not chunk: ret = self.buf.getvalue() self.buf = io.BytesIO() return ret self.buf.write(chunk) data = self.buf.getvalue() self.buf = io.BytesIO() self.buf.write(data[size:]) return data[:size] async def _read_chunk(self): """Read a chunk of data from the underlying stream.""" try: return await self.reader.read(self.max_chunk) except Exception: return b"" def unread(self, data): """Push data back into the buffer for re-reading. 
Args: data: bytes to push back """ if data: self.buf.seek(0, io.SEEK_END) self.buf.write(data) def has_buffered_data(self): """Check if there's data in the pushback buffer.""" pos = self.buf.tell() self.buf.seek(0, io.SEEK_END) has_data = self.buf.tell() > 0 self.buf.seek(pos) return has_data benoitc-gunicorn-f5fb19e/gunicorn/asgi/uwsgi.py000066400000000000000000000126231514360242400217460ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Async uWSGI protocol parser for ASGI workers. Reuses the parsing logic from gunicorn/uwsgi/message.py, only async I/O differs. """ from gunicorn.uwsgi.message import UWSGIRequest from gunicorn.uwsgi.errors import ( InvalidUWSGIHeader, UnsupportedModifier, ) class AsyncUWSGIRequest(UWSGIRequest): """Async version of UWSGIRequest. Reuses all parsing logic from the sync version, only async I/O differs. The following methods are reused from the parent class: - _parse_vars() - pure parsing, no I/O - _extract_request_info() - pure transformation - _check_allowed_ip() - no I/O - should_close() - simple logic """ # pylint: disable=super-init-not-called def __init__(self, cfg, unreader, peer_addr, req_number=1): # Don't call super().__init__ - it does sync parsing # Just initialize attributes self.cfg = cfg self.unreader = unreader self.peer_addr = peer_addr self.remote_addr = peer_addr self.req_number = req_number # Initialize all attributes (same as sync version) self.method = None self.uri = None self.path = None self.query = None self.fragment = "" self.version = (1, 1) self.headers = [] self.trailers = [] self.body = None self.scheme = "https" if cfg.is_ssl else "http" self.must_close = False self.uwsgi_vars = {} self.modifier1 = 0 self.modifier2 = 0 self.proxy_protocol_info = None # Body state self.content_length = 0 self.chunked = False self._body_remaining = 0 # Async factory method - intentionally differs from sync parent: # - async instead of 
sync (invalid-overridden-method) # - different signature for async I/O (arguments-differ) # pylint: disable=arguments-differ,invalid-overridden-method @classmethod async def parse(cls, cfg, unreader, peer_addr, req_number=1): """Parse a uWSGI request asynchronously. Args: cfg: gunicorn config object unreader: AsyncUnreader instance peer_addr: client address tuple req_number: request number on this connection (for keepalive) Returns: AsyncUWSGIRequest: Parsed request object Raises: InvalidUWSGIHeader: If the uWSGI header is malformed UnsupportedModifier: If modifier1 is not 0 ForbiddenUWSGIRequest: If source IP is not allowed """ req = cls(cfg, unreader, peer_addr, req_number) req._check_allowed_ip() # Reuse from parent await req._async_parse() return req async def _async_parse(self): """Async version of parse() - reads data then uses sync parsing.""" # Read 4-byte header header = await self._async_read_exact(4) if len(header) < 4: raise InvalidUWSGIHeader("incomplete header") self.modifier1 = header[0] datasize = int.from_bytes(header[1:3], 'little') self.modifier2 = header[3] if self.modifier1 != 0: raise UnsupportedModifier(self.modifier1) # Read vars block if datasize > 0: vars_data = await self._async_read_exact(datasize) if len(vars_data) < datasize: raise InvalidUWSGIHeader("incomplete vars block") self._parse_vars(vars_data) # Reuse sync method self._extract_request_info() # Reuse sync method self._set_body_reader() async def _async_read_exact(self, size): """Read exactly size bytes asynchronously.""" buf = bytearray() while len(buf) < size: chunk = await self.unreader.read(size - len(buf)) if not chunk: break buf.extend(chunk) return bytes(buf) def _set_body_reader(self): """Set up body state for async reading.""" content_length = 0 if 'CONTENT_LENGTH' in self.uwsgi_vars: try: content_length = max(int(self.uwsgi_vars['CONTENT_LENGTH']), 0) except ValueError: content_length = 0 self.content_length = content_length self._body_remaining = content_length async 
def read_body(self, size=8192): """Read body chunk asynchronously. Args: size: Maximum bytes to read Returns: bytes: Body data, empty bytes when body is exhausted """ if self._body_remaining <= 0: return b"" to_read = min(size, self._body_remaining) data = await self.unreader.read(to_read) if data: self._body_remaining -= len(data) return data async def drain_body(self): """Drain unread body data. Should be called before reusing connection for keepalive. """ while self._body_remaining > 0: data = await self.read_body(8192) if not data: break def get_header(self, name): """Get header by name (case-insensitive). Args: name: Header name to look up Returns: Header value if found, None otherwise """ name = name.upper() for h, v in self.headers: if h == name: return v return None benoitc-gunicorn-f5fb19e/gunicorn/asgi/websocket.py000066400000000000000000000274611514360242400226040ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ WebSocket protocol handler for ASGI. Implements RFC 6455 WebSocket protocol for ASGI applications. """ import asyncio import base64 import hashlib import struct # WebSocket frame opcodes OPCODE_CONTINUATION = 0x0 OPCODE_TEXT = 0x1 OPCODE_BINARY = 0x2 OPCODE_CLOSE = 0x8 OPCODE_PING = 0x9 OPCODE_PONG = 0xA # WebSocket close codes CLOSE_NORMAL = 1000 CLOSE_GOING_AWAY = 1001 CLOSE_PROTOCOL_ERROR = 1002 CLOSE_UNSUPPORTED = 1003 CLOSE_NO_STATUS = 1005 CLOSE_ABNORMAL = 1006 CLOSE_INVALID_DATA = 1007 CLOSE_POLICY_VIOLATION = 1008 CLOSE_MESSAGE_TOO_BIG = 1009 CLOSE_MANDATORY_EXT = 1010 CLOSE_INTERNAL_ERROR = 1011 # WebSocket handshake GUID (RFC 6455) WS_GUID = b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11" class WebSocketProtocol: """WebSocket connection handler for ASGI applications.""" def __init__(self, transport, reader, scope, app, log): """Initialize WebSocket protocol handler. 
Args: transport: asyncio transport for writing reader: asyncio StreamReader for reading scope: ASGI WebSocket scope dict app: ASGI application callable log: Logger instance """ self.transport = transport self.reader = reader self.scope = scope self.app = app self.log = log self.accepted = False self.closed = False self.close_code = None self.close_reason = "" # Message reassembly state self._fragments = [] self._fragment_opcode = None # Receive queue for incoming messages self._receive_queue = asyncio.Queue() async def run(self): """Run the WebSocket ASGI application.""" # Send initial connect event await self._receive_queue.put({"type": "websocket.connect"}) # Start frame reading task read_task = asyncio.create_task(self._read_frames()) try: await self.app(self.scope, self._receive, self._send) except Exception: self.log.exception("Error in WebSocket ASGI application") finally: read_task.cancel() try: await read_task except asyncio.CancelledError: pass # Send close frame if not already closed if not self.closed and self.accepted: await self._send_close(CLOSE_INTERNAL_ERROR, "Application error") async def _receive(self): """ASGI receive callable.""" return await self._receive_queue.get() async def _send(self, message): """ASGI send callable.""" msg_type = message["type"] if msg_type == "websocket.accept": if self.accepted: raise RuntimeError("WebSocket already accepted") await self._send_accept(message) self.accepted = True elif msg_type == "websocket.send": if not self.accepted: raise RuntimeError("WebSocket not accepted") if self.closed: raise RuntimeError("WebSocket closed") if "text" in message: await self._send_frame(OPCODE_TEXT, message["text"].encode("utf-8")) elif "bytes" in message: await self._send_frame(OPCODE_BINARY, message["bytes"]) elif msg_type == "websocket.close": code = message.get("code", CLOSE_NORMAL) reason = message.get("reason", "") await self._send_close(code, reason) self.closed = True async def _send_accept(self, message): """Send 
WebSocket handshake accept response.""" # Get Sec-WebSocket-Key from headers ws_key = None for name, value in self.scope["headers"]: if name == b"sec-websocket-key": ws_key = value break if not ws_key: raise RuntimeError("Missing Sec-WebSocket-Key header") # Calculate accept key accept_key = base64.b64encode( hashlib.sha1(ws_key + WS_GUID).digest() ).decode("ascii") # Build response headers headers = [ "HTTP/1.1 101 Switching Protocols\r\n", "Upgrade: websocket\r\n", "Connection: Upgrade\r\n", f"Sec-WebSocket-Accept: {accept_key}\r\n", ] # Add selected subprotocol if specified subprotocol = message.get("subprotocol") if subprotocol: headers.append(f"Sec-WebSocket-Protocol: {subprotocol}\r\n") # Add any extra headers from message extra_headers = message.get("headers", []) for name, value in extra_headers: if isinstance(name, bytes): name = name.decode("latin-1") if isinstance(value, bytes): value = value.decode("latin-1") headers.append(f"{name}: {value}\r\n") headers.append("\r\n") self.transport.write("".join(headers).encode("latin-1")) async def _read_frames(self): """Read and process incoming WebSocket frames.""" try: while not self.closed: frame = await self._read_frame() if frame is None: break opcode, payload = frame if opcode == OPCODE_CLOSE: await self._handle_close(payload) break if opcode == OPCODE_PING: await self._send_frame(OPCODE_PONG, payload) elif opcode == OPCODE_PONG: # Ignore pongs pass elif opcode == OPCODE_TEXT: await self._receive_queue.put({ "type": "websocket.receive", "text": payload.decode("utf-8"), }) elif opcode == OPCODE_BINARY: await self._receive_queue.put({ "type": "websocket.receive", "bytes": payload, }) elif opcode == OPCODE_CONTINUATION: # Handle fragmented messages await self._handle_continuation(payload) except asyncio.CancelledError: raise except Exception as e: self.log.debug("WebSocket read error: %s", e) finally: # Signal disconnect if not self.closed: self.closed = True await self._receive_queue.put({ "type": 
"websocket.disconnect", "code": self.close_code or CLOSE_ABNORMAL, }) async def _read_frame(self): # pylint: disable=too-many-return-statements """Read a single WebSocket frame. Returns: tuple: (opcode, payload) or None if connection closed """ # Read frame header (2 bytes minimum) header = await self._read_exact(2) if not header: return None first_byte, second_byte = header[0], header[1] fin = (first_byte >> 7) & 1 rsv1 = (first_byte >> 6) & 1 rsv2 = (first_byte >> 5) & 1 rsv3 = (first_byte >> 4) & 1 opcode = first_byte & 0x0F # RSV bits must be 0 (no extensions) if rsv1 or rsv2 or rsv3: await self._send_close(CLOSE_PROTOCOL_ERROR, "RSV bits set") return None masked = (second_byte >> 7) & 1 payload_len = second_byte & 0x7F # Client frames must be masked (RFC 6455) if not masked: await self._send_close(CLOSE_PROTOCOL_ERROR, "Frame not masked") return None # Extended payload length if payload_len == 126: ext_len = await self._read_exact(2) if not ext_len: return None payload_len = struct.unpack("!H", ext_len)[0] elif payload_len == 127: ext_len = await self._read_exact(8) if not ext_len: return None payload_len = struct.unpack("!Q", ext_len)[0] # Read masking key masking_key = await self._read_exact(4) if not masking_key: return None # Read payload payload = await self._read_exact(payload_len) if payload is None: return None # Unmask payload payload = self._unmask(payload, masking_key) # Handle fragmented messages if opcode == OPCODE_CONTINUATION: if self._fragment_opcode is None: await self._send_close(CLOSE_PROTOCOL_ERROR, "Unexpected continuation") return None self._fragments.append(payload) if fin: # Reassemble complete message full_payload = b"".join(self._fragments) final_opcode = self._fragment_opcode self._fragments = [] self._fragment_opcode = None return (final_opcode, full_payload) return (OPCODE_CONTINUATION, b"") # Fragment received, wait for more elif opcode in (OPCODE_TEXT, OPCODE_BINARY): if not fin: # Start of fragmented message 
self._fragment_opcode = opcode self._fragments = [payload] return (OPCODE_CONTINUATION, b"") # Fragment started, wait for more return (opcode, payload) else: # Control frames return (opcode, payload) async def _read_exact(self, n): """Read exactly n bytes from the reader.""" try: data = await self.reader.readexactly(n) return data except asyncio.IncompleteReadError: return None except Exception: return None def _unmask(self, payload, masking_key): """Unmask WebSocket payload data.""" if not payload: return payload # XOR each byte with corresponding mask byte return bytes(b ^ masking_key[i % 4] for i, b in enumerate(payload)) async def _handle_close(self, payload): """Handle incoming close frame.""" if len(payload) >= 2: self.close_code = struct.unpack("!H", payload[:2])[0] self.close_reason = payload[2:].decode("utf-8", errors="replace") else: self.close_code = CLOSE_NO_STATUS self.close_reason = "" # Echo close frame back if we haven't already sent one if not self.closed: await self._send_close(self.close_code, self.close_reason) self.closed = True async def _handle_continuation(self, payload): # pylint: disable=unused-argument """Handle continuation frame (already processed in _read_frame).""" # This is called for partial fragments, nothing to do here async def _send_frame(self, opcode, payload): """Send a WebSocket frame. Server frames are not masked (RFC 6455). 
""" if isinstance(payload, str): payload = payload.encode("utf-8") length = len(payload) frame = bytearray() # First byte: FIN + opcode frame.append(0x80 | opcode) # Second byte: length (no mask bit for server) if length < 126: frame.append(length) elif length < 65536: frame.append(126) frame.extend(struct.pack("!H", length)) else: frame.append(127) frame.extend(struct.pack("!Q", length)) # Payload frame.extend(payload) self.transport.write(bytes(frame)) async def _send_close(self, code, reason=""): """Send a close frame.""" payload = struct.pack("!H", code) if reason: payload += reason.encode("utf-8")[:123] # Max 125 bytes total await self._send_frame(OPCODE_CLOSE, payload) self.closed = True benoitc-gunicorn-f5fb19e/gunicorn/config.py000066400000000000000000002646041514360242400211420ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # Please remember to run "make -C docs html" after update "desc" attributes. 
import argparse import copy import grp import inspect import ipaddress import os import pwd import re import shlex import ssl import sys import textwrap from gunicorn import __version__, util from gunicorn.errors import ConfigError from gunicorn.reloader import reloader_engines KNOWN_SETTINGS = [] PLATFORM = sys.platform def make_settings(ignore=None): settings = {} ignore = ignore or () for s in KNOWN_SETTINGS: setting = s() if setting.name in ignore: continue settings[setting.name] = setting.copy() return settings def auto_int(_, x): # for compatible with octal numbers in python3 if re.match(r'0(\d)', x, re.IGNORECASE): x = x.replace('0', '0o', 1) return int(x, 0) class Config: def __init__(self, usage=None, prog=None): self.settings = make_settings() self._forwarded_allow_networks = None self._proxy_allow_networks = None self.usage = usage self.prog = prog or os.path.basename(sys.argv[0]) self.env_orig = os.environ.copy() def __str__(self): lines = [] kmax = max(len(k) for k in self.settings) for k in sorted(self.settings): v = self.settings[k].value if callable(v): v = "<{}()>".format(v.__qualname__) lines.append("{k:{kmax}} = {v}".format(k=k, v=v, kmax=kmax)) return "\n".join(lines) def __getattr__(self, name): if name == "settings": raise AttributeError() if name not in self.settings: raise AttributeError("No configuration setting for: %s" % name) return self.settings[name].get() def __setattr__(self, name, value): if name != "settings" and name in self.settings: raise AttributeError("Invalid access!") super().__setattr__(name, value) def set(self, name, value): if name not in self.settings: raise AttributeError("No configuration setting for: %s" % name) self.settings[name].set(value) def get_cmd_args_from_env(self): if 'GUNICORN_CMD_ARGS' in self.env_orig: return shlex.split(self.env_orig['GUNICORN_CMD_ARGS']) return [] def parser(self): kwargs = { "usage": self.usage, "prog": self.prog } parser = argparse.ArgumentParser(**kwargs) parser.add_argument("-v", 
"--version", action="version", default=argparse.SUPPRESS, version="%(prog)s (version " + __version__ + ")\n", help="show program's version number and exit") parser.add_argument("args", nargs="*", help=argparse.SUPPRESS) keys = sorted(self.settings, key=self.settings.__getitem__) for k in keys: self.settings[k].add_option(parser) return parser @property def worker_class_str(self): uri = self.settings['worker_class'].get() if isinstance(uri, str): # are we using a threaded worker? is_sync = uri.endswith('SyncWorker') or uri == 'sync' if is_sync and self.threads > 1: return "gthread" return uri return uri.__name__ @property def worker_class(self): uri = self.settings['worker_class'].get() # are we using a threaded worker? is_sync = isinstance(uri, str) and (uri.endswith('SyncWorker') or uri == 'sync') if is_sync and self.threads > 1: uri = "gunicorn.workers.gthread.ThreadWorker" worker_class = util.load_class(uri) if hasattr(worker_class, "setup"): worker_class.setup() return worker_class @property def address(self): s = self.settings['bind'].get() return [util.parse_address(util.bytes_to_str(bind)) for bind in s] @property def uid(self): return self.settings['user'].get() @property def gid(self): return self.settings['group'].get() @property def proc_name(self): pn = self.settings['proc_name'].get() if pn is not None: return pn else: return self.settings['default_proc_name'].get() @property def logger_class(self): uri = self.settings['logger_class'].get() if uri == "simple": # support the default uri = LoggerClass.default # if default logger is in use, and statsd is on, automagically switch # to the statsd logger if uri == LoggerClass.default: if 'statsd_host' in self.settings and self.settings['statsd_host'].value is not None: uri = "gunicorn.instrument.statsd.Statsd" logger_class = util.load_class( uri, default="gunicorn.glogging.Logger", section="gunicorn.loggers") if hasattr(logger_class, "install"): logger_class.install() return logger_class @property def 
is_ssl(self): return self.certfile or self.keyfile def forwarded_allow_networks(self): """Return cached network objects for forwarded_allow_ips (internal use).""" if self._forwarded_allow_networks is None: self._forwarded_allow_networks = [ ipaddress.ip_network(addr) for addr in self.forwarded_allow_ips if addr != "*" ] return self._forwarded_allow_networks def proxy_allow_networks(self): """Return cached network objects for proxy_allow_ips (internal use).""" if self._proxy_allow_networks is None: self._proxy_allow_networks = [ ipaddress.ip_network(addr) for addr in self.proxy_allow_ips if addr != "*" ] return self._proxy_allow_networks @property def ssl_options(self): opts = {} for name, value in self.settings.items(): if value.section == 'SSL': opts[name] = value.get() return opts @property def env(self): raw_env = self.settings['raw_env'].get() env = {} if not raw_env: return env for e in raw_env: s = util.bytes_to_str(e) try: k, v = s.split('=', 1) except ValueError: raise RuntimeError("environment setting %r invalid" % s) env[k] = v return env @property def sendfile(self): if self.settings['sendfile'].get() is not None: return False if 'SENDFILE' in os.environ: sendfile = os.environ['SENDFILE'].lower() return sendfile in ['y', '1', 'yes', 'true'] return True @property def reuse_port(self): return self.settings['reuse_port'].get() @property def paste_global_conf(self): raw_global_conf = self.settings['raw_paste_global_conf'].get() if raw_global_conf is None: return None global_conf = {} for e in raw_global_conf: s = util.bytes_to_str(e) try: k, v = re.split(r'(?" 
% ( self.__class__.__module__, self.__class__.__name__, id(self), self.value, ) Setting = SettingMeta('Setting', (Setting,), {}) def validate_bool(val): if val is None: return if isinstance(val, bool): return val if not isinstance(val, str): raise TypeError("Invalid type for casting: %s" % val) if val.lower().strip() == "true": return True elif val.lower().strip() == "false": return False else: raise ValueError("Invalid boolean: %s" % val) def validate_dict(val): if not isinstance(val, dict): raise TypeError("Value is not a dictionary: %s " % val) return val def validate_pos_int(val): if not isinstance(val, int): val = int(val, 0) else: # Booleans are ints! val = int(val) if val < 0: raise ValueError("Value must be positive: %s" % val) return val def validate_http2_frame_size(val): """Validate HTTP/2 max frame size per RFC 7540.""" if not isinstance(val, int): val = int(val, 0) else: val = int(val) if val < 16384 or val > 16777215: raise ValueError( f"http2_max_frame_size must be between 16384 and 16777215, got {val}" ) return val def validate_ssl_version(val): if val != SSLVersion.default: sys.stderr.write("Warning: option `ssl_version` is deprecated and it is ignored. Use ssl_context instead.\n") return val def validate_string(val): if val is None: return None if not isinstance(val, str): raise TypeError("Not a string: %s" % val) return val.strip() def validate_file_exists(val): if val is None: return None if not os.path.exists(val): raise ValueError("File %s does not exists." 
% val) return val def validate_list_string(val): if not val: return [] # legacy syntax if isinstance(val, str): val = [val] return [validate_string(v) for v in val] def validate_list_of_existing_files(val): return [validate_file_exists(v) for v in validate_list_string(val)] def validate_string_to_addr_list(val): val = validate_string_to_list(val) for addr in val: if addr == "*": continue # Validate that it's a valid IP address or CIDR network # but keep the string representation for backward compatibility. # Use strict mode to detect mistakes like 192.168.1.1/24 where # host bits are set (should be 192.168.1.0/24). ipaddress.ip_network(addr) return val def validate_string_to_list(val): val = validate_string(val) if not val: return [] return [v.strip() for v in val.split(",") if v] def validate_class(val): if inspect.isfunction(val) or inspect.ismethod(val): val = val() if inspect.isclass(val): return val return validate_string(val) def validate_callable(arity): def _validate_callable(val): if isinstance(val, str): try: mod_name, obj_name = val.rsplit(".", 1) except ValueError: raise TypeError("Value '%s' is not import string. 
" "Format: module[.submodules...].object" % val) try: mod = __import__(mod_name, fromlist=[obj_name]) val = getattr(mod, obj_name) except ImportError as e: raise TypeError(str(e)) except AttributeError: raise TypeError("Can not load '%s' from '%s'" "" % (obj_name, mod_name)) if not callable(val): raise TypeError("Value is not callable: %s" % val) if arity != -1 and arity != util.get_arity(val): raise TypeError("Value must have an arity of: %s" % arity) return val return _validate_callable def validate_user(val): if val is None: return os.geteuid() if isinstance(val, int): return val elif val.isdigit(): return int(val) else: try: return pwd.getpwnam(val).pw_uid except KeyError: raise ConfigError("No such user: '%s'" % val) def validate_group(val): if val is None: return os.getegid() if isinstance(val, int): return val elif val.isdigit(): return int(val) else: try: return grp.getgrnam(val).gr_gid except KeyError: raise ConfigError("No such group: '%s'" % val) def validate_post_request(val): val = validate_callable(-1)(val) largs = util.get_arity(val) if largs == 4: return val elif largs == 3: return lambda worker, req, env, _r: val(worker, req, env) elif largs == 2: return lambda worker, req, _e, _r: val(worker, req) else: raise TypeError("Value must have an arity of: 4") def validate_chdir(val): # valid if the value is a string val = validate_string(val) # transform relative paths path = os.path.abspath(os.path.normpath(os.path.join(util.getcwd(), val))) # test if the path exists if not os.path.exists(path): raise ConfigError("can't chdir to %r" % val) return path def validate_statsd_address(val): val = validate_string(val) if val is None: return None # As of major release 20, util.parse_address would recognize unix:PORT # as a UDS address, breaking backwards compatibility. We defend against # that regression here (this is also unit-tested). # Feel free to remove in the next major release. 
unix_hostname_regression = re.match(r'^unix:(\d+)$', val) if unix_hostname_regression: return ('unix', int(unix_hostname_regression.group(1))) try: address = util.parse_address(val, default_port='8125') except RuntimeError: raise TypeError("Value must be one of ('host:port', 'unix://PATH')") return address def validate_reload_engine(val): if val not in reloader_engines: raise ConfigError("Invalid reload_engine: %r" % val) return val def get_default_config_file(): config_path = os.path.join(os.path.abspath(os.getcwd()), 'gunicorn.conf.py') if os.path.exists(config_path): return config_path return None class ConfigFile(Setting): name = "config" section = "Config File" cli = ["-c", "--config"] meta = "CONFIG" validator = validate_string default = "./gunicorn.conf.py" desc = """\ :ref:`The Gunicorn config file`. A string of the form ``PATH``, ``file:PATH``, or ``python:MODULE_NAME``. Only has an effect when specified on the command line or as part of an application specific configuration. By default, a file named ``gunicorn.conf.py`` will be read from the same directory where gunicorn is being run. .. versionchanged:: 19.4 Loading the config from a Python module requires the ``python:`` prefix. """ class WSGIApp(Setting): name = "wsgi_app" section = "Config File" meta = "STRING" validator = validate_string default = None desc = """\ A WSGI application path in pattern ``$(MODULE_NAME):$(VARIABLE_NAME)``. .. versionadded:: 20.1.0 """ class Bind(Setting): name = "bind" action = "append" section = "Server Socket" cli = ["-b", "--bind"] meta = "ADDRESS" validator = validate_list_string if 'PORT' in os.environ: default = ['0.0.0.0:{0}'.format(os.environ.get('PORT'))] else: default = ['127.0.0.1:8000'] desc = """\ The socket to bind. A string of the form: ``HOST``, ``HOST:PORT``, ``unix:PATH``, ``fd://FD``. An IP is a valid ``HOST``. .. versionchanged:: 20.0 Support for ``fd://FD`` got added. Multiple addresses can be bound. 
ex.:: $ gunicorn -b 127.0.0.1:8000 -b [::1]:8000 test:app will bind the `test:app` application on localhost both on ipv6 and ipv4 interfaces. If the ``PORT`` environment variable is defined, the default is ``['0.0.0.0:$PORT']``. If it is not defined, the default is ``['127.0.0.1:8000']``. """ class Backlog(Setting): name = "backlog" section = "Server Socket" cli = ["--backlog"] meta = "INT" validator = validate_pos_int type = int default = 2048 desc = """\ The maximum number of pending connections. This refers to the number of clients that can be waiting to be served. Exceeding this number results in the client getting an error when attempting to connect. It should only affect servers under significant load. Must be a positive integer. Generally set in the 64-2048 range. """ class Workers(Setting): name = "workers" section = "Worker Processes" cli = ["-w", "--workers"] meta = "INT" validator = validate_pos_int type = int default = int(os.environ.get("WEB_CONCURRENCY", 1)) desc = """\ The number of worker processes for handling requests. A positive integer generally in the ``2-4 x $(NUM_CORES)`` range. You'll want to vary this a bit to find the best for your particular application's work load. By default, the value of the ``WEB_CONCURRENCY`` environment variable, which is set by some Platform-as-a-Service providers such as Heroku. If it is not defined, the default is ``1``. """ class WorkerClass(Setting): name = "worker_class" section = "Worker Processes" cli = ["-k", "--worker-class"] meta = "STRING" validator = validate_class default = "sync" desc = """\ The type of workers to use. The default class (``sync``) should handle most "normal" types of workloads. You'll want to read :doc:`design` for information on when you might want to choose one of the other worker classes. Required libraries may be installed using setuptools' ``extras_require`` feature. 
A string referring to one of the following bundled classes: * ``sync`` * ``eventlet`` - **DEPRECATED: will be removed in 26.0**. Requires eventlet >= 0.40.3 * ``gevent`` - Requires gevent >= 24.10.1 (or install it via ``pip install gunicorn[gevent]``) * ``tornado`` - Requires tornado >= 6.5.0 (or install it via ``pip install gunicorn[tornado]``) * ``gthread`` - Python 2 requires the futures package to be installed (or install it via ``pip install gunicorn[gthread]``) Optionally, you can provide your own worker by giving Gunicorn a Python path to a subclass of ``gunicorn.workers.base.Worker``. This alternative syntax will load the gevent class: ``gunicorn.workers.ggevent.GeventWorker``. """ class WorkerThreads(Setting): name = "threads" section = "Worker Processes" cli = ["--threads"] meta = "INT" validator = validate_pos_int type = int default = 1 desc = """\ The number of worker threads for handling requests. Run each worker with the specified number of threads. A positive integer generally in the ``2-4 x $(NUM_CORES)`` range. You'll want to vary this a bit to find the best for your particular application's work load. If it is not defined, the default is ``1``. This setting only affects the Gthread worker type. .. note:: If you try to use the ``sync`` worker type and set the ``threads`` setting to more than 1, the ``gthread`` worker type will be used instead. """ class WorkerConnections(Setting): name = "worker_connections" section = "Worker Processes" cli = ["--worker-connections"] meta = "INT" validator = validate_pos_int type = int default = 1000 desc = """\ The maximum number of simultaneous clients. This setting only affects the ``gthread``, ``eventlet`` and ``gevent`` worker types. """ class MaxRequests(Setting): name = "max_requests" section = "Worker Processes" cli = ["--max-requests"] meta = "INT" validator = validate_pos_int type = int default = 0 desc = """\ The maximum number of requests a worker will process before restarting. 
Any value greater than zero will limit the number of requests a worker will process before automatically restarting. This is a simple method to help limit the damage of memory leaks. If this is set to zero (the default) then the automatic worker restarts are disabled. """ class MaxRequestsJitter(Setting): name = "max_requests_jitter" section = "Worker Processes" cli = ["--max-requests-jitter"] meta = "INT" validator = validate_pos_int type = int default = 0 desc = """\ The maximum jitter to add to the *max_requests* setting. The jitter causes the restart per worker to be randomized by ``randint(0, max_requests_jitter)``. This is intended to stagger worker restarts to avoid all workers restarting at the same time. .. versionadded:: 19.2 """ class Timeout(Setting): name = "timeout" section = "Worker Processes" cli = ["-t", "--timeout"] meta = "INT" validator = validate_pos_int type = int default = 30 desc = """\ Workers silent for more than this many seconds are killed and restarted. Value is a positive number or 0. Setting it to 0 has the effect of infinite timeouts by disabling timeouts for all workers entirely. Generally, the default of thirty seconds should suffice. Only set this noticeably higher if you're sure of the repercussions for sync workers. For the non sync workers it just means that the worker process is still communicating and is not tied to the length of time required to handle a single request. """ class GracefulTimeout(Setting): name = "graceful_timeout" section = "Worker Processes" cli = ["--graceful-timeout"] meta = "INT" validator = validate_pos_int type = int default = 30 desc = """\ Timeout for graceful workers restart in seconds. After receiving a restart signal, workers have this much time to finish serving requests. Workers still alive after the timeout (starting from the receipt of the restart signal) are force killed. 
""" class Keepalive(Setting): name = "keepalive" section = "Worker Processes" cli = ["--keep-alive"] meta = "INT" validator = validate_pos_int type = int default = 2 desc = """\ The number of seconds to wait for requests on a Keep-Alive connection. Generally set in the 1-5 seconds range for servers with direct connection to the client (e.g. when you don't have separate load balancer). When Gunicorn is deployed behind a load balancer, it often makes sense to set this to a higher value. .. note:: ``sync`` worker does not support persistent connections and will ignore this option. """ class LimitRequestLine(Setting): name = "limit_request_line" section = "Security" cli = ["--limit-request-line"] meta = "INT" validator = validate_pos_int type = int default = 4094 desc = """\ The maximum size of HTTP request line in bytes. This parameter is used to limit the allowed size of a client's HTTP request-line. Since the request-line consists of the HTTP method, URI, and protocol version, this directive places a restriction on the length of a request-URI allowed for a request on the server. A server needs this value to be large enough to hold any of its resource names, including any information that might be passed in the query part of a GET request. Value is a number from 0 (unlimited) to 8190. This parameter can be used to prevent any DDOS attack. """ class LimitRequestFields(Setting): name = "limit_request_fields" section = "Security" cli = ["--limit-request-fields"] meta = "INT" validator = validate_pos_int type = int default = 100 desc = """\ Limit the number of HTTP headers fields in a request. This parameter is used to limit the number of headers in a request to prevent DDOS attack. Used with the *limit_request_field_size* it allows more safety. By default this value is 100 and can't be larger than 32768. 
""" class LimitRequestFieldSize(Setting): name = "limit_request_field_size" section = "Security" cli = ["--limit-request-field_size"] meta = "INT" validator = validate_pos_int type = int default = 8190 desc = """\ Limit the allowed size of an HTTP request header field. Value is a positive number or 0. Setting it to 0 will allow unlimited header field sizes. .. warning:: Setting this parameter to a very high or unlimited value can open up for DDOS attacks. """ class Reload(Setting): name = "reload" section = 'Debugging' cli = ['--reload'] validator = validate_bool action = 'store_true' default = False desc = '''\ Restart workers when code changes. This setting is intended for development. It will cause workers to be restarted whenever application code changes. The reloader is incompatible with application preloading. When using a paste configuration be sure that the server block does not import any application code or the reload will not work as designed. The default behavior is to attempt inotify with a fallback to file system polling. Generally, inotify should be preferred if available because it consumes less system resources. .. note:: In order to use the inotify reloader, you must have the ``inotify`` package installed. .. warning:: Enabling this will change what happens on failure to load the the application: While the reloader is active, any and all clients that can make requests can see the full exception and traceback! ''' class ReloadEngine(Setting): name = "reload_engine" section = "Debugging" cli = ["--reload-engine"] meta = "STRING" validator = validate_reload_engine default = "auto" desc = """\ The implementation that should be used to power :ref:`reload`. Valid engines are: * ``'auto'`` * ``'poll'`` * ``'inotify'`` (requires inotify) .. 
versionadded:: 19.7 """ class ReloadExtraFiles(Setting): name = "reload_extra_files" action = "append" section = "Debugging" cli = ["--reload-extra-file"] meta = "FILES" validator = validate_list_of_existing_files default = [] desc = """\ Extends :ref:`reload` option to also watch and reload on additional files (e.g., templates, configurations, specifications, etc.). .. versionadded:: 19.8 """ class Spew(Setting): name = "spew" section = "Debugging" cli = ["--spew"] validator = validate_bool action = "store_true" default = False desc = """\ Install a trace function that spews every line executed by the server. This is the nuclear option. """ class ConfigCheck(Setting): name = "check_config" section = "Debugging" cli = ["--check-config"] validator = validate_bool action = "store_true" default = False desc = """\ Check the configuration and exit. The exit status is 0 if the configuration is correct, and 1 if the configuration is incorrect. """ class PrintConfig(Setting): name = "print_config" section = "Debugging" cli = ["--print-config"] validator = validate_bool action = "store_true" default = False desc = """\ Print the configuration settings as fully resolved. Implies :ref:`check-config`. """ class PreloadApp(Setting): name = "preload_app" section = "Server Mechanics" cli = ["--preload"] validator = validate_bool action = "store_true" default = False desc = """\ Load application code before the worker processes are forked. By preloading an application you can save some RAM resources as well as speed up server boot times. Although, if you defer application loading to each worker process, you can reload your application code easily by restarting workers. """ class Sendfile(Setting): name = "sendfile" section = "Server Mechanics" cli = ["--no-sendfile"] validator = validate_bool action = "store_const" const = False desc = """\ Disables the use of ``sendfile()``. If not set, the value of the ``SENDFILE`` environment variable is used to enable or disable its usage. .. 
versionadded:: 19.2 .. versionchanged:: 19.4 Swapped ``--sendfile`` with ``--no-sendfile`` to actually allow disabling. .. versionchanged:: 19.6 added support for the ``SENDFILE`` environment variable """ class ReusePort(Setting): name = "reuse_port" section = "Server Mechanics" cli = ["--reuse-port"] validator = validate_bool action = "store_true" default = False desc = """\ Set the ``SO_REUSEPORT`` flag on the listening socket. .. versionadded:: 19.8 """ class Chdir(Setting): name = "chdir" section = "Server Mechanics" cli = ["--chdir"] validator = validate_chdir default = util.getcwd() default_doc = "``'.'``" desc = """\ Change directory to specified directory before loading apps. """ class Daemon(Setting): name = "daemon" section = "Server Mechanics" cli = ["-D", "--daemon"] validator = validate_bool action = "store_true" default = False desc = """\ Daemonize the Gunicorn process. Detaches the server from the controlling terminal and enters the background. """ class Env(Setting): name = "raw_env" action = "append" section = "Server Mechanics" cli = ["-e", "--env"] meta = "ENV" validator = validate_list_string default = [] desc = """\ Set environment variables in the execution environment. Should be a list of strings in the ``key=value`` format. For example on the command line: .. code-block:: console $ gunicorn -b 127.0.0.1:8000 --env FOO=1 test:app Or in the configuration file: .. code-block:: python raw_env = ["FOO=1"] """ class Pidfile(Setting): name = "pidfile" section = "Server Mechanics" cli = ["-p", "--pid"] meta = "FILE" validator = validate_string default = None desc = """\ A filename to use for the PID file. If not set, no PID file will be written. """ class WorkerTmpDir(Setting): name = "worker_tmp_dir" section = "Server Mechanics" cli = ["--worker-tmp-dir"] meta = "DIR" validator = validate_string default = None desc = """\ A directory to use for the worker heartbeat temporary file. If not set, the default temporary directory will be used. .. 
note:: The current heartbeat system involves calling ``os.fchmod`` on temporary file handlers and may block a worker for arbitrary time if the directory is on a disk-backed filesystem. See :ref:`blocking-os-fchmod` for more detailed information and a solution for avoiding this problem. """ class User(Setting): name = "user" section = "Server Mechanics" cli = ["-u", "--user"] meta = "USER" validator = validate_user default = os.geteuid() default_doc = "``os.geteuid()``" desc = """\ Switch worker processes to run as this user. A valid user id (as an integer) or the name of a user that can be retrieved with a call to ``pwd.getpwnam(value)`` or ``None`` to not change the worker process user. """ class Group(Setting): name = "group" section = "Server Mechanics" cli = ["-g", "--group"] meta = "GROUP" validator = validate_group default = os.getegid() default_doc = "``os.getegid()``" desc = """\ Switch worker process to run as this group. A valid group id (as an integer) or the name of a user that can be retrieved with a call to ``grp.getgrnam(value)`` or ``None`` to not change the worker processes group. """ class Umask(Setting): name = "umask" section = "Server Mechanics" cli = ["-m", "--umask"] meta = "INT" validator = validate_pos_int type = auto_int default = 0 desc = """\ A bit mask for the file mode on files written by Gunicorn. Note that this affects unix socket permissions. A valid value for the ``os.umask(mode)`` call or a string compatible with ``int(value, 0)`` (``0`` means Python guesses the base, so values like ``0``, ``0xFF``, ``0022`` are valid for decimal, hex, and octal representations) """ class Initgroups(Setting): name = "initgroups" section = "Server Mechanics" cli = ["--initgroups"] validator = validate_bool action = 'store_true' default = False desc = """\ If true, set the worker process's group access list with all of the groups of which the specified username is a member, plus the specified group id. .. 
versionadded:: 19.7 """ class TmpUploadDir(Setting): name = "tmp_upload_dir" section = "Server Mechanics" meta = "DIR" validator = validate_string default = None desc = """\ Directory to store temporary request data as they are read. This may disappear in the near future. This path should be writable by the process permissions set for Gunicorn workers. If not specified, Gunicorn will choose a system generated temporary directory. """ class SecureSchemeHeader(Setting): name = "secure_scheme_headers" section = "Server Mechanics" validator = validate_dict default = { "X-FORWARDED-PROTOCOL": "ssl", "X-FORWARDED-PROTO": "https", "X-FORWARDED-SSL": "on" } desc = """\ A dictionary containing headers and values that the front-end proxy uses to indicate HTTPS requests. If the source IP is permitted by :ref:`forwarded-allow-ips` (below), *and* at least one request header matches a key-value pair listed in this dictionary, then Gunicorn will set ``wsgi.url_scheme`` to ``https``, so your application can tell that the request is secure. If the other headers listed in this dictionary are not present in the request, they will be ignored, but if the other headers are present and do not match the provided values, then the request will fail to parse. See the note below for more detailed examples of this behaviour. The dictionary should map upper-case header names to exact string values. The value comparisons are case-sensitive, unlike the header names, so make sure they're exactly what your front-end proxy sends when handling HTTPS requests. It is important that your front-end proxy configuration ensures that the headers defined here can not be passed directly from the client. 
""" class ForwardedAllowIPS(Setting): name = "forwarded_allow_ips" section = "Server Mechanics" cli = ["--forwarded-allow-ips"] meta = "STRING" validator = validate_string_to_addr_list default = os.environ.get("FORWARDED_ALLOW_IPS", "127.0.0.1,::1") desc = """\ Front-end's IP addresses or networks from which allowed to handle set secure headers. (comma separated). Supports both individual IP addresses (e.g., ``192.168.1.1``) and CIDR networks (e.g., ``192.168.0.0/16``). Set to ``*`` to disable checking of front-end IPs. This is useful for setups where you don't know in advance the IP address of front-end, but instead have ensured via other means that only your authorized front-ends can access Gunicorn. By default, the value of the ``FORWARDED_ALLOW_IPS`` environment variable. If it is not defined, the default is ``"127.0.0.1,::1"``. .. note:: This option does not affect UNIX socket connections. Connections not associated with an IP address are treated as allowed, unconditionally. .. note:: The interplay between the request headers, the value of ``forwarded_allow_ips``, and the value of ``secure_scheme_headers`` is complex. Various scenarios are documented below to further elaborate. In each case, we have a request from the remote address 134.213.44.18, and the default value of ``secure_scheme_headers``: .. code:: secure_scheme_headers = { 'X-FORWARDED-PROTOCOL': 'ssl', 'X-FORWARDED-PROTO': 'https', 'X-FORWARDED-SSL': 'on' } .. list-table:: :header-rows: 1 :align: center :widths: auto * - ``forwarded-allow-ips`` - Secure Request Headers - Result - Explanation * - .. code:: ["127.0.0.1"] - .. code:: X-Forwarded-Proto: https - .. code:: wsgi.url_scheme = "http" - IP address was not allowed * - .. code:: "*" - - .. code:: wsgi.url_scheme = "http" - IP address allowed, but no secure headers provided * - .. code:: "*" - .. code:: X-Forwarded-Proto: https - .. code:: wsgi.url_scheme = "https" - IP address allowed, one request header matched * - .. 
code:: ["134.213.44.18"] - .. code:: X-Forwarded-Ssl: on X-Forwarded-Proto: http - ``InvalidSchemeHeaders()`` raised - IP address allowed, but the two secure headers disagreed on if HTTPS was used """ class AccessLog(Setting): name = "accesslog" section = "Logging" cli = ["--access-logfile"] meta = "FILE" validator = validate_string default = None desc = """\ The Access log file to write to. ``'-'`` means log to stdout. """ class DisableRedirectAccessToSyslog(Setting): name = "disable_redirect_access_to_syslog" section = "Logging" cli = ["--disable-redirect-access-to-syslog"] validator = validate_bool action = 'store_true' default = False desc = """\ Disable redirect access logs to syslog. .. versionadded:: 19.8 """ class AccessLogFormat(Setting): name = "access_log_format" section = "Logging" cli = ["--access-logformat"] meta = "STRING" validator = validate_string default = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"' desc = """\ The access log format. =========== =========== Identifier Description =========== =========== h remote address l ``'-'`` u user name (if HTTP Basic auth used) t date of the request r status line (e.g. ``GET / HTTP/1.1``) m request method U URL path without query string q query string H protocol s status B response length b response length or ``'-'`` (CLF format) f referrer (note: header is ``referer``) a user agent T request time in seconds M request time in milliseconds D request time in microseconds L request time in decimal seconds p process ID {header}i request header {header}o response header {variable}e environment variable =========== =========== Use lowercase for header and environment variable names, and put ``{...}x`` names inside ``%(...)s``. For example:: %({x-forwarded-for}i)s """ class ErrorLog(Setting): name = "errorlog" section = "Logging" cli = ["--error-logfile", "--log-file"] meta = "FILE" validator = validate_string default = '-' desc = """\ The Error log file to write to. 
Using ``'-'`` for FILE makes gunicorn log to stderr. .. versionchanged:: 19.2 Log to stderr by default. """ class Loglevel(Setting): name = "loglevel" section = "Logging" cli = ["--log-level"] meta = "LEVEL" validator = validate_string default = "info" desc = """\ The granularity of Error log outputs. Valid level names are: * ``'debug'`` * ``'info'`` * ``'warning'`` * ``'error'`` * ``'critical'`` """ class CaptureOutput(Setting): name = "capture_output" section = "Logging" cli = ["--capture-output"] validator = validate_bool action = 'store_true' default = False desc = """\ Redirect stdout/stderr to specified file in :ref:`errorlog`. .. versionadded:: 19.6 """ class LoggerClass(Setting): name = "logger_class" section = "Logging" cli = ["--logger-class"] meta = "STRING" validator = validate_class default = "gunicorn.glogging.Logger" desc = """\ The logger you want to use to log events in Gunicorn. The default class (``gunicorn.glogging.Logger``) handles most normal usages in logging. It provides error and access logging. You can provide your own logger by giving Gunicorn a Python path to a class that quacks like ``gunicorn.glogging.Logger``. """ class LogConfig(Setting): name = "logconfig" section = "Logging" cli = ["--log-config"] meta = "FILE" validator = validate_string default = None desc = """\ The log config file to use. Gunicorn uses the standard Python logging module's Configuration file format. """ class LogConfigDict(Setting): name = "logconfig_dict" section = "Logging" validator = validate_dict default = {} desc = """\ The log config dictionary to use, using the standard Python logging module's dictionary configuration format. This option takes precedence over the :ref:`logconfig` and :ref:`logconfig-json` options, which uses the older file configuration format and JSON respectively. 
Format: https://docs.python.org/3/library/logging.config.html#logging.config.dictConfig For more context you can look at the default configuration dictionary for logging, which can be found at ``gunicorn.glogging.CONFIG_DEFAULTS``. .. versionadded:: 19.8 """ class LogConfigJson(Setting): name = "logconfig_json" section = "Logging" cli = ["--log-config-json"] meta = "FILE" validator = validate_string default = None desc = """\ The log config to read config from a JSON file Format: https://docs.python.org/3/library/logging.config.html#logging.config.jsonConfig .. versionadded:: 20.0 """ class SyslogTo(Setting): name = "syslog_addr" section = "Logging" cli = ["--log-syslog-to"] meta = "SYSLOG_ADDR" validator = validate_string if PLATFORM == "darwin": default = "unix:///var/run/syslog" elif PLATFORM in ('freebsd', 'dragonfly', ): default = "unix:///var/run/log" elif PLATFORM == "openbsd": default = "unix:///dev/log" else: default = "udp://localhost:514" default_doc = """\ Platform-specific: * macOS: ``'unix:///var/run/syslog'`` * FreeBSD/DragonFly: ``'unix:///var/run/log'`` * OpenBSD: ``'unix:///dev/log'`` * Linux/other: ``'udp://localhost:514'`` """ desc = """\ Address to send syslog messages. Address is a string of the form: * ``unix://PATH#TYPE`` : for unix domain socket. ``TYPE`` can be ``stream`` for the stream driver or ``dgram`` for the dgram driver. ``stream`` is the default. * ``udp://HOST:PORT`` : for UDP sockets * ``tcp://HOST:PORT`` : for TCP sockets """ class Syslog(Setting): name = "syslog" section = "Logging" cli = ["--log-syslog"] validator = validate_bool action = 'store_true' default = False desc = """\ Send *Gunicorn* logs to syslog. .. versionchanged:: 19.8 You can now disable sending access logs by using the :ref:`disable-redirect-access-to-syslog` setting. 
""" class SyslogPrefix(Setting): name = "syslog_prefix" section = "Logging" cli = ["--log-syslog-prefix"] meta = "SYSLOG_PREFIX" validator = validate_string default = None desc = """\ Makes Gunicorn use the parameter as program-name in the syslog entries. All entries will be prefixed by ``gunicorn.``. By default the program name is the name of the process. """ class SyslogFacility(Setting): name = "syslog_facility" section = "Logging" cli = ["--log-syslog-facility"] meta = "SYSLOG_FACILITY" validator = validate_string default = "user" desc = """\ Syslog facility name """ class EnableStdioInheritance(Setting): name = "enable_stdio_inheritance" section = "Logging" cli = ["-R", "--enable-stdio-inheritance"] validator = validate_bool default = False action = "store_true" desc = """\ Enable stdio inheritance. Enable inheritance for stdio file descriptors in daemon mode. Note: To disable the Python stdout buffering, you can to set the user environment variable ``PYTHONUNBUFFERED`` . """ # statsD monitoring class StatsdHost(Setting): name = "statsd_host" section = "Logging" cli = ["--statsd-host"] meta = "STATSD_ADDR" default = None validator = validate_statsd_address desc = """\ The address of the StatsD server to log to. Address is a string of the form: * ``unix://PATH`` : for a unix domain socket. * ``HOST:PORT`` : for a network address .. versionadded:: 19.1 """ # Datadog Statsd (dogstatsd) tags. https://docs.datadoghq.com/developers/dogstatsd/ class DogstatsdTags(Setting): name = "dogstatsd_tags" section = "Logging" cli = ["--dogstatsd-tags"] meta = "DOGSTATSD_TAGS" default = "" validator = validate_string desc = """\ A comma-delimited list of datadog statsd (dogstatsd) tags to append to statsd metrics. e.g. ``'tag1:value1,tag2:value2'`` .. 
versionadded:: 20 """ class StatsdPrefix(Setting): name = "statsd_prefix" section = "Logging" cli = ["--statsd-prefix"] meta = "STATSD_PREFIX" default = "" validator = validate_string desc = """\ Prefix to use when emitting statsd metrics (a trailing ``.`` is added, if not provided). .. versionadded:: 19.2 """ class BacklogMetric(Setting): name = "enable_backlog_metric" section = "Logging" cli = ["--enable-backlog-metric"] validator = validate_bool default = False action = "store_true" desc = """\ Enable socket backlog metric (only supported on Linux). When enabled, gunicorn will emit a ``gunicorn.backlog`` histogram metric showing the number of connections waiting in the socket backlog. """ class Procname(Setting): name = "proc_name" section = "Process Naming" cli = ["-n", "--name"] meta = "STRING" validator = validate_string default = None desc = """\ A base to use with setproctitle for process naming. This affects things like ``ps`` and ``top``. If you're going to be running more than one instance of Gunicorn you'll probably want to set a name to tell them apart. This requires that you install the setproctitle module. If not set, the *default_proc_name* setting will be used. """ class DefaultProcName(Setting): name = "default_proc_name" section = "Process Naming" validator = validate_string default = "gunicorn" desc = """\ Internal setting that is adjusted for each type of application. """ class PythonPath(Setting): name = "pythonpath" section = "Server Mechanics" cli = ["--pythonpath"] meta = "STRING" validator = validate_string default = None desc = """\ A comma-separated list of directories to add to the Python path. e.g. ``'/home/djangoprojects/myproject,/home/python/mylibrary'``. """ class Paste(Setting): name = "paste" section = "Server Mechanics" cli = ["--paste", "--paster"] meta = "STRING" validator = validate_string default = None desc = """\ Load a PasteDeploy config file. 
The argument may contain a ``#`` symbol followed by the name of an app section from the config file, e.g. ``production.ini#admin``. At this time, using alternate server blocks is not supported. Use the command line arguments to control server configuration instead. """ class OnStarting(Setting): name = "on_starting" section = "Server Hooks" validator = validate_callable(1) type = callable def on_starting(server): pass default = staticmethod(on_starting) desc = """\ Called just before the master process is initialized. The callable needs to accept a single instance variable for the Arbiter. """ class OnReload(Setting): name = "on_reload" section = "Server Hooks" validator = validate_callable(1) type = callable def on_reload(server): pass default = staticmethod(on_reload) desc = """\ Called to recycle workers during a reload via SIGHUP. The callable needs to accept a single instance variable for the Arbiter. """ class WhenReady(Setting): name = "when_ready" section = "Server Hooks" validator = validate_callable(1) type = callable def when_ready(server): pass default = staticmethod(when_ready) desc = """\ Called just after the server is started. The callable needs to accept a single instance variable for the Arbiter. """ class Prefork(Setting): name = "pre_fork" section = "Server Hooks" validator = validate_callable(2) type = callable def pre_fork(server, worker): pass default = staticmethod(pre_fork) desc = """\ Called just before a worker is forked. The callable needs to accept two instance variables for the Arbiter and new Worker. """ class Postfork(Setting): name = "post_fork" section = "Server Hooks" validator = validate_callable(2) type = callable def post_fork(server, worker): pass default = staticmethod(post_fork) desc = """\ Called just after a worker has been forked. The callable needs to accept two instance variables for the Arbiter and new Worker. 
""" class PostWorkerInit(Setting): name = "post_worker_init" section = "Server Hooks" validator = validate_callable(1) type = callable def post_worker_init(worker): pass default = staticmethod(post_worker_init) desc = """\ Called just after a worker has initialized the application. The callable needs to accept one instance variable for the initialized Worker. """ class WorkerInt(Setting): name = "worker_int" section = "Server Hooks" validator = validate_callable(1) type = callable def worker_int(worker): pass default = staticmethod(worker_int) desc = """\ Called just after a worker exited on SIGINT or SIGQUIT. The callable needs to accept one instance variable for the initialized Worker. """ class WorkerAbort(Setting): name = "worker_abort" section = "Server Hooks" validator = validate_callable(1) type = callable def worker_abort(worker): pass default = staticmethod(worker_abort) desc = """\ Called when a worker received the SIGABRT signal. This call generally happens on timeout. The callable needs to accept one instance variable for the initialized Worker. """ class PreExec(Setting): name = "pre_exec" section = "Server Hooks" validator = validate_callable(1) type = callable def pre_exec(server): pass default = staticmethod(pre_exec) desc = """\ Called just before a new master process is forked. The callable needs to accept a single instance variable for the Arbiter. """ class PreRequest(Setting): name = "pre_request" section = "Server Hooks" validator = validate_callable(2) type = callable def pre_request(worker, req): worker.log.debug("%s %s", req.method, req.path) default = staticmethod(pre_request) desc = """\ Called just before a worker processes the request. The callable needs to accept two instance variables for the Worker and the Request. 
""" class PostRequest(Setting): name = "post_request" section = "Server Hooks" validator = validate_post_request type = callable def post_request(worker, req, environ, resp): pass default = staticmethod(post_request) desc = """\ Called after a worker processes the request. The callable needs to accept two instance variables for the Worker and the Request. If a third parameter is defined it will be passed the environment. If a fourth parameter is defined it will be passed the Response. """ class ChildExit(Setting): name = "child_exit" section = "Server Hooks" validator = validate_callable(2) type = callable def child_exit(server, worker): pass default = staticmethod(child_exit) desc = """\ Called just after a worker has been exited, in the master process. The callable needs to accept two instance variables for the Arbiter and the just-exited Worker. .. versionadded:: 19.7 """ class WorkerExit(Setting): name = "worker_exit" section = "Server Hooks" validator = validate_callable(2) type = callable def worker_exit(server, worker): pass default = staticmethod(worker_exit) desc = """\ Called just after a worker has been exited, in the worker process. The callable needs to accept two instance variables for the Arbiter and the just-exited Worker. """ class NumWorkersChanged(Setting): name = "nworkers_changed" section = "Server Hooks" validator = validate_callable(3) type = callable def nworkers_changed(server, new_value, old_value): pass default = staticmethod(nworkers_changed) desc = """\ Called just after *num_workers* has been changed. The callable needs to accept an instance variable of the Arbiter and two integers of number of workers after and before change. If the number of workers is set for the first time, *old_value* would be ``None``. """ class OnExit(Setting): name = "on_exit" section = "Server Hooks" validator = validate_callable(1) def on_exit(server): pass default = staticmethod(on_exit) desc = """\ Called just before exiting Gunicorn. 
The callable needs to accept a single instance variable for the Arbiter. """ class NewSSLContext(Setting): name = "ssl_context" section = "Server Hooks" validator = validate_callable(2) type = callable def ssl_context(config, default_ssl_context_factory): return default_ssl_context_factory() default = staticmethod(ssl_context) desc = """\ Called when SSLContext is needed. Allows customizing SSL context. The callable needs to accept an instance variable for the Config and a factory function that returns default SSLContext which is initialized with certificates, private key, cert_reqs, and ciphers according to config and can be further customized by the callable. The callable needs to return SSLContext object. Following example shows a configuration file that sets the minimum TLS version to 1.3: .. code-block:: python def ssl_context(conf, default_ssl_context_factory): import ssl context = default_ssl_context_factory() context.minimum_version = ssl.TLSVersion.TLSv1_3 return context .. versionadded:: 21.0 """ def validate_proxy_protocol(val): """Validate proxy_protocol setting. Accepts: off, false, v1, v2, auto, true Returns normalized value: off, v1, v2, or auto """ if val is None: return "off" if isinstance(val, bool): return "auto" if val else "off" if not isinstance(val, str): raise TypeError("proxy_protocol must be string or bool") val = val.lower().strip() mapping = { "false": "off", "off": "off", "0": "off", "none": "off", "true": "auto", "auto": "auto", "1": "auto", "v1": "v1", "v2": "v2", } if val not in mapping: raise ValueError("proxy_protocol must be: off, v1, v2, or auto") return mapping[val] class ProxyProtocol(Setting): name = "proxy_protocol" section = "Server Mechanics" cli = ["--proxy-protocol"] meta = "MODE" validator = validate_proxy_protocol default = "off" nargs = "?" const = "auto" desc = """\ Enable PROXY protocol support. Allow using HTTP and PROXY protocol together. 
It may be useful for work with stunnel as HTTPS frontend and Gunicorn as HTTP server, or with HAProxy. Accepted values: * ``off`` - Disabled (default) * ``v1`` - PROXY protocol v1 only (text format) * ``v2`` - PROXY protocol v2 only (binary format) * ``auto`` - Auto-detect v1 or v2 Using ``--proxy-protocol`` without a value is equivalent to ``auto``. PROXY protocol v1: http://haproxy.1wt.eu/download/1.5/doc/proxy-protocol.txt PROXY protocol v2: https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt Example for stunnel config:: [https] protocol = proxy accept = 443 connect = 80 cert = /etc/ssl/certs/stunnel.pem key = /etc/ssl/certs/stunnel.key .. versionchanged:: 24.1.0 Extended to support version selection (v1, v2, auto). """ class ProxyAllowFrom(Setting): name = "proxy_allow_ips" section = "Server Mechanics" cli = ["--proxy-allow-from"] validator = validate_string_to_addr_list default = "127.0.0.1,::1" desc = """\ Front-end's IP addresses or networks from which allowed accept proxy requests (comma separated). Supports both individual IP addresses (e.g., ``192.168.1.1``) and CIDR networks (e.g., ``192.168.0.0/16``). Set to ``*`` to disable checking of front-end IPs. This is useful for setups where you don't know in advance the IP address of front-end, but instead have ensured via other means that only your authorized front-ends can access Gunicorn. .. note:: This option does not affect UNIX socket connections. Connections not associated with an IP address are treated as allowed, unconditionally. """ class Protocol(Setting): name = "protocol" section = "Server Mechanics" cli = ["--protocol"] meta = "STRING" validator = validate_string default = "http" desc = """\ The protocol for incoming connections. 
* ``http`` - Standard HTTP/1.x (default) * ``uwsgi`` - uWSGI binary protocol (for nginx uwsgi_pass) When using the uWSGI protocol, Gunicorn can receive requests from nginx using the uwsgi_pass directive:: upstream gunicorn { server 127.0.0.1:8000; } location / { uwsgi_pass gunicorn; include uwsgi_params; } """ class UWSGIAllowFrom(Setting): name = "uwsgi_allow_ips" section = "Server Mechanics" cli = ["--uwsgi-allow-from"] validator = validate_string_to_addr_list default = "127.0.0.1,::1" desc = """\ IPs allowed to send uWSGI protocol requests (comma separated). Set to ``*`` to allow all IPs. This is useful for setups where you don't know in advance the IP address of front-end, but instead have ensured via other means that only your authorized front-ends can access Gunicorn. .. note:: This option does not affect UNIX socket connections. Connections not associated with an IP address are treated as allowed, unconditionally. """ class KeyFile(Setting): name = "keyfile" section = "SSL" cli = ["--keyfile"] meta = "FILE" validator = validate_string default = None desc = """\ SSL key file """ class CertFile(Setting): name = "certfile" section = "SSL" cli = ["--certfile"] meta = "FILE" validator = validate_string default = None desc = """\ SSL certificate file """ class SSLVersion(Setting): name = "ssl_version" section = "SSL" cli = ["--ssl-version"] validator = validate_ssl_version if hasattr(ssl, "PROTOCOL_TLS"): default = ssl.PROTOCOL_TLS else: default = ssl.PROTOCOL_SSLv23 default = ssl.PROTOCOL_SSLv23 desc = """\ SSL version to use (see stdlib ssl module's). .. deprecated:: 21.0 The option is deprecated and it is currently ignored. Use :ref:`ssl-context` instead. ============= ============ --ssl-version Description ============= ============ SSLv3 SSLv3 is not-secure and is strongly discouraged. SSLv23 Alias for TLS. Deprecated in Python 3.6, use TLS. TLS Negotiate highest possible version between client/server. Can yield SSL. 
(Python 3.6+) TLSv1 TLS 1.0 TLSv1_1 TLS 1.1 (Python 3.4+) TLSv1_2 TLS 1.2 (Python 3.4+) TLS_SERVER Auto-negotiate the highest protocol version like TLS, but only support server-side SSLSocket connections. (Python 3.6+) ============= ============ .. versionchanged:: 19.7 The default value has been changed from ``ssl.PROTOCOL_TLSv1`` to ``ssl.PROTOCOL_SSLv23``. .. versionchanged:: 20.0 This setting now accepts string names based on ``ssl.PROTOCOL_`` constants. .. versionchanged:: 20.0.1 The default value has been changed from ``ssl.PROTOCOL_SSLv23`` to ``ssl.PROTOCOL_TLS`` when Python >= 3.6 . """ class CertReqs(Setting): name = "cert_reqs" section = "SSL" cli = ["--cert-reqs"] validator = validate_pos_int default = ssl.CERT_NONE desc = """\ Whether client certificate is required (see stdlib ssl module's) =========== =========================== --cert-reqs Description =========== =========================== `0` no client verification `1` ssl.CERT_OPTIONAL `2` ssl.CERT_REQUIRED =========== =========================== """ class CACerts(Setting): name = "ca_certs" section = "SSL" cli = ["--ca-certs"] meta = "FILE" validator = validate_string default = None desc = """\ CA certificates file """ class SuppressRaggedEOFs(Setting): name = "suppress_ragged_eofs" section = "SSL" cli = ["--suppress-ragged-eofs"] action = "store_true" default = True validator = validate_bool desc = """\ Suppress ragged EOFs (see stdlib ssl module's) """ class DoHandshakeOnConnect(Setting): name = "do_handshake_on_connect" section = "SSL" cli = ["--do-handshake-on-connect"] validator = validate_bool action = "store_true" default = False desc = """\ Whether to perform SSL handshake on socket connect (see stdlib ssl module's) """ class Ciphers(Setting): name = "ciphers" section = "SSL" cli = ["--ciphers"] validator = validate_string default = None desc = """\ SSL Cipher suite to use, in the format of an OpenSSL cipher list. 
By default we use the default cipher list from Python's ``ssl`` module, which contains ciphers considered strong at the time of each Python release. As a recommended alternative, the Open Web App Security Project (OWASP) offers `a vetted set of strong cipher strings rated A+ to C- `_. OWASP provides details on user-agent compatibility at each security level. See the `OpenSSL Cipher List Format Documentation `_ for details on the format of an OpenSSL cipher list. """ # HTTP/2 Protocol Settings # Valid protocol identifiers VALID_HTTP_PROTOCOLS = frozenset(["h1", "h2", "h3"]) # Map protocol identifiers to ALPN protocol names ALPN_PROTOCOL_MAP = { "h1": "http/1.1", "h2": "h2", "h3": "h3", # Future: HTTP/3 over QUIC } def validate_http_protocols(val): """Validate http_protocols setting. Accepts comma-separated list of protocol identifiers. Valid values: h1 (HTTP/1.1), h2 (HTTP/2), h3 (HTTP/3 - future) Order indicates preference (first = most preferred). """ if val is None: return ["h1"] if not isinstance(val, str): raise TypeError("http_protocols must be a string") val = val.strip() if not val: return ["h1"] protocols = [p.strip().lower() for p in val.split(",") if p.strip()] if not protocols: return ["h1"] # Validate each protocol for proto in protocols: if proto not in VALID_HTTP_PROTOCOLS: raise ValueError( f"Invalid protocol '{proto}'. " f"Valid protocols: {', '.join(sorted(VALID_HTTP_PROTOCOLS))}" ) # Check for duplicates if len(protocols) != len(set(protocols)): raise ValueError("Duplicate protocols specified") return protocols class HTTPProtocols(Setting): name = "http_protocols" section = "HTTP/2" cli = ["--http-protocols"] meta = "STRING" validator = validate_http_protocols default = "h1" desc = """\ HTTP protocol versions to support (comma-separated, order = preference). 
        Valid protocols:

        * ``h1`` - HTTP/1.1 (default)
        * ``h2`` - HTTP/2 (requires TLS with ALPN)
        * ``h3`` - HTTP/3 (future, not yet implemented)

        Examples::

            # HTTP/1.1 only (default, backward compatible)
            --http-protocols=h1

            # Prefer HTTP/2, fallback to HTTP/1.1
            --http-protocols=h2,h1

            # HTTP/2 only (reject HTTP/1.1 clients)
            --http-protocols=h2

        HTTP/2 requires:

        * TLS (--certfile and --keyfile)
        * The h2 library: ``pip install gunicorn[http2]``
        * ALPN-capable TLS client

        .. note::
            HTTP/2 cleartext (h2c) is not supported due to security concerns
            and lack of browser support.

        .. versionadded:: 25.0.0
        """


# Per-connection stream concurrency cap (SETTINGS_MAX_CONCURRENT_STREAMS).
class HTTP2MaxConcurrentStreams(Setting):
    name = "http2_max_concurrent_streams"
    section = "HTTP/2"
    cli = ["--http2-max-concurrent-streams"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 100
    desc = """\
        Maximum number of concurrent HTTP/2 streams per connection.

        This limits how many requests can be processed simultaneously on a
        single HTTP/2 connection. Higher values allow more parallelism but
        use more memory.

        Default is 100, which matches common server configurations.
        The HTTP/2 specification allows up to 2^31-1.

        .. versionadded:: 25.0.0
        """


# Flow-control window (SETTINGS_INITIAL_WINDOW_SIZE).
class HTTP2InitialWindowSize(Setting):
    name = "http2_initial_window_size"
    section = "HTTP/2"
    cli = ["--http2-initial-window-size"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 65535
    desc = """\
        Initial HTTP/2 flow control window size in bytes.

        This controls how much data can be in-flight before the receiver
        sends WINDOW_UPDATE frames. Larger values can improve throughput
        for large transfers but use more memory.

        Default is 65535 (64KB - 1), the HTTP/2 specification default.
        Maximum is 2^31-1 (2147483647).

        .. versionadded:: 25.0.0
        """


# Frame payload cap (SETTINGS_MAX_FRAME_SIZE); range enforced by its validator.
class HTTP2MaxFrameSize(Setting):
    name = "http2_max_frame_size"
    section = "HTTP/2"
    cli = ["--http2-max-frame-size"]
    meta = "INT"
    validator = validate_http2_frame_size
    type = int
    default = 16384
    desc = """\
        Maximum HTTP/2 frame payload size in bytes.

        This is the largest frame payload the server will accept. Larger
        frames reduce framing overhead but may increase latency for small
        messages.

        Default is 16384 (16KB), the HTTP/2 specification minimum.
        Range is 16384 to 16777215 (16MB - 1).

        .. versionadded:: 25.0.0
        """


# HPACK decompression bound (SETTINGS_MAX_HEADER_LIST_SIZE).
class HTTP2MaxHeaderListSize(Setting):
    name = "http2_max_header_list_size"
    section = "HTTP/2"
    cli = ["--http2-max-header-list-size"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 65536
    desc = """\
        Maximum size of HTTP/2 header list in bytes (HPACK protection).

        This limits the total size of headers after HPACK decompression.
        Protects against compression bombs and excessive memory use.

        Default is 65536 (64KB). Set to 0 for unlimited (not recommended).

        .. versionadded:: 25.0.0
        """


class PasteGlobalConf(Setting):
    name = "raw_paste_global_conf"
    action = "append"
    section = "Server Mechanics"
    cli = ["--paste-global"]
    meta = "CONF"
    validator = validate_list_string
    default = []
    desc = """\
        Set a PasteDeploy global config variable in ``key=value`` form.

        The option can be specified multiple times.

        The variables are passed to the PasteDeploy entrypoint. Example::

            $ gunicorn -b 127.0.0.1:8000 --paste development.ini --paste-global FOO=1 --paste-global BAR=2

        .. versionadded:: 19.7
        """


class PermitObsoleteFolding(Setting):
    name = "permit_obsolete_folding"
    section = "Server Mechanics"
    cli = ["--permit-obsolete-folding"]
    validator = validate_bool
    action = "store_true"
    default = False
    desc = """\
        Permit requests employing obsolete HTTP line folding mechanism

        The folding mechanism was deprecated by rfc7230 Section 3.2.4 and
        will not be employed in HTTP request headers from standards-compliant
        HTTP clients.

        This option is provided to diagnose backwards-incompatible changes.
        Use with care and only if necessary. Temporary; the precise effect
        of this option may change in a future version, or it may be removed
        altogether.

        ..
versionadded:: 23.0.0 """ class StripHeaderSpaces(Setting): name = "strip_header_spaces" section = "Server Mechanics" cli = ["--strip-header-spaces"] validator = validate_bool action = "store_true" default = False desc = """\ Strip spaces present between the header name and the the ``:``. This is known to induce vulnerabilities and is not compliant with the HTTP/1.1 standard. See https://portswigger.net/research/http-desync-attacks-request-smuggling-reborn. Use with care and only if necessary. Deprecated; scheduled for removal in 25.0.0 .. versionadded:: 20.0.1 """ class PermitUnconventionalHTTPMethod(Setting): name = "permit_unconventional_http_method" section = "Server Mechanics" cli = ["--permit-unconventional-http-method"] validator = validate_bool action = "store_true" default = False desc = """\ Permit HTTP methods not matching conventions, such as IANA registration guidelines This permits request methods of length less than 3 or more than 20, methods with lowercase characters or methods containing the # character. HTTP methods are case sensitive by definition, and merely uppercase by convention. If unset, Gunicorn will apply nonstandard restrictions and cause 400 response status in cases where otherwise 501 status is expected. While this option does modify that behaviour, it should not be depended upon to guarantee standards-compliant behaviour. Rather, it is provided temporarily, to assist in diagnosing backwards-incompatible changes around the incomplete application of those restrictions. Use with care and only if necessary. Temporary; scheduled for removal in 24.0.0 .. versionadded:: 22.0.0 """ class PermitUnconventionalHTTPVersion(Setting): name = "permit_unconventional_http_version" section = "Server Mechanics" cli = ["--permit-unconventional-http-version"] validator = validate_bool action = "store_true" default = False desc = """\ Permit HTTP version not matching conventions of 2023 This disables the refusal of likely malformed request lines. 
It is unusual to specify HTTP 1 versions other than 1.0 and 1.1. This option is provided to diagnose backwards-incompatible changes. Use with care and only if necessary. Temporary; the precise effect of this option may change in a future version, or it may be removed altogether. .. versionadded:: 22.0.0 """ class CasefoldHTTPMethod(Setting): name = "casefold_http_method" section = "Server Mechanics" cli = ["--casefold-http-method"] validator = validate_bool action = "store_true" default = False desc = """\ Transform received HTTP methods to uppercase HTTP methods are case sensitive by definition, and merely uppercase by convention. This option is provided because previous versions of gunicorn defaulted to this behaviour. Use with care and only if necessary. Deprecated; scheduled for removal in 24.0.0 .. versionadded:: 22.0.0 """ def validate_header_map_behaviour(val): # FIXME: refactor all of this subclassing stdlib argparse if val is None: return if not isinstance(val, str): raise TypeError("Invalid type for casting: %s" % val) if val.lower().strip() == "drop": return "drop" elif val.lower().strip() == "refuse": return "refuse" elif val.lower().strip() == "dangerous": return "dangerous" else: raise ValueError("Invalid header map behaviour: %s" % val) class ForwarderHeaders(Setting): name = "forwarder_headers" section = "Server Mechanics" cli = ["--forwarder-headers"] validator = validate_string_to_list default = "SCRIPT_NAME,PATH_INFO" desc = """\ A list containing upper-case header field names that the front-end proxy (see :ref:`forwarded-allow-ips`) sets, to be used in WSGI environment. This option has no effect for headers not present in the request. This option can be used to transfer ``SCRIPT_NAME``, ``PATH_INFO`` and ``REMOTE_USER``. It is important that your front-end proxy configuration ensures that the headers defined here can not be passed directly from the client. 
""" class HeaderMap(Setting): name = "header_map" section = "Server Mechanics" cli = ["--header-map"] validator = validate_header_map_behaviour default = "drop" desc = """\ Configure how header field names are mapped into environ Headers containing underscores are permitted by RFC9110, but gunicorn joining headers of different names into the same environment variable will dangerously confuse applications as to which is which. The safe default ``drop`` is to silently drop headers that cannot be unambiguously mapped. The value ``refuse`` will return an error if a request contains *any* such header. The value ``dangerous`` matches the previous, not advisable, behaviour of mapping different header field names into the same environ name. If the source is permitted as explained in :ref:`forwarded-allow-ips`, *and* the header name is present in :ref:`forwarder-headers`, the header is mapped into environment regardless of the state of this setting. Use with care and only if necessary and after considering if your problem could instead be solved by specifically renaming or rewriting only the intended headers on a proxy in front of Gunicorn. .. versionadded:: 22.0.0 """ def validate_asgi_loop(val): if val is None: return "auto" if not isinstance(val, str): raise TypeError("Invalid type for casting: %s" % val) val = val.lower().strip() if val not in ("auto", "asyncio", "uvloop"): raise ValueError("Invalid ASGI loop: %s" % val) return val def validate_asgi_lifespan(val): if val is None: return "auto" if not isinstance(val, str): raise TypeError("Invalid type for casting: %s" % val) val = val.lower().strip() if val not in ("auto", "on", "off"): raise ValueError("Invalid ASGI lifespan: %s" % val) return val class ASGILoop(Setting): name = "asgi_loop" section = "Worker Processes" cli = ["--asgi-loop"] meta = "STRING" validator = validate_asgi_loop default = "auto" desc = """\ Event loop implementation for ASGI workers. 
        - auto: Use uvloop if available, otherwise asyncio
        - asyncio: Use Python's built-in asyncio event loop
        - uvloop: Use uvloop (must be installed separately)

        This setting only affects the ``asgi`` worker type.

        uvloop typically provides better performance but requires installing
        the uvloop package.

        .. versionadded:: 24.0.0
        """


class ASGILifespan(Setting):
    name = "asgi_lifespan"
    section = "Worker Processes"
    cli = ["--asgi-lifespan"]
    meta = "STRING"
    validator = validate_asgi_lifespan
    default = "auto"
    desc = """\
        Control ASGI lifespan protocol handling.

        - auto: Detect if app supports lifespan, enable if so
        - on: Always run lifespan protocol (fail if unsupported)
        - off: Never run lifespan protocol

        The lifespan protocol allows ASGI applications to run code at startup
        and shutdown. This is essential for frameworks like FastAPI that need
        to initialize database connections, caches, or other resources.

        This setting only affects the ``asgi`` worker type.

        .. versionadded:: 24.0.0
        """


class ASGIDisconnectGracePeriod(Setting):
    name = "asgi_disconnect_grace_period"
    section = "Worker Processes"
    cli = ["--asgi-disconnect-grace-period"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 3
    desc = """\
        Grace period (seconds) for ASGI apps to handle client disconnects.

        When a client disconnects, the ASGI app receives an http.disconnect
        message and has this many seconds to clean up resources (like
        database connections) before the request task is cancelled.

        Set to 0 to cancel immediately (not recommended for apps with async
        database connections). Apps with long-running database operations
        may need to increase this value.

        This setting only affects the ``asgi`` worker type.

        .. versionadded:: 25.0.0
        """


class RootPath(Setting):
    name = "root_path"
    section = "Server Mechanics"
    cli = ["--root-path"]
    meta = "STRING"
    validator = validate_string
    default = ""
    desc = """\
        The root path for ASGI applications.

        This is used to set the ``root_path`` in the ASGI scope, which allows
        applications to know their mount point when behind a reverse proxy.

        For example, if your application is mounted at ``/api``, set this to
        ``/api``.

        .. versionadded:: 24.0.0
        """


# =============================================================================
# Dirty Arbiters - Separate process pool for long-running operations
# =============================================================================

class DirtyApps(Setting):
    name = "dirty_apps"
    section = "Dirty Arbiters"
    cli = ["--dirty-app"]
    action = "append"
    meta = "STRING"
    validator = validate_list_string
    default = []
    desc = """\
        Dirty applications to load in the dirty worker pool.

        A list of application paths in one of these formats:

        - ``$(MODULE_NAME):$(CLASS_NAME)`` - all workers load this app
        - ``$(MODULE_NAME):$(CLASS_NAME):$(N)`` - only N workers load this app

        Each dirty app must be a class that inherits from ``DirtyApp`` base
        class and implements the ``init()``, ``__call__()``, and ``close()``
        methods.

        Example::

            dirty_apps = [
                "myapp.ml:MLApp",           # All workers load this
                "myapp.images:ImageApp",    # All workers load this
                "myapp.heavy:HugeModel:2",  # Only 2 workers load this
            ]

        The per-app worker limit is useful for memory-intensive applications
        like large ML models. Instead of all 8 workers loading a 10GB model
        (80GB total), you can limit it to 2 workers (20GB total).

        Alternatively, you can set the ``workers`` class attribute on your
        DirtyApp subclass::

            class HugeModelApp(DirtyApp):
                workers = 2  # Only 2 workers load this app

                def init(self):
                    self.model = load_10gb_model()

        Note: The config format (``module:Class:N``) takes precedence over
        the class attribute if both are specified.

        Dirty apps are loaded once when the dirty worker starts and persist
        in memory for the lifetime of the worker. This is ideal for loading
        ML models, database connection pools, or other stateful resources
        that are expensive to initialize.

        .. versionadded:: 25.0.0
        .. versionchanged:: 25.1.0
           Added per-app worker allocation via ``:N`` format suffix.
        """


class DirtyWorkers(Setting):
    name = "dirty_workers"
    section = "Dirty Arbiters"
    cli = ["--dirty-workers"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 0
    desc = """\
        The number of dirty worker processes.

        A positive integer. Set to 0 (default) to disable the dirty arbiter.

        When set to a positive value, a dirty arbiter process will be spawned
        to manage the dirty worker pool. Dirty workers are separate from HTTP
        workers and are designed for long-running, blocking operations like
        ML model inference or heavy computation.

        .. versionadded:: 25.0.0
        """


class DirtyTimeout(Setting):
    name = "dirty_timeout"
    section = "Dirty Arbiters"
    cli = ["--dirty-timeout"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 300
    desc = """\
        Timeout for dirty task execution in seconds.

        Workers silent for more than this many seconds are considered stuck
        and will be killed. Set to a high value for operations like model
        loading that may take a long time.

        Value is a positive number. Setting it to 0 disables timeout
        checking.

        .. versionadded:: 25.0.0
        """


class DirtyThreads(Setting):
    name = "dirty_threads"
    section = "Dirty Arbiters"
    cli = ["--dirty-threads"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 1
    desc = """\
        The number of threads per dirty worker.

        Each dirty worker can use threads to handle concurrent operations
        within the same process, useful for async-safe applications.

        .. versionadded:: 25.0.0
        """


class DirtyGracefulTimeout(Setting):
    name = "dirty_graceful_timeout"
    section = "Dirty Arbiters"
    cli = ["--dirty-graceful-timeout"]
    meta = "INT"
    validator = validate_pos_int
    type = int
    default = 30
    desc = """\
        Timeout for graceful dirty worker shutdown in seconds.

        After receiving a shutdown signal, dirty workers have this much time
        to finish their current tasks. Workers still alive after the timeout
        are force killed.

        ..
versionadded:: 25.0.0
        """


# =============================================================================
# Dirty Arbiter Hooks
# =============================================================================

class OnDirtyStarting(Setting):
    name = "on_dirty_starting"
    section = "Dirty Arbiter Hooks"
    validator = validate_callable(1)
    type = callable

    def on_dirty_starting(arbiter):
        pass
    default = staticmethod(on_dirty_starting)
    desc = """\
        Called just before the dirty arbiter process is initialized.

        The callable needs to accept a single instance variable for the
        DirtyArbiter.

        .. versionadded:: 25.0.0
        """


class DirtyPostFork(Setting):
    name = "dirty_post_fork"
    section = "Dirty Arbiter Hooks"
    validator = validate_callable(2)
    type = callable

    def dirty_post_fork(arbiter, worker):
        pass
    default = staticmethod(dirty_post_fork)
    desc = """\
        Called just after a dirty worker has been forked.

        The callable needs to accept two instance variables for the
        DirtyArbiter and new DirtyWorker.

        .. versionadded:: 25.0.0
        """


class DirtyWorkerInit(Setting):
    name = "dirty_worker_init"
    section = "Dirty Arbiter Hooks"
    validator = validate_callable(1)
    type = callable

    def dirty_worker_init(worker):
        pass
    default = staticmethod(dirty_worker_init)
    desc = """\
        Called just after a dirty worker has initialized all applications.

        The callable needs to accept one instance variable for the
        DirtyWorker.

        .. versionadded:: 25.0.0
        """


class DirtyWorkerExit(Setting):
    name = "dirty_worker_exit"
    section = "Dirty Arbiter Hooks"
    validator = validate_callable(2)
    type = callable

    def dirty_worker_exit(arbiter, worker):
        pass
    default = staticmethod(dirty_worker_exit)
    desc = """\
        Called when a dirty worker has exited.

        The callable needs to accept two instance variables for the
        DirtyArbiter and the exiting DirtyWorker.

        .. versionadded:: 25.0.0
        """


# Control Socket Settings

class ControlSocket(Setting):
    name = "control_socket"
    section = "Control"
    cli = ["--control-socket"]
    meta = "PATH"
    validator = validate_string
    default = "gunicorn.ctl"
    desc = """\
        Unix socket path for control interface.

        The control socket allows runtime management of Gunicorn via the
        ``gunicornc`` command-line tool. Commands include viewing worker
        status, adjusting worker count, and graceful reload/shutdown.

        By default, creates ``gunicorn.ctl`` in the working directory.
        Set an absolute path for a fixed location
        (e.g., ``/var/run/gunicorn.ctl``).

        Use ``--no-control-socket`` to disable.

        .. versionadded:: 25.1.0
        """


class ControlSocketMode(Setting):
    name = "control_socket_mode"
    section = "Control"
    cli = ["--control-socket-mode"]
    meta = "INT"
    validator = validate_pos_int
    type = auto_int
    default = 0o600
    desc = """\
        Permission mode for control socket.

        Restricts who can connect to the control socket. Default ``0600``
        allows only the socket owner. Set to ``0660`` to allow group access.

        .. versionadded:: 25.1.0
        """


class ControlSocketDisable(Setting):
    name = "control_socket_disable"
    section = "Control"
    cli = ["--no-control-socket"]
    validator = validate_bool
    action = "store_true"
    default = False
    desc = """\
        Disable control socket.

        When set, no control socket is created and ``gunicornc`` cannot
        connect to this Gunicorn instance.

        .. versionadded:: 25.1.0
        """
benoitc-gunicorn-f5fb19e/gunicorn/ctl/000077500000000000000000000000001514360242400200715ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/gunicorn/ctl/__init__.py000066400000000000000000000007601514360242400222050ustar00rootroot00000000000000#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

"""
Gunicorn Control Interface

Provides a control socket server for runtime management and a CLI
client (gunicornc) for interacting with running Gunicorn instances.
"""

from gunicorn.ctl.server import ControlSocketServer
from gunicorn.ctl.client import ControlClient
from gunicorn.ctl.protocol import ControlProtocol

__all__ = ['ControlSocketServer', 'ControlClient', 'ControlProtocol']
benoitc-gunicorn-f5fb19e/gunicorn/ctl/cli.py000066400000000000000000000314661514360242400212240ustar00rootroot00000000000000#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

"""
gunicornc - Gunicorn control interface CLI

Interactive and single-command modes for controlling Gunicorn instances.
"""

import argparse
import json
import os
import sys

from gunicorn.ctl.client import ControlClient, ControlClientError, parse_command


def format_workers(data: dict) -> str:
    """Format workers output for display."""
    workers = data.get("workers", [])
    if not workers:
        return "No workers running"

    lines = []
    # Fixed-width columns; the dash rule below matches the header width.
    lines.append(f"{'PID':<10} {'AGE':<6} {'BOOTED':<8} {'LAST_BEAT'}")
    lines.append("-" * 40)

    for w in workers:
        pid = w.get("pid", "?")
        age = w.get("age", "?")
        booted = "yes" if w.get("booted") else "no"
        hb = w.get("last_heartbeat")
        # None means the heartbeat age could not be read server-side.
        hb_str = f"{hb}s ago" if hb is not None else "n/a"
        lines.append(f"{pid:<10} {age:<6} {booted:<8} {hb_str}")

    lines.append("")
    lines.append(f"Total: {data.get('count', len(workers))} workers")
    return "\n".join(lines)


def format_dirty(data: dict) -> str:
    """Format dirty workers output for display."""
    if not data.get("enabled"):
        return "Dirty arbiter not running"

    lines = []
    lines.append(f"Dirty arbiter PID: {data.get('pid')}")
    lines.append("")

    workers = data.get("workers", [])
    if workers:
        lines.append("DIRTY WORKERS:")
        lines.append(f"{'PID':<10} {'AGE':<6} {'APPS':<30} {'LAST_BEAT'}")
        lines.append("-" * 60)
        for w in workers:
            pid = w.get("pid", "?")
            age = w.get("age", "?")
            # Truncate the joined app list so the column stays aligned.
            apps = ", ".join(w.get("apps", []))[:30]
            hb = w.get("last_heartbeat")
            hb_str = f"{hb}s ago" if hb is not None else "n/a"
            lines.append(f"{pid:<10} {age:<6} {apps:<30} {hb_str}")
        lines.append("")

    apps = data.get("apps", [])
    if apps:
        lines.append("DIRTY APPS:")
        lines.append(f"{'APP':<30} {'WORKERS':<10} {'LIMIT'}")
        lines.append("-" * 50)
        for app in apps:
            path = app.get("import_path", "?")[:30]
            current = app.get("current_workers", 0)
            limit = app.get("worker_count")
            # worker_count of None means "no per-app limit configured".
            limit_str = str(limit) if limit is not None else "none"
            lines.append(f"{path:<30} {current:<10} {limit_str}")

    return "\n".join(lines)


def format_stats(data: dict) -> str:
    """Format stats output for display."""
    lines = []

    uptime = data.get("uptime")
    if uptime:
        # Render seconds as h/m/s, dropping leading zero units.
        hours = int(uptime // 3600)
        minutes = int((uptime % 3600) // 60)
        seconds = int(uptime % 60)
        if hours:
            uptime_str = f"{hours}h {minutes}m {seconds}s"
        elif minutes:
            uptime_str = f"{minutes}m {seconds}s"
        else:
            uptime_str = f"{seconds}s"
    else:
        uptime_str = "unknown"

    lines.append(f"Uptime: {uptime_str}")
    lines.append(f"PID: {data.get('pid', 'unknown')}")
    lines.append(f"Workers current: {data.get('workers_current', 0)}")
    lines.append(f"Workers target: {data.get('workers_target', 0)}")
    lines.append(f"Workers spawned: {data.get('workers_spawned', 0)}")
    lines.append(f"Workers killed: {data.get('workers_killed', 0)}")
    lines.append(f"Reloads: {data.get('reloads', 0)}")

    dirty_pid = data.get("dirty_arbiter_pid")
    if dirty_pid:
        lines.append(f"Dirty arbiter: {dirty_pid}")

    return "\n".join(lines)


def format_listeners(data: dict) -> str:
    """Format listeners output for display."""
    listeners = data.get("listeners", [])
    if not listeners:
        return "No listeners bound"

    lines = []
    lines.append(f"{'ADDRESS':<40} {'TYPE':<8} {'FD'}")
    lines.append("-" * 55)

    for lnr in listeners:
        addr = lnr.get("address", "?")
        ltype = lnr.get("type", "?")
        fd = lnr.get("fd", "?")
        lines.append(f"{addr:<40} {ltype:<8} {fd}")

    lines.append("")
    lines.append(f"Total: {data.get('count', len(listeners))} listeners")
    return "\n".join(lines)


def format_config(data: dict) -> str:
    """Format config output for display."""
    lines = []
    # Sort keys for consistent output
    for key in sorted(data.keys()):
        value = data[key]
        if isinstance(value, list):
            value = ", ".join(str(v) for v in value)
        lines.append(f"{key}: {value}")
    return "\n".join(lines)


def format_help(data: dict) -> str:
    """Format help output for display."""
    commands = data.get("commands", {})
    lines = []
    lines.append("Available commands:")
    lines.append("")

    # Find max command length for alignment
    max_len = max(len(cmd) for cmd in commands.keys()) if commands else 0

    for cmd, desc in sorted(commands.items()):
        lines.append(f"  {cmd:<{max_len + 2}} {desc}")

    return "\n".join(lines)


def format_all(data: dict) -> str:
    """Format show all output for display."""
    lines = []

    # Arbiter
    arbiter = data.get("arbiter", {})
    lines.append("ARBITER (master)")
    lines.append(f"  PID: {arbiter.get('pid', '?')}")
    lines.append("")

    # Web workers
    web_workers = data.get("web_workers", [])
    lines.append(f"WEB WORKERS ({data.get('web_worker_count', 0)})")
    if web_workers:
        lines.append(f"  {'PID':<10} {'AGE':<6} {'BOOTED':<8} {'LAST_BEAT'}")
        lines.append(f"  {'-' * 38}")
        for w in web_workers:
            pid = w.get("pid", "?")
            age = w.get("age", "?")
            booted = "yes" if w.get("booted") else "no"
            hb = w.get("last_heartbeat")
            hb_str = f"{hb}s ago" if hb is not None else "n/a"
            lines.append(f"  {pid:<10} {age:<6} {booted:<8} {hb_str}")
    else:
        lines.append("  (none)")
    lines.append("")

    # Dirty arbiter
    dirty_arbiter = data.get("dirty_arbiter")
    if dirty_arbiter:
        lines.append("DIRTY ARBITER")
        lines.append(f"  PID: {dirty_arbiter.get('pid', '?')}")
        lines.append("")

        # Dirty workers
        dirty_workers = data.get("dirty_workers", [])
        lines.append(f"DIRTY WORKERS ({data.get('dirty_worker_count', 0)})")
        if dirty_workers:
            lines.append(f"  {'PID':<10} {'AGE':<6} {'APPS'}")
            lines.append(f"  {'-' * 50}")
            for w in dirty_workers:
                pid = w.get("pid", "?")
                age = w.get("age", "?")
                apps = w.get("apps", [])
                # Show each app on its own line if multiple
                if apps:
                    first_app = apps[0].split(":")[-1]  # Just the class name
                    lines.append(f"  {pid:<10} {age:<6} {first_app}")
                    for app in apps[1:]:
                        app_name = app.split(":")[-1]
lines.append(f" {'':<10} {'':<6} {app_name}") else: lines.append(f" {pid:<10} {age:<6} (no apps)") else: lines.append(" (none)") else: lines.append("DIRTY ARBITER") lines.append(" (not running)") return "\n".join(lines) def format_response(command: str, data: dict) -> str: # pylint: disable=too-many-return-statements """ Format response data based on command. Args: command: Original command string data: Response data dictionary Returns: Formatted string for display """ cmd_lower = command.lower().strip() # Route to specific formatters if cmd_lower == "show all": return format_all(data) elif cmd_lower == "show workers": return format_workers(data) elif cmd_lower == "show dirty": return format_dirty(data) elif cmd_lower == "show stats": return format_stats(data) elif cmd_lower == "show listeners": return format_listeners(data) elif cmd_lower == "show config": return format_config(data) elif cmd_lower == "help": return format_help(data) else: # Generic JSON output for other commands if data: return json.dumps(data, indent=2) return "OK" def run_command(socket_path: str, command: str, json_output: bool = False) -> int: """ Execute single command and exit. Args: socket_path: Path to control socket command: Command to execute json_output: If True, output raw JSON Returns: Exit code (0 for success, 1 for error) """ try: with ControlClient(socket_path) as client: cmd, args = parse_command(command) full_command = f"{cmd} {' '.join(args)}".strip() if args else cmd result = client.send_command(full_command) if json_output: print(json.dumps(result, indent=2)) else: output = format_response(cmd, result) print(output) return 0 except ControlClientError as e: print(f"Error: {e}", file=sys.stderr) return 1 except KeyboardInterrupt: return 130 def run_interactive(socket_path: str, json_output: bool = False) -> int: """ Run interactive CLI with readline support. 
    Args:
        socket_path: Path to control socket
        json_output: If True, output raw JSON

    Returns:
        Exit code
    """
    try:
        import readline  # noqa: F401 - imported for side effects
        has_readline = True
    except ImportError:
        has_readline = False

    try:
        client = ControlClient(socket_path)
        client.connect()
    except ControlClientError as e:
        print(f"Error: {e}", file=sys.stderr)
        return 1

    print(f"Connected to {socket_path}")
    print("Type 'help' for available commands, 'quit' to exit.")
    print()

    # Set up readline history
    history_file = os.path.expanduser("~/.gunicornc_history")
    if has_readline:
        try:
            readline.read_history_file(history_file)
        except FileNotFoundError:
            # First run: no history yet.
            pass

    exit_code = 0
    try:
        while True:
            try:
                line = input("gunicorn> ").strip()
            except EOFError:
                # Ctrl-D ends the session cleanly.
                print()
                break

            if not line:
                continue

            if line.lower() in ('quit', 'exit', 'q'):
                break

            try:
                cmd, args = parse_command(line)
                full_command = f"{cmd} {' '.join(args)}".strip() if args else cmd
                result = client.send_command(full_command)

                if json_output:
                    print(json.dumps(result, indent=2))
                else:
                    output = format_response(cmd, result)
                    print(output)
            except ControlClientError as e:
                print(f"Error: {e}")
                # Try to reconnect
                try:
                    client.close()
                    client.connect()
                except ControlClientError:
                    print("Connection lost. Exiting.")
                    exit_code = 1
                    break

            print()
    except KeyboardInterrupt:
        print()
        exit_code = 130
    finally:
        client.close()
        if has_readline:
            try:
                readline.write_history_file(history_file)
            except Exception:
                # Best-effort: failing to persist history is not fatal.
                pass

    return exit_code


def main():
    """Main entry point for gunicornc CLI."""
    parser = argparse.ArgumentParser(
        description='Gunicorn control interface',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  gunicornc                      # Interactive mode (default socket)
  gunicornc -s /tmp/myapp.ctl    # Interactive mode with custom socket
  gunicornc -c "show workers"    # Single command mode
  gunicornc -c "worker add 2"    # Add 2 workers
  gunicornc -c "show stats" -j   # Output stats as JSON
"""
    )
    parser.add_argument(
        '-s', '--socket',
        default='gunicorn.ctl',
        help='Control socket path (default: gunicorn.ctl in current directory)'
    )
    parser.add_argument(
        '-c', '--command',
        help='Execute single command and exit'
    )
    parser.add_argument(
        '-j', '--json',
        action='store_true',
        help='Output raw JSON (for scripting)'
    )
    parser.add_argument(
        '-v', '--version',
        action='store_true',
        help='Show version and exit'
    )

    args = parser.parse_args()

    if args.version:
        from gunicorn import __version__
        print(f"gunicornc (gunicorn {__version__})")
        return 0

    socket_path = args.socket
    # Make relative paths absolute from cwd
    if not os.path.isabs(socket_path):
        socket_path = os.path.join(os.getcwd(), socket_path)

    if args.command:
        return run_command(socket_path, args.command, args.json)
    else:
        return run_interactive(socket_path, args.json)


if __name__ == '__main__':
    sys.exit(main())
benoitc-gunicorn-f5fb19e/gunicorn/ctl/client.py000066400000000000000000000066261514360242400217310ustar00rootroot00000000000000#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

"""
Control Socket Client

Client library for connecting to gunicorn control socket.
""" import shlex import socket from gunicorn.ctl.protocol import ( ControlProtocol, make_request, ) class ControlClientError(Exception): """Control client error.""" class ControlClient: """ Client for connecting to gunicorn control socket. Can be used as a context manager: with ControlClient('/path/to/gunicorn.ctl') as client: result = client.send_command('show workers') """ def __init__(self, socket_path: str, timeout: float = 30.0): """ Initialize control client. Args: socket_path: Path to the Unix socket timeout: Socket timeout in seconds (default 30) """ self.socket_path = socket_path self.timeout = timeout self._sock = None self._request_id = 0 def connect(self): """ Connect to control socket. Raises: ControlClientError: If connection fails """ if self._sock: return try: self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self._sock.settimeout(self.timeout) self._sock.connect(self.socket_path) except socket.error as e: self._sock = None raise ControlClientError(f"Failed to connect to {self.socket_path}: {e}") def close(self): """Close connection.""" if self._sock: try: self._sock.close() except Exception: pass self._sock = None def send_command(self, command: str, args: list = None) -> dict: """ Send command and wait for response. 
Args: command: Command string (e.g., "show workers") args: Optional additional arguments Returns: Response data dictionary Raises: ControlClientError: If communication fails """ if not self._sock: self.connect() self._request_id += 1 request = make_request(self._request_id, command, args) try: ControlProtocol.write_message(self._sock, request) response = ControlProtocol.read_message(self._sock) except Exception as e: self.close() raise ControlClientError(f"Communication error: {e}") if response.get("status") == "error": raise ControlClientError(response.get("error", "Unknown error")) return response.get("data", {}) def __enter__(self): self.connect() return self def __exit__(self, *args): self.close() def parse_command(line: str) -> tuple: """ Parse a command line into command and args. Args: line: Command line string Returns: Tuple of (command_string, args_list) """ parts = shlex.split(line) if not parts: return "", [] # Find where numeric/value args start command_parts = [] args = [] for part in parts: # If we haven't hit args yet and this looks like a command word if not args and not part.isdigit() and not part.startswith('-'): command_parts.append(part) else: args.append(part) return " ".join(command_parts), args benoitc-gunicorn-f5fb19e/gunicorn/ctl/handlers.py000066400000000000000000000434371514360242400222560ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Control Interface Command Handlers Provides handlers for all control commands with access to arbiter state. """ import os import signal import socket import time class CommandHandlers: """ Command handlers with access to arbiter state. All handler methods return dictionaries that will be sent as the response data. """ def __init__(self, arbiter): """ Initialize handlers with arbiter reference. 
Args: arbiter: The Gunicorn arbiter instance """ self.arbiter = arbiter def show_workers(self) -> dict: """ Return list of HTTP workers. Returns: Dictionary with workers list containing: - pid: Worker process ID - age: Worker age (spawn order) - requests: Number of requests handled (if available) - booted: Whether worker has finished booting - last_heartbeat: Seconds since last heartbeat """ workers = [] now = time.monotonic() for pid, worker in self.arbiter.WORKERS.items(): try: last_update = worker.tmp.last_update() last_heartbeat = round(now - last_update, 2) except (OSError, ValueError): last_heartbeat = None workers.append({ "pid": pid, "age": worker.age, "booted": worker.booted, "aborted": worker.aborted, "last_heartbeat": last_heartbeat, }) # Sort by age (oldest first) workers.sort(key=lambda w: w["age"]) return {"workers": workers, "count": len(workers)} def show_dirty(self) -> dict: """ Return dirty workers and apps information. Returns: Dictionary with: - enabled: Whether dirty arbiter is running - pid: Dirty arbiter PID - workers: List of dirty worker info - apps: List of dirty app specs """ if not self.arbiter.dirty_arbiter_pid: return { "enabled": False, "pid": None, "workers": [], "apps": [], } # Get dirty arbiter reference if available dirty_arbiter = getattr(self.arbiter, 'dirty_arbiter', None) workers = [] apps = [] if dirty_arbiter and hasattr(dirty_arbiter, 'workers'): now = time.monotonic() for pid, worker in dirty_arbiter.workers.items(): try: last_update = worker.tmp.last_update() last_heartbeat = round(now - last_update, 2) except (OSError, ValueError, AttributeError): last_heartbeat = None workers.append({ "pid": pid, "age": worker.age, "apps": getattr(worker, 'app_paths', []), "booted": getattr(worker, 'booted', False), "last_heartbeat": last_heartbeat, }) # Get app specs if hasattr(dirty_arbiter, 'app_specs'): for path, spec in dirty_arbiter.app_specs.items(): worker_pids = list(dirty_arbiter.app_worker_map.get(path, [])) apps.append({ 
"import_path": path, "worker_count": spec.get('worker_count'), "current_workers": len(worker_pids), "worker_pids": worker_pids, }) return { "enabled": True, "pid": self.arbiter.dirty_arbiter_pid, "workers": workers, "apps": apps, } def show_config(self) -> dict: """ Return current effective configuration. Returns: Dictionary of configuration values """ cfg = self.arbiter.cfg config = {} # Get commonly needed config values config_keys = [ 'bind', 'workers', 'worker_class', 'threads', 'timeout', 'graceful_timeout', 'keepalive', 'max_requests', 'max_requests_jitter', 'worker_connections', 'preload_app', 'daemon', 'pidfile', 'proc_name', 'reload', 'dirty_workers', 'dirty_apps', 'dirty_timeout', 'control_socket', 'control_socket_disable', ] for key in config_keys: try: value = getattr(cfg, key) # Convert non-serializable types if callable(value): value = str(value) elif hasattr(value, '__class__') and not isinstance( value, (str, int, float, bool, list, dict, type(None))): value = str(value) config[key] = value except AttributeError: pass return config def show_stats(self) -> dict: """ Return server statistics. Returns: Dictionary with: - uptime: Seconds since arbiter started - pid: Arbiter PID - workers_current: Current number of workers - workers_spawned: Total workers spawned - workers_killed: Total workers killed (if tracked) - reloads: Number of reloads (if tracked) """ stats = getattr(self.arbiter, '_stats', {}) start_time = stats.get('start_time') uptime = None if start_time: uptime = round(time.time() - start_time, 2) return { "uptime": uptime, "pid": self.arbiter.pid, "workers_current": len(self.arbiter.WORKERS), "workers_target": self.arbiter.num_workers, "workers_spawned": stats.get('workers_spawned', 0), "workers_killed": stats.get('workers_killed', 0), "reloads": stats.get('reloads', 0), "dirty_arbiter_pid": self.arbiter.dirty_arbiter_pid or None, } def show_listeners(self) -> dict: """ Return bound socket information. 
Returns: Dictionary with listeners list """ listeners = [] for lnr in self.arbiter.LISTENERS: addr = str(lnr) listener_info = { "address": addr, "fd": lnr.fileno(), } # Try to get socket family try: sock = lnr.sock if sock.family == socket.AF_UNIX: listener_info["type"] = "unix" elif sock.family == socket.AF_INET: listener_info["type"] = "tcp" elif sock.family == socket.AF_INET6: listener_info["type"] = "tcp6" except Exception: listener_info["type"] = "unknown" listeners.append(listener_info) return {"listeners": listeners, "count": len(listeners)} def worker_add(self, count: int = 1) -> dict: """ Increase worker count. Args: count: Number of workers to add (default 1) Returns: Dictionary with added count and new total """ count = max(1, int(count)) old_count = self.arbiter.num_workers self.arbiter.num_workers += count # Wake up the arbiter to spawn workers self.arbiter.wakeup() return { "added": count, "previous": old_count, "total": self.arbiter.num_workers, } def worker_remove(self, count: int = 1) -> dict: """ Decrease worker count. Args: count: Number of workers to remove (default 1) Returns: Dictionary with removed count and new total """ count = max(1, int(count)) old_count = self.arbiter.num_workers # Don't go below 1 worker new_count = max(1, old_count - count) actual_removed = old_count - new_count self.arbiter.num_workers = new_count # Wake up the arbiter to kill excess workers self.arbiter.wakeup() return { "removed": actual_removed, "previous": old_count, "total": new_count, } def worker_kill(self, pid: int) -> dict: """ Gracefully terminate a specific worker. 
Args: pid: Worker process ID Returns: Dictionary with killed PID or error """ pid = int(pid) if pid not in self.arbiter.WORKERS: return { "success": False, "error": f"Worker {pid} not found", } try: os.kill(pid, signal.SIGTERM) return { "success": True, "killed": pid, } except OSError as e: return { "success": False, "error": str(e), } def dirty_add(self, count: int = 1) -> dict: """ Spawn additional dirty workers. Sends a MANAGE message to the dirty arbiter to spawn workers. Args: count: Number of dirty workers to add (default 1) Returns: Dictionary with added count or error """ if not self.arbiter.dirty_arbiter_pid: return { "success": False, "error": "Dirty arbiter not running", } count = max(1, int(count)) return self._send_manage_message("add", count) def dirty_remove(self, count: int = 1) -> dict: """ Remove dirty workers. Sends a MANAGE message to the dirty arbiter to remove workers. Args: count: Number of dirty workers to remove (default 1) Returns: Dictionary with removed count or error """ if not self.arbiter.dirty_arbiter_pid: return { "success": False, "error": "Dirty arbiter not running", } count = max(1, int(count)) return self._send_manage_message("remove", count) def _send_manage_message(self, operation: str, count: int) -> dict: """ Send a worker management message to the dirty arbiter. 
Args: operation: "add" or "remove" count: Number of workers to add/remove Returns: Dictionary with result or error """ # Get socket path from arbiter object or environment dirty_socket_path = None if hasattr(self.arbiter, 'dirty_arbiter') and self.arbiter.dirty_arbiter: dirty_socket_path = getattr( self.arbiter.dirty_arbiter, 'socket_path', None ) if not dirty_socket_path: dirty_socket_path = os.environ.get('GUNICORN_DIRTY_SOCKET') if not dirty_socket_path: return { "success": False, "error": "Cannot find dirty arbiter socket path", } try: from gunicorn.dirty.protocol import ( DirtyProtocol, MANAGE_OP_ADD, MANAGE_OP_REMOVE ) op = MANAGE_OP_ADD if operation == "add" else MANAGE_OP_REMOVE sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.settimeout(10.0) sock.connect(dirty_socket_path) # Send manage request request = { "type": DirtyProtocol.MSG_TYPE_MANAGE, "id": 1, "op": op, "count": count, } DirtyProtocol.write_message(sock, request) # Read response response = DirtyProtocol.read_message(sock) sock.close() if response.get("type") == DirtyProtocol.MSG_TYPE_RESPONSE: return response.get("result", {"success": True}) elif response.get("type") == DirtyProtocol.MSG_TYPE_ERROR: error = response.get("error", {}) return { "success": False, "error": error.get("message", str(error)), } else: return { "success": False, "error": f"Unexpected response type: {response.get('type')}", } except Exception as e: return { "success": False, "error": str(e), } def reload(self) -> dict: """ Trigger graceful reload (equivalent to SIGHUP). Returns: Dictionary with status """ # Send HUP to self to trigger reload os.kill(self.arbiter.pid, signal.SIGHUP) return {"status": "reloading"} def reopen(self) -> dict: """ Reopen log files (equivalent to SIGUSR1). Returns: Dictionary with status """ os.kill(self.arbiter.pid, signal.SIGUSR1) return {"status": "reopening"} def shutdown(self, mode: str = "graceful") -> dict: """ Initiate shutdown. 
Args: mode: "graceful" (SIGTERM) or "quick" (SIGINT) Returns: Dictionary with status """ if mode == "quick": os.kill(self.arbiter.pid, signal.SIGINT) else: os.kill(self.arbiter.pid, signal.SIGTERM) return {"status": "shutting_down", "mode": mode} def show_all(self) -> dict: """ Return overview of all processes (arbiter, web workers, dirty arbiter, dirty workers). Returns: Dictionary with complete process hierarchy """ now = time.monotonic() # Arbiter info arbiter_info = { "pid": self.arbiter.pid, "type": "arbiter", "role": "master", } # Web workers (HTTP workers) web_workers = [] for pid, worker in self.arbiter.WORKERS.items(): try: last_update = worker.tmp.last_update() last_heartbeat = round(now - last_update, 2) except (OSError, ValueError): last_heartbeat = None web_workers.append({ "pid": pid, "type": "web", "age": worker.age, "booted": worker.booted, "last_heartbeat": last_heartbeat, }) # Sort by age web_workers.sort(key=lambda w: w["age"]) # Dirty arbiter info (runs in separate process) dirty_arbiter_info = None dirty_workers = [] if self.arbiter.dirty_arbiter_pid: dirty_arbiter_info = { "pid": self.arbiter.dirty_arbiter_pid, "type": "dirty_arbiter", "role": "dirty master", } # Query dirty arbiter for worker info via its socket dirty_workers = self._query_dirty_workers() return { "arbiter": arbiter_info, "web_workers": web_workers, "web_worker_count": len(web_workers), "dirty_arbiter": dirty_arbiter_info, "dirty_workers": dirty_workers, "dirty_worker_count": len(dirty_workers), } def _query_dirty_workers(self) -> list: """ Query the dirty arbiter for worker information. Connects to the dirty arbiter socket and sends a status request. 
Returns: List of dirty worker info dicts, or empty list on error """ # Get socket path from arbiter object or environment dirty_socket_path = None if hasattr(self.arbiter, 'dirty_arbiter') and self.arbiter.dirty_arbiter: dirty_socket_path = getattr(self.arbiter.dirty_arbiter, 'socket_path', None) if not dirty_socket_path: dirty_socket_path = os.environ.get('GUNICORN_DIRTY_SOCKET') if not dirty_socket_path: return [] try: from gunicorn.dirty.protocol import DirtyProtocol sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.settimeout(2.0) sock.connect(dirty_socket_path) # Send status request request = { "type": DirtyProtocol.MSG_TYPE_STATUS, "id": "ctl-status-1", } DirtyProtocol.write_message(sock, request) # Read response response = DirtyProtocol.read_message(sock) sock.close() if response.get("type") == DirtyProtocol.MSG_TYPE_RESPONSE: result = response.get("result", {}) return result.get("workers", []) except Exception: pass return [] def help(self) -> dict: """ Return list of available commands. 
Returns: Dictionary with commands and descriptions """ commands = { "show all": "Show all processes (arbiter, web workers, dirty workers)", "show workers": "List HTTP workers with their status", "show dirty": "List dirty workers and apps", "show config": "Show current effective configuration", "show stats": "Show server statistics", "show listeners": "Show bound sockets", "worker add [N]": "Spawn N workers (default 1)", "worker remove [N]": "Remove N workers (default 1)", "worker kill ": "Gracefully terminate specific worker", "dirty add [N]": "Spawn N dirty workers (default 1)", "dirty remove [N]": "Remove N dirty workers (default 1)", "reload": "Graceful reload (HUP)", "reopen": "Reopen log files (USR1)", "shutdown [graceful|quick]": "Shutdown server (TERM/INT)", "help": "Show this help message", } return {"commands": commands} benoitc-gunicorn-f5fb19e/gunicorn/ctl/protocol.py000066400000000000000000000131111514360242400223010ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Control Socket Protocol JSON-based protocol with length-prefixed framing for the control interface. Message Format: +----------------+------------------+ | Length (4B BE) | JSON Payload | +----------------+------------------+ Request Format: {"id": 1, "command": "show", "args": ["workers"]} Response Format: {"id": 1, "status": "ok", "data": {...}} {"id": 1, "status": "error", "error": "message"} """ import json import struct class ProtocolError(Exception): """Protocol-level error.""" class ControlProtocol: """ Protocol implementation for control socket communication. Uses 4-byte big-endian length prefix followed by JSON payload. """ # Maximum message size (16 MB) MAX_MESSAGE_SIZE = 16 * 1024 * 1024 @staticmethod def encode_message(data: dict) -> bytes: """ Encode a message for transmission. 
Args: data: Dictionary to encode Returns: Length-prefixed JSON bytes """ payload = json.dumps(data).encode('utf-8') length = struct.pack('>I', len(payload)) return length + payload @staticmethod def decode_message(data: bytes) -> dict: """ Decode a message from bytes. Args: data: Raw bytes (length prefix + JSON payload) Returns: Decoded dictionary """ if len(data) < 4: raise ProtocolError("Message too short") length = struct.unpack('>I', data[:4])[0] if len(data) < 4 + length: raise ProtocolError("Incomplete message") payload = data[4:4 + length] return json.loads(payload.decode('utf-8')) @staticmethod def read_message(sock) -> dict: """ Read one message from a socket. Args: sock: Socket to read from Returns: Decoded message dictionary Raises: ProtocolError: If message is malformed ConnectionError: If connection is closed """ # Read length prefix length_data = b'' while len(length_data) < 4: chunk = sock.recv(4 - len(length_data)) if not chunk: if not length_data: raise ConnectionError("Connection closed") raise ProtocolError("Incomplete length prefix") length_data += chunk length = struct.unpack('>I', length_data)[0] if length > ControlProtocol.MAX_MESSAGE_SIZE: raise ProtocolError(f"Message too large: {length}") # Read payload payload_data = b'' while len(payload_data) < length: chunk = sock.recv(min(length - len(payload_data), 65536)) if not chunk: raise ProtocolError("Incomplete payload") payload_data += chunk try: return json.loads(payload_data.decode('utf-8')) except json.JSONDecodeError as e: raise ProtocolError(f"Invalid JSON: {e}") @staticmethod def write_message(sock, data: dict): """ Write one message to a socket. Args: sock: Socket to write to data: Message dictionary to send """ message = ControlProtocol.encode_message(data) sock.sendall(message) @staticmethod async def read_message_async(reader) -> dict: """ Read one message from an async reader. 
Args: reader: asyncio StreamReader Returns: Decoded message dictionary """ # Read length prefix length_data = await reader.readexactly(4) length = struct.unpack('>I', length_data)[0] if length > ControlProtocol.MAX_MESSAGE_SIZE: raise ProtocolError(f"Message too large: {length}") # Read payload payload_data = await reader.readexactly(length) try: return json.loads(payload_data.decode('utf-8')) except json.JSONDecodeError as e: raise ProtocolError(f"Invalid JSON: {e}") @staticmethod async def write_message_async(writer, data: dict): """ Write one message to an async writer. Args: writer: asyncio StreamWriter data: Message dictionary to send """ message = ControlProtocol.encode_message(data) writer.write(message) await writer.drain() def make_request(request_id: int, command: str, args: list = None) -> dict: """ Create a request message. Args: request_id: Unique request identifier command: Command name (e.g., "show workers") args: Optional list of arguments Returns: Request dictionary """ return { "id": request_id, "command": command, "args": args or [], } def make_response(request_id: int, data: dict = None) -> dict: """ Create a success response message. Args: request_id: Request identifier being responded to data: Response data Returns: Response dictionary """ return { "id": request_id, "status": "ok", "data": data or {}, } def make_error_response(request_id: int, error: str) -> dict: """ Create an error response message. Args: request_id: Request identifier being responded to error: Error message Returns: Error response dictionary """ return { "id": request_id, "status": "error", "error": error, } benoitc-gunicorn-f5fb19e/gunicorn/ctl/server.py000066400000000000000000000223011514360242400217470ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Control Socket Server Runs in the arbiter process and accepts commands via Unix socket. 
Uses asyncio in a background thread to handle client connections. """ import asyncio import os import shlex import threading from gunicorn.ctl.handlers import CommandHandlers from gunicorn.ctl.protocol import ( ControlProtocol, make_response, make_error_response, ) class ControlSocketServer: """ Control socket server running in arbiter process. The server runs an asyncio event loop in a background thread, accepting connections and dispatching commands to handlers. """ def __init__(self, arbiter, socket_path, socket_mode=0o600): """ Initialize control socket server. Args: arbiter: The Gunicorn arbiter instance socket_path: Path for the Unix socket socket_mode: Permission mode for socket (default 0o600) """ self.arbiter = arbiter self.socket_path = socket_path self.socket_mode = socket_mode self.handlers = CommandHandlers(arbiter) self._server = None self._loop = None self._thread = None self._running = False def start(self): """Start server in background thread with asyncio event loop.""" if self._running: return self._running = True self._thread = threading.Thread(target=self._run_loop, daemon=True) self._thread.start() def stop(self): """Stop server and cleanup socket.""" if not self._running: return self._running = False if self._loop and self._server: # Schedule server close in the loop self._loop.call_soon_threadsafe(self._shutdown) if self._thread: self._thread.join(timeout=2.0) self._thread = None # Clean up socket file if os.path.exists(self.socket_path): try: os.unlink(self.socket_path) except OSError: pass def _shutdown(self): """Shutdown server (called from event loop thread).""" if self._server: self._server.close() def _run_loop(self): """Run the asyncio event loop in background thread.""" try: asyncio.run(self._serve()) except Exception as e: if self.arbiter.log: self.arbiter.log.error("Control server error: %s", e) async def _serve(self): """Main async server loop.""" self._loop = asyncio.get_running_loop() # Remove socket if it exists if 
os.path.exists(self.socket_path): os.unlink(self.socket_path) # Create Unix socket server self._server = await asyncio.start_unix_server( self._handle_client, path=self.socket_path ) # Set socket permissions os.chmod(self.socket_path, self.socket_mode) if self.arbiter.log: self.arbiter.log.info("Control socket listening at %s", self.socket_path) try: async with self._server: await self._server.serve_forever() except asyncio.CancelledError: pass finally: if os.path.exists(self.socket_path): try: os.unlink(self.socket_path) except OSError: pass async def _handle_client(self, reader, writer): """ Handle client connection. Args: reader: asyncio StreamReader writer: asyncio StreamWriter """ try: while self._running: try: message = await asyncio.wait_for( ControlProtocol.read_message_async(reader), timeout=300.0 # 5 minute idle timeout ) except asyncio.TimeoutError: # Client idle too long, close connection break except asyncio.IncompleteReadError: # Client disconnected break except Exception: # Protocol error break # Process command response = await self._dispatch(message) # Send response await ControlProtocol.write_message_async(writer, response) except Exception as e: if self.arbiter.log: self.arbiter.log.debug("Control client error: %s", e) finally: writer.close() try: await writer.wait_closed() except Exception: pass async def _dispatch(self, message: dict) -> dict: """ Dispatch command to appropriate handler. 
Args: message: Request message dict Returns: Response dictionary """ request_id = message.get("id", 0) command = message.get("command", "").strip() args = message.get("args", []) if not command: return make_error_response(request_id, "Empty command") try: # Parse command (e.g., "show workers" or "worker add 2") parts = shlex.split(command) if args: parts.extend(str(a) for a in args) if not parts: return make_error_response(request_id, "Empty command") # Route to handler result = self._execute_command(parts) return make_response(request_id, result) except ValueError as e: return make_error_response(request_id, f"Invalid argument: {e}") except Exception as e: if self.arbiter.log: self.arbiter.log.exception("Command error") return make_error_response(request_id, f"Command failed: {e}") def _execute_command(self, parts: list) -> dict: # pylint: disable=too-many-return-statements """ Execute a parsed command. Args: parts: Command parts (e.g., ["show", "workers"]) Returns: Handler result dictionary """ if not parts: raise ValueError("Empty command") cmd = parts[0].lower() rest = parts[1:] # Map commands to handlers if cmd == "show": return self._handle_show(rest) elif cmd == "worker": return self._handle_worker(rest) elif cmd == "dirty": return self._handle_dirty(rest) elif cmd == "reload": return self.handlers.reload() elif cmd == "reopen": return self.handlers.reopen() elif cmd == "shutdown": mode = rest[0] if rest else "graceful" return self.handlers.shutdown(mode) elif cmd == "help": return self.handlers.help() else: raise ValueError(f"Unknown command: {cmd}") def _handle_show(self, args: list) -> dict: """Handle 'show' commands.""" if not args: raise ValueError("Missing show target (all|workers|dirty|config|stats|listeners)") target = args[0].lower() if target == "all": return self.handlers.show_all() elif target == "workers": return self.handlers.show_workers() elif target == "dirty": return self.handlers.show_dirty() elif target == "config": return 
self.handlers.show_config() elif target == "stats": return self.handlers.show_stats() elif target == "listeners": return self.handlers.show_listeners() else: raise ValueError(f"Unknown show target: {target}") def _handle_worker(self, args: list) -> dict: """Handle 'worker' commands.""" if not args: raise ValueError("Missing worker action (add|remove|kill)") action = args[0].lower() action_args = args[1:] if action == "add": count = int(action_args[0]) if action_args else 1 return self.handlers.worker_add(count) elif action == "remove": count = int(action_args[0]) if action_args else 1 return self.handlers.worker_remove(count) elif action == "kill": if not action_args: raise ValueError("Missing PID for worker kill") pid = int(action_args[0]) return self.handlers.worker_kill(pid) else: raise ValueError(f"Unknown worker action: {action}") def _handle_dirty(self, args: list) -> dict: """Handle 'dirty' commands.""" if not args: raise ValueError("Missing dirty action (add|remove)") action = args[0].lower() action_args = args[1:] if action == "add": count = int(action_args[0]) if action_args else 1 return self.handlers.dirty_add(count) elif action == "remove": count = int(action_args[0]) if action_args else 1 return self.handlers.dirty_remove(count) else: raise ValueError(f"Unknown dirty action: {action}") benoitc-gunicorn-f5fb19e/gunicorn/debug.py000066400000000000000000000043271514360242400207550ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""The debug module contains utilities and functions for better debugging Gunicorn.""" import sys import linecache import re import inspect __all__ = ['spew', 'unspew'] _token_spliter = re.compile(r'\W+') class Spew: def __init__(self, trace_names=None, show_values=True): self.trace_names = trace_names self.show_values = show_values def __call__(self, frame, event, arg): if event == 'line': lineno = frame.f_lineno if '__file__' in frame.f_globals: filename = frame.f_globals['__file__'] if (filename.endswith('.pyc') or filename.endswith('.pyo')): filename = filename[:-1] name = frame.f_globals['__name__'] line = linecache.getline(filename, lineno) else: name = '[unknown]' try: src = inspect.getsourcelines(frame) line = src[lineno] except OSError: line = 'Unknown code named [%s]. VM instruction #%d' % ( frame.f_code.co_name, frame.f_lasti) if self.trace_names is None or name in self.trace_names: print('%s:%s: %s' % (name, lineno, line.rstrip())) if not self.show_values: return self details = [] tokens = _token_spliter.split(line) for tok in tokens: if tok in frame.f_globals: details.append('%s=%r' % (tok, frame.f_globals[tok])) if tok in frame.f_locals: details.append('%s=%r' % (tok, frame.f_locals[tok])) if details: print("\t%s" % ' '.join(details)) return self def spew(trace_names=None, show_values=False): """Install a trace hook which writes incredibly detailed logs about what code is being executed to stdout. """ sys.settrace(Spew(trace_names, show_values)) def unspew(): """Remove the trace hook installed by spew. """ sys.settrace(None) benoitc-gunicorn-f5fb19e/gunicorn/dirty/000077500000000000000000000000001514360242400204425ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/gunicorn/dirty/__init__.py000066400000000000000000000037411514360242400225600ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Dirty Arbiters - Separate process pool for long-running operations. 
Dirty Arbiters provide a separate process pool for executing long-running, blocking operations (AI model loading, heavy computation) without blocking HTTP workers. Inspired by Erlang's dirty schedulers. Key Properties: - Completely separate from HTTP workers - can be killed/restarted independently - Stateful - loaded resources persist in dirty worker memory - Message-passing IPC via Unix sockets with JSON serialization - Explicit execute() API (no hidden IPC) - Asyncio-based for clean concurrent handling and future streaming support """ from .errors import ( DirtyError, DirtyTimeoutError, DirtyConnectionError, DirtyWorkerError, DirtyAppError, DirtyAppNotFoundError, DirtyProtocolError, ) from .app import DirtyApp from .client import ( DirtyClient, get_dirty_client, get_dirty_client_async, set_dirty_socket_path, close_dirty_client, close_dirty_client_async, ) # Stash (shared state between workers) from . import stash from .stash import ( StashClient, StashTable, StashError, StashTableNotFoundError, StashKeyNotFoundError, ) # Internal imports used by gunicorn core (not part of public API) from .arbiter import DirtyArbiter __all__ = [ # Errors "DirtyError", "DirtyTimeoutError", "DirtyConnectionError", "DirtyWorkerError", "DirtyAppError", "DirtyAppNotFoundError", "DirtyProtocolError", # App base class "DirtyApp", # Client "DirtyClient", "get_dirty_client", "get_dirty_client_async", "close_dirty_client", "close_dirty_client_async", # Stash (shared state) "stash", "StashClient", "StashTable", "StashError", "StashTableNotFoundError", "StashKeyNotFoundError", # Internal (used by gunicorn core) "DirtyArbiter", "set_dirty_socket_path", ] benoitc-gunicorn-f5fb19e/gunicorn/dirty/app.py000066400000000000000000000247341514360242400216060ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
""" Dirty Application Base Class Provides the DirtyApp base class that all dirty applications must inherit from, and utilities for loading dirty apps from import paths. """ import importlib import sys from .errors import DirtyAppError, DirtyAppNotFoundError class DirtyApp: """ Base class for dirty applications. Dirty applications are loaded once when the dirty worker starts and persist in memory for the lifetime of the worker. They are designed for stateful resources like ML models, connection pools, etc. Lifecycle --------- 1. ``__init__()``: Called when the app is instantiated (once per worker) 2. ``init()``: Called after instantiation to initialize resources 3. ``__call__()``: Called for each request from HTTP workers 4. ``close()``: Called when the worker shuts down State Persistence ----------------- Instance variables persist across requests. This is the key feature that enables loading heavy resources once and reusing them:: class MLApp(DirtyApp): def init(self): self.model = load_model() # Loaded once, reused forever def predict(self, data): return self.model.predict(data) # Same model for all requests Thread Safety ------------- With ``dirty_threads=1`` (default): Only one request runs at a time, so no thread safety concerns. With ``dirty_threads > 1``: Multiple requests may run concurrently in the same worker. Your app MUST be thread-safe. Options: - Use locks: ``threading.Lock()`` for shared state - Use thread-local: ``threading.local()`` for per-thread state - Use read-only state: Load models once in init(), never mutate Example:: import threading class ThreadSafeMLApp(DirtyApp): def __init__(self): self.models = {} self._lock = threading.Lock() def init(self): self.models['default'] = load_model('base-model') def load_model(self, name): with self._lock: if name not in self.models: self.models[name] = load_model(name) return {"loaded": True, "name": name} Worker Allocation ----------------- By default, all dirty workers load all apps. 
For apps that consume significant memory (like large ML models), you can limit how many workers load the app by setting the ``workers`` class attribute:: class HeavyModelApp(DirtyApp): workers = 2 # Only 2 workers will load this app def init(self): self.model = load_10gb_model() Subclasses should implement: - init(): Called once at worker startup to initialize resources - __call__(action, *args, **kwargs): Handle requests from HTTP workers - close(): Called at worker shutdown to cleanup resources """ # Number of workers that should load this app. # None means all workers (default, backward compatible). # Set to an integer to limit how many workers load this app. workers = None def init(self): """ Initialize the application. Called once when the dirty worker starts, after the app instance is created. Use this for expensive initialization like loading ML models, establishing database connections, etc. This method is called in the child process after fork, so it's safe to initialize non-fork-safe resources here. """ def __call__(self, action, *args, **kwargs): """ Handle a request from an HTTP worker. Args: action: The action/method name to execute *args: Positional arguments for the action **kwargs: Keyword arguments for the action Returns: The result of the action (must be JSON-serializable) Raises: ValueError: If the action is unknown Any exception: Will be caught and returned as DirtyAppError """ method = getattr(self, action, None) if method is None or action.startswith('_'): raise ValueError(f"Unknown action: {action}") return method(*args, **kwargs) def close(self): """ Cleanup resources. Called when the dirty worker is shutting down. Use this to release resources like database connections, unload models, etc. """ def parse_dirty_app_spec(spec): """ Parse a dirty app specification. 
Supports two formats: - ``"module:Class"`` - standard format, all workers load the app - ``"module:Class:N"`` - worker-limited format, only N workers load the app Args: spec: The app specification string Returns: tuple: (import_path, worker_count) - import_path: The "module:Class" part for importing - worker_count: Integer limit or None for all workers Raises: DirtyAppError: If the spec format is invalid or worker_count is < 1 Examples:: >>> parse_dirty_app_spec("myapp:App") ("myapp:App", None) >>> parse_dirty_app_spec("myapp:App:2") ("myapp:App", 2) >>> parse_dirty_app_spec("myapp.sub:App:1") ("myapp.sub:App", 1) """ if ':' not in spec: raise DirtyAppError( f"Invalid import path format: {spec}. " f"Expected 'module.path:ClassName' or 'module.path:ClassName:N'", app_path=spec ) parts = spec.split(':') # Standard format: "module:Class" or "module.sub:Class" if len(parts) == 2: return (spec, None) # Worker-limited format: "module:Class:N" if len(parts) == 3: module_path, class_name, count_str = parts import_path = f"{module_path}:{class_name}" # Validate the worker count try: worker_count = int(count_str) except ValueError: raise DirtyAppError( f"Invalid worker count in spec: {spec}. " f"Expected integer, got '{count_str}'", app_path=spec ) if worker_count < 1: raise DirtyAppError( f"Invalid worker count in spec: {spec}. " f"Worker count must be >= 1, got {worker_count}", app_path=spec ) return (import_path, worker_count) # Too many colons raise DirtyAppError( f"Invalid import path format: {spec}. " f"Expected 'module.path:ClassName' or 'module.path:ClassName:N'", app_path=spec ) def load_dirty_app(import_path): """ Load a dirty app class from an import path. 
Args: import_path: String in format 'module.path:ClassName' Returns: An instance of the dirty app class Raises: DirtyAppNotFoundError: If the module or class cannot be found DirtyAppError: If the class is not a valid DirtyApp subclass """ if ':' not in import_path: raise DirtyAppError( f"Invalid import path format: {import_path}. " f"Expected 'module.path:ClassName'", app_path=import_path ) module_path, class_name = import_path.rsplit(':', 1) try: # Import the module if module_path in sys.modules: module = sys.modules[module_path] else: module = importlib.import_module(module_path) except ImportError as e: raise DirtyAppNotFoundError(import_path) from e # Get the class from the module try: app_class = getattr(module, class_name) except AttributeError: raise DirtyAppNotFoundError(import_path) from None # Validate it's a class if not isinstance(app_class, type): raise DirtyAppError( f"{import_path} is not a class", app_path=import_path ) # Create an instance try: app = app_class() except Exception as e: raise DirtyAppError( f"Failed to instantiate {import_path}: {e}", app_path=import_path ) from e # Validate it has the required methods required_methods = ['init', '__call__', 'close'] for method_name in required_methods: if not hasattr(app, method_name) or not callable(getattr(app, method_name)): raise DirtyAppError( f"{import_path} is missing required method: {method_name}", app_path=import_path ) return app def load_dirty_apps(import_paths): """ Load multiple dirty apps from a list of import paths. Args: import_paths: List of import path strings Returns: dict: Mapping of import path to app instance Raises: DirtyAppError: If any app fails to load """ apps = {} for import_path in import_paths: apps[import_path] = load_dirty_app(import_path) return apps def get_app_workers_attribute(import_path): """ Get the workers class attribute from a dirty app without instantiating it. 
This is used by the arbiter to determine how many workers should load an app based on the class attribute, without needing to actually load the app. Args: import_path: String in format 'module.path:ClassName' Returns: The workers class attribute value (int or None) Raises: DirtyAppNotFoundError: If the module or class cannot be found DirtyAppError: If the import path format is invalid """ if ':' not in import_path: raise DirtyAppError( f"Invalid import path format: {import_path}. " f"Expected 'module.path:ClassName'", app_path=import_path ) module_path, class_name = import_path.rsplit(':', 1) try: # Import the module if module_path in sys.modules: module = sys.modules[module_path] else: module = importlib.import_module(module_path) except ImportError as e: raise DirtyAppNotFoundError(import_path) from e # Get the class from the module try: app_class = getattr(module, class_name) except AttributeError: raise DirtyAppNotFoundError(import_path) from None # Validate it's a class if not isinstance(app_class, type): raise DirtyAppError( f"{import_path} is not a class", app_path=import_path ) # Return the workers attribute (defaults to None if not set) return getattr(app_class, 'workers', None) benoitc-gunicorn-f5fb19e/gunicorn/dirty/arbiter.py000066400000000000000000001211501514360242400224440ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Dirty Arbiter Process Asyncio-based arbiter that manages the dirty worker pool and routes requests from HTTP workers to available dirty workers. 
""" import asyncio import errno import fnmatch import os import signal import tempfile import time from gunicorn import util from .app import get_app_workers_attribute, parse_dirty_app_spec from .errors import ( DirtyError, DirtyNoWorkersAvailableError, DirtyTimeoutError, DirtyWorkerError, ) from .protocol import ( DirtyProtocol, make_error_response, make_response, STASH_OP_PUT, STASH_OP_GET, STASH_OP_DELETE, STASH_OP_KEYS, STASH_OP_CLEAR, STASH_OP_INFO, STASH_OP_ENSURE, STASH_OP_DELETE_TABLE, STASH_OP_TABLES, STASH_OP_EXISTS, MANAGE_OP_ADD, MANAGE_OP_REMOVE, ) from .worker import DirtyWorker class DirtyArbiter: """ Dirty arbiter that manages the dirty worker pool. The arbiter runs an asyncio event loop and handles: - Spawning and managing dirty worker processes - Accepting connections from HTTP workers - Routing requests to available dirty workers - Monitoring worker health via heartbeat """ SIGNALS = [getattr(signal, "SIG%s" % x) for x in "HUP QUIT INT TERM TTIN TTOU USR1 USR2 CHLD".split()] # Worker boot error code WORKER_BOOT_ERROR = 3 def __init__(self, cfg, log, socket_path=None, pidfile=None): """ Initialize the dirty arbiter. 
Args: cfg: Gunicorn config log: Logger socket_path: Path to the arbiter's Unix socket pidfile: Well-known PID file location for orphan detection """ self.cfg = cfg self.log = log self.pid = None self.ppid = os.getpid() self.pidfile = pidfile # Well-known location for orphan detection # Use a temp directory for sockets self.tmpdir = tempfile.mkdtemp(prefix="gunicorn-dirty-") self.socket_path = socket_path or os.path.join( self.tmpdir, "arbiter.sock" ) self.workers = {} # pid -> DirtyWorker self.worker_sockets = {} # pid -> socket_path self.worker_connections = {} # pid -> (reader, writer) self.worker_queues = {} # pid -> asyncio.Queue self.worker_consumers = {} # pid -> asyncio.Task self._worker_rr_index = 0 # Round-robin index for worker selection self.worker_age = 0 self.alive = True self.num_workers = self.cfg.dirty_workers # Dynamic count for TTIN/TTOU self._server = None self._loop = None self._pending_requests = {} # request_id -> Future # Per-app worker allocation tracking # Maps import_path -> {import_path, worker_count, original_spec} self.app_specs = {} # Maps import_path -> set of worker PIDs that have loaded the app self.app_worker_map = {} # Maps worker_pid -> list of import_paths loaded by this worker self.worker_app_map = {} # Per-app round-robin indices for routing self._app_rr_indices = {} # Queue of app lists from dead workers to respawn with same apps self._pending_respawns = [] # Stash (shared state) - global tables stored in arbiter # Maps table_name -> dict of data self.stash_tables = {} # Parse app specs on init self._parse_app_specs() def _parse_app_specs(self): """ Parse all app specifications from config. Populates self.app_specs with parsed information about each app, including the import path and worker count limits. Worker count priority: 1. Config override (e.g., "module:Class:2") - highest priority 2. Class attribute (e.g., workers = 2 on the class) 3. 
None (all workers) - default """ for spec in self.cfg.dirty_apps: import_path, worker_count = parse_dirty_app_spec(spec) # If no config override, check class attribute if worker_count is None: try: worker_count = get_app_workers_attribute(import_path) except Exception as e: # Log but don't fail - we'll discover the error when loading self.log.warning( "Could not read workers attribute from %s: %s", import_path, e ) self.app_specs[import_path] = { 'import_path': import_path, 'worker_count': worker_count, 'original_spec': spec, } # Initialize the app_worker_map for this app self.app_worker_map[import_path] = set() def _get_minimum_workers(self): """ Calculate minimum number of workers required by app specs. Returns the maximum worker_count across all apps that have limits. Apps with worker_count=None don't impose a minimum. Returns: int: Minimum workers required (at least 1) """ min_required = 1 for spec in self.app_specs.values(): worker_count = spec['worker_count'] if worker_count is not None: min_required = max(min_required, worker_count) return min_required def _get_apps_for_new_worker(self): """ Determine which apps a new worker should load. Returns a list of import paths for apps that need more workers. Apps with workers=None (all workers) are always included. Apps with worker limits are included only if they haven't reached their limit yet. Returns: List of import paths to load, or empty list if no apps need workers """ app_paths = [] for import_path, spec in self.app_specs.items(): worker_count = spec['worker_count'] current_workers = len(self.app_worker_map.get(import_path, set())) # None means all workers should load this app if worker_count is None: app_paths.append(import_path) # Otherwise check if we've reached the limit elif current_workers < worker_count: app_paths.append(import_path) return app_paths def _register_worker_apps(self, worker_pid, app_paths): """ Register which apps a worker has loaded. 
Updates both app_worker_map and worker_app_map to track the bidirectional relationship between workers and apps. Args: worker_pid: The PID of the worker app_paths: List of app import paths loaded by this worker """ self.worker_app_map[worker_pid] = list(app_paths) for app_path in app_paths: if app_path not in self.app_worker_map: self.app_worker_map[app_path] = set() self.app_worker_map[app_path].add(worker_pid) def _unregister_worker(self, worker_pid): """ Unregister a worker's apps when it exits. Removes the worker from all tracking maps. Args: worker_pid: The PID of the worker to unregister """ # Get the apps this worker had app_paths = self.worker_app_map.pop(worker_pid, []) # Remove worker from each app's worker set for app_path in app_paths: if app_path in self.app_worker_map: self.app_worker_map[app_path].discard(worker_pid) def run(self): """Run the dirty arbiter (blocking call).""" self.pid = os.getpid() self.log.info("Dirty arbiter starting (pid: %s)", self.pid) # Write PID to well-known location for orphan detection if self.pidfile: try: with open(self.pidfile, 'w') as f: f.write(str(self.pid)) except IOError as e: self.log.warning("Failed to write PID file: %s", e) # Set socket path env var for dirty workers (enables stash access) os.environ['GUNICORN_DIRTY_SOCKET'] = self.socket_path # Call hook self.cfg.on_dirty_starting(self) # Set up signal handlers self.init_signals() # Set process title util._setproctitle("dirty-arbiter") try: asyncio.run(self._run_async()) except KeyboardInterrupt: pass finally: self._cleanup_sync() def init_signals(self): """Set up signal handlers.""" for sig in self.SIGNALS: signal.signal(sig, signal.SIG_DFL) signal.signal(signal.SIGTERM, self._signal_handler) signal.signal(signal.SIGQUIT, self._signal_handler) signal.signal(signal.SIGINT, self._signal_handler) signal.signal(signal.SIGHUP, self._signal_handler) signal.signal(signal.SIGUSR1, self._signal_handler) signal.signal(signal.SIGCHLD, self._signal_handler) 
signal.signal(signal.SIGTTIN, self._signal_handler) signal.signal(signal.SIGTTOU, self._signal_handler) def _signal_handler(self, sig, frame): """Handle signals.""" if sig == signal.SIGCHLD: # Child exited - will be handled in reap_workers if self._loop: self._loop.call_soon_threadsafe( lambda: asyncio.create_task(self._handle_sigchld()) ) return if sig == signal.SIGUSR1: # Reopen log files self.log.reopen_files() return if sig == signal.SIGHUP: # Reload workers if self._loop: self._loop.call_soon_threadsafe( lambda: asyncio.create_task(self.reload()) ) return if sig == signal.SIGTTIN: # Increase number of workers self.num_workers += 1 self.log.info("SIGTTIN: Increasing dirty workers to %s", self.num_workers) if self._loop: self._loop.call_soon_threadsafe( lambda: asyncio.create_task(self.manage_workers()) ) return if sig == signal.SIGTTOU: # Decrease number of workers (respecting minimum) min_workers = self._get_minimum_workers() if self.num_workers <= min_workers: self.log.warning( "SIGTTOU: Cannot decrease below %s workers " "(required by app specs)", min_workers ) return self.num_workers -= 1 self.log.info("SIGTTOU: Decreasing dirty workers to %s", self.num_workers) if self._loop: self._loop.call_soon_threadsafe( lambda: asyncio.create_task(self.manage_workers()) ) return # Shutdown signals self.alive = False if self._loop: self._loop.call_soon_threadsafe(self._shutdown) def _shutdown(self): """Initiate async shutdown.""" if self._server: self._server.close() async def _run_async(self): """Main async loop - start server, manage workers.""" self._loop = asyncio.get_running_loop() # Remove socket if it exists if os.path.exists(self.socket_path): os.unlink(self.socket_path) # Start Unix socket server for HTTP workers self._server = await asyncio.start_unix_server( self.handle_client, path=self.socket_path ) # Make socket accessible os.chmod(self.socket_path, 0o600) self.log.info("Dirty arbiter listening on %s", self.socket_path) # Spawn initial workers await 
self.manage_workers() # Start periodic tasks monitor_task = asyncio.create_task(self._worker_monitor()) try: async with self._server: await self._server.serve_forever() except (asyncio.CancelledError, RuntimeError): # RuntimeError raised when server.close() is called during serve_forever() pass finally: monitor_task.cancel() try: await monitor_task except asyncio.CancelledError: pass await self.stop() async def _worker_monitor(self): """Periodically check worker health and manage pool.""" while self.alive: await asyncio.sleep(1.0) # Check if parent (main arbiter) died unexpectedly if os.getppid() != self.ppid: self.log.warning("Parent changed, shutting down dirty arbiter") self.alive = False self._shutdown() return await self.murder_workers() await self.manage_workers() async def _handle_sigchld(self): """Handle SIGCHLD - reap dead workers.""" self.reap_workers() # Only spawn new workers if we're still alive if self.alive: await self.manage_workers() async def handle_client(self, reader, writer): """ Handle a connection from an HTTP worker. Routes requests to available dirty workers and returns responses. Supports both regular responses and streaming (chunk-based) responses. Also handles stash (shared state) operations. 
""" self.log.debug("New client connection from HTTP worker") try: while self.alive: try: message = await DirtyProtocol.read_message_async(reader) except asyncio.IncompleteReadError: break msg_type = message.get("type") # Handle stash operations if msg_type == DirtyProtocol.MSG_TYPE_STASH: await self.handle_stash_request(message, writer) # Handle status queries elif msg_type == DirtyProtocol.MSG_TYPE_STATUS: await self.handle_status_request(message, writer) # Handle worker management (add/remove workers) elif msg_type == DirtyProtocol.MSG_TYPE_MANAGE: await self.handle_manage_request(message, writer) else: # Route request to a dirty worker - pass writer for streaming await self.route_request(message, writer) except Exception as e: self.log.error("Client connection error: %s", e) finally: writer.close() try: await writer.wait_closed() except Exception: pass async def route_request(self, request, client_writer): """ Route a request to an available dirty worker via queue. Each worker has a dedicated queue and consumer task. Requests are submitted to the queue and processed sequentially by the consumer. For streaming responses, messages (chunks) are forwarded directly to the client_writer as they arrive from the worker. Args: request: Request message dict client_writer: StreamWriter to send responses to client """ request_id = request.get("id", "unknown") app_path = request.get("app_path") # Find an available worker (filtered by app if specified) worker_pid = await self._get_available_worker(app_path) if worker_pid is None: # Distinguish between no workers at all vs. 
no workers for this app if not self.workers: error = DirtyError("No dirty workers available") elif app_path and self.app_specs: # Per-app allocation is configured and no workers have this app error = DirtyNoWorkersAvailableError(app_path) else: error = DirtyError("No dirty workers available") response = make_error_response(request_id, error) await DirtyProtocol.write_message_async(client_writer, response) return # Get queue (start consumer if needed) if worker_pid not in self.worker_queues: await self._start_worker_consumer(worker_pid) queue = self.worker_queues[worker_pid] future = asyncio.get_running_loop().create_future() # Submit request to queue with client writer for streaming support await queue.put((request, client_writer, future)) # Wait for completion (streaming messages forwarded by consumer) try: await future except Exception as e: response = make_error_response( request_id, DirtyWorkerError(f"Request failed: {e}", worker_id=worker_pid) ) await DirtyProtocol.write_message_async(client_writer, response) async def _start_worker_consumer(self, worker_pid): """Start a consumer task for a worker's request queue.""" queue = asyncio.Queue() self.worker_queues[worker_pid] = queue async def consumer(): while self.alive: try: request, client_writer, future = await queue.get() try: await self._execute_on_worker( worker_pid, request, client_writer ) if not future.done(): future.set_result(None) except Exception as e: if not future.done(): future.set_exception(e) finally: queue.task_done() except asyncio.CancelledError: break task = asyncio.create_task(consumer()) self.worker_consumers[worker_pid] = task async def _execute_on_worker(self, worker_pid, request, client_writer): """ Execute request on a specific worker (called by consumer). Handles both regular responses and streaming (chunk-based) responses. For streaming, chunk and end messages are forwarded directly to the client_writer as they arrive from the worker. 
""" request_id = request.get("id", "unknown") try: reader, writer = await self._get_worker_connection(worker_pid) await DirtyProtocol.write_message_async(writer, request) # Read messages until we get a response, end, or error while True: try: message = await asyncio.wait_for( DirtyProtocol.read_message_async(reader), timeout=self.cfg.dirty_timeout ) except asyncio.TimeoutError: response = make_error_response( request_id, DirtyTimeoutError("Worker timeout", self.cfg.dirty_timeout) ) await DirtyProtocol.write_message_async(client_writer, response) return msg_type = message.get("type") # Forward chunk messages to client if msg_type == DirtyProtocol.MSG_TYPE_CHUNK: await DirtyProtocol.write_message_async(client_writer, message) continue # Forward end message and complete if msg_type == DirtyProtocol.MSG_TYPE_END: await DirtyProtocol.write_message_async(client_writer, message) return # Forward response or error and complete if msg_type in (DirtyProtocol.MSG_TYPE_RESPONSE, DirtyProtocol.MSG_TYPE_ERROR): await DirtyProtocol.write_message_async(client_writer, message) return # Unknown message type - log and continue self.log.warning("Unknown message type from worker: %s", msg_type) except Exception as e: self.log.error("Error executing on worker %s: %s", worker_pid, e) self._close_worker_connection(worker_pid) response = make_error_response( request_id, DirtyWorkerError(f"Worker communication failed: {e}", worker_id=worker_pid) ) await DirtyProtocol.write_message_async(client_writer, response) async def _get_available_worker(self, app_path=None): """ Get an available worker PID using round-robin selection. If app_path is provided, only returns workers that have loaded that specific app. Uses per-app round-robin to ensure fair distribution among eligible workers. Args: app_path: Optional import path of the target app. If None, returns any worker using global round-robin. Returns: Worker PID or None if no eligible workers are available. 
""" # Determine eligible workers if app_path and self.app_specs: # Per-app allocation is configured - must return a worker # that has this specific app if app_path in self.app_worker_map: eligible_pids = list(self.app_worker_map[app_path]) else: # App not known or no workers have it return None else: # No specific app requested, or no app specs configured # (backward compatible) - any worker will do eligible_pids = list(self.workers.keys()) if not eligible_pids: return None # Per-app round-robin for fairness if app_path and self.app_specs: idx = self._app_rr_indices.get(app_path, 0) self._app_rr_indices[app_path] = (idx + 1) % len(eligible_pids) else: idx = self._worker_rr_index self._worker_rr_index = (idx + 1) % len(eligible_pids) return eligible_pids[idx % len(eligible_pids)] async def _get_worker_connection(self, worker_pid): """Get or create connection to a worker.""" if worker_pid in self.worker_connections: return self.worker_connections[worker_pid] socket_path = self.worker_sockets.get(worker_pid) if not socket_path: raise DirtyError(f"No socket for worker {worker_pid}") # Wait for socket to be available for _ in range(50): # 5 seconds max if os.path.exists(socket_path): break await asyncio.sleep(0.1) else: raise DirtyError(f"Worker socket not ready: {socket_path}") reader, writer = await asyncio.open_unix_connection(socket_path) self.worker_connections[worker_pid] = (reader, writer) return reader, writer def _close_worker_connection(self, worker_pid): """Close connection to a worker.""" if worker_pid in self.worker_connections: _reader, writer = self.worker_connections.pop(worker_pid) writer.close() # ------------------------------------------------------------------------- # Stash (shared state) operations - handled directly in arbiter # ------------------------------------------------------------------------- async def handle_status_request(self, message, client_writer): """ Handle a status query request. 
Returns information about the dirty arbiter and its workers. Args: message: Status request message client_writer: StreamWriter to send response to client """ request_id = message.get("id", "unknown") now = time.monotonic() workers_info = [] for pid, worker in self.workers.items(): try: last_update = worker.tmp.last_update() last_heartbeat = round(now - last_update, 2) except (OSError, ValueError, AttributeError): last_heartbeat = None workers_info.append({ "pid": pid, "age": worker.age, "apps": getattr(worker, 'app_paths', []), "booted": getattr(worker, 'booted', False), "last_heartbeat": last_heartbeat, }) workers_info.sort(key=lambda w: w["age"]) result = { "arbiter_pid": self.pid, "workers": workers_info, "worker_count": len(workers_info), "apps": list(self.app_specs.keys()) if self.app_specs else [], } response = make_response(request_id, result) await DirtyProtocol.write_message_async(client_writer, response) async def handle_manage_request(self, message, client_writer): """ Handle a worker management request. Supports adding or removing dirty workers via protocol messages. 
Args: message: Manage request message client_writer: StreamWriter to send response to client """ request_id = message.get("id", "unknown") op = message.get("op") count = max(1, int(message.get("count", 1))) try: if op == MANAGE_OP_ADD: # Add workers - only loads apps that need more workers spawned = 0 for _ in range(count): result = self.spawn_worker() if result is not None: self.num_workers += 1 spawned += 1 await asyncio.sleep(0.1) # Provide feedback about why no workers were spawned if spawned == 0: result = { "success": True, "operation": "add", "requested": count, "spawned": 0, "reason": "All apps have reached their worker limits", "total_workers": len(self.workers), "target_workers": self.num_workers, } else: result = { "success": True, "operation": "add", "requested": count, "spawned": spawned, "total_workers": len(self.workers), "target_workers": self.num_workers, } elif op == MANAGE_OP_REMOVE: # Remove workers (similar to TTOU signal but via message) min_workers = self._get_minimum_workers() removed = 0 for _ in range(count): if self.num_workers <= min_workers: break if len(self.workers) <= 1: break self.num_workers -= 1 # Kill oldest worker oldest_pid = min(self.workers.keys(), key=lambda p: self.workers[p].age) self.kill_worker(oldest_pid, signal.SIGTERM) removed += 1 await asyncio.sleep(0.1) result = { "success": True, "operation": "remove", "requested": count, "removed": removed, "total_workers": len(self.workers), "target_workers": self.num_workers, } else: error = DirtyError(f"Unknown manage operation: {op}") response = make_error_response(request_id, error) await DirtyProtocol.write_message_async(client_writer, response) return self.log.info("Worker management: %s %d workers (spawned/removed: %d)", "add" if op == MANAGE_OP_ADD else "remove", count, result.get("spawned", result.get("removed", 0))) response = make_response(request_id, result) await DirtyProtocol.write_message_async(client_writer, response) except Exception as e: self.log.error("Manage 
operation error: %s", e) response = make_error_response(request_id, DirtyError(str(e))) await DirtyProtocol.write_message_async(client_writer, response) async def handle_stash_request(self, message, client_writer): """ Handle a stash operation directly in the arbiter. All stash tables are stored in arbiter memory for simplicity and fast access. Args: message: Stash operation message client_writer: StreamWriter to send response to client """ request_id = message.get("id", "unknown") op = message.get("op") table = message.get("table", "") key = message.get("key") value = message.get("value") pattern = message.get("pattern") try: result = None if op == STASH_OP_PUT: # Auto-create table if needed if table not in self.stash_tables: self.stash_tables[table] = {} self.stash_tables[table][key] = value result = True elif op == STASH_OP_GET: if table not in self.stash_tables: result = {"error": "key_not_found"} elif key not in self.stash_tables[table]: result = {"error": "key_not_found"} else: result = self.stash_tables[table][key] elif op == STASH_OP_DELETE: if table in self.stash_tables and key in self.stash_tables[table]: del self.stash_tables[table][key] result = True else: result = False elif op == STASH_OP_KEYS: if table not in self.stash_tables: result = [] else: all_keys = list(self.stash_tables[table].keys()) if pattern: all_keys = [k for k in all_keys if fnmatch.fnmatch(str(k), pattern)] result = all_keys elif op == STASH_OP_CLEAR: if table in self.stash_tables: self.stash_tables[table].clear() result = True elif op == STASH_OP_INFO: if table not in self.stash_tables: result = {"error": "table_not_found"} else: result = { "size": len(self.stash_tables[table]), "table": table, } elif op == STASH_OP_ENSURE: if table not in self.stash_tables: self.stash_tables[table] = {} result = True elif op == STASH_OP_DELETE_TABLE: if table in self.stash_tables: del self.stash_tables[table] result = True else: result = False elif op == STASH_OP_TABLES: result = 
list(self.stash_tables.keys()) elif op == STASH_OP_EXISTS: if table not in self.stash_tables: result = False elif key is None: result = True else: result = key in self.stash_tables[table] else: error = DirtyError(f"Unknown stash operation: {op}") response = make_error_response(request_id, error) await DirtyProtocol.write_message_async(client_writer, response) return # Handle error results if isinstance(result, dict) and "error" in result: error_type = result["error"] if error_type == "table_not_found": error = DirtyError(f"Table not found: {table}") elif error_type == "key_not_found": error = DirtyError(f"Key not found: {key}") else: error = DirtyError(str(result)) error.error_type = f"Stash{error_type.title().replace('_', '')}Error" response = make_error_response(request_id, error) else: response = make_response(request_id, result) await DirtyProtocol.write_message_async(client_writer, response) except Exception as e: self.log.error("Stash operation error: %s", e) response = make_error_response(request_id, DirtyError(str(e))) await DirtyProtocol.write_message_async(client_writer, response) async def manage_workers(self): """Maintain the number of dirty workers.""" if not self.alive: return num_workers = self.num_workers # Spawn workers if needed while self.alive and len(self.workers) < num_workers: result = self.spawn_worker() if result is None: # No apps need more workers - stop spawning break await asyncio.sleep(0.1) # Kill excess workers while len(self.workers) > num_workers: # Kill oldest worker oldest_pid = min(self.workers.keys(), key=lambda p: self.workers[p].age) self.kill_worker(oldest_pid, signal.SIGTERM) await asyncio.sleep(0.1) def spawn_worker(self, force_all_apps=False): """ Spawn a new dirty worker. Worker app assignment follows these priorities: 1. If there are pending respawns (from dead workers), use those apps 2. Otherwise, determine apps for a new worker based on allocation 3. 
If force_all_apps=True, spawn with all apps regardless of limits Args: force_all_apps: If True, spawn worker with all apps ignoring limits Returns: Worker PID in parent process, or None if no apps need workers """ # Priority 1: Respawn dead worker with same apps if self._pending_respawns: app_paths = self._pending_respawns.pop(0) elif force_all_apps: # Force spawn with all apps (used by TTIN signal) app_paths = list(self.app_specs.keys()) else: # Priority 2: New worker for initial pool app_paths = self._get_apps_for_new_worker() if not app_paths: self.log.debug("No apps need more workers, skipping spawn") return None self.worker_age += 1 socket_path = os.path.join( self.tmpdir, f"worker-{self.worker_age}.sock" ) worker = DirtyWorker( age=self.worker_age, ppid=self.pid, app_paths=app_paths, # Only assigned apps, not all apps cfg=self.cfg, log=self.log, socket_path=socket_path ) pid = os.fork() if pid != 0: # Parent process worker.pid = pid self.workers[pid] = worker self.worker_sockets[pid] = socket_path # Register which apps this worker has self._register_worker_apps(pid, app_paths) self.cfg.dirty_post_fork(self, worker) self.log.info("Spawned dirty worker (pid: %s) with apps: %s", pid, app_paths) return pid # Child process - use os._exit() to avoid asyncio cleanup issues worker.pid = os.getpid() try: util._setproctitle(f"dirty-worker [{self.cfg.proc_name}]") worker.init_process() os._exit(0) except SystemExit as e: os._exit(e.code if e.code is not None else 0) except Exception: self.log.exception("Exception in dirty worker process") if not worker.booted: os._exit(self.WORKER_BOOT_ERROR) os._exit(1) def kill_worker(self, pid, sig): """Kill a worker by PID.""" try: os.kill(pid, sig) except OSError as e: if e.errno == errno.ESRCH: self._cleanup_worker(pid) def _cleanup_worker(self, pid): """ Clean up after a worker exits. Saves the dead worker's app list to pending respawns so the replacement worker gets the same apps. 
""" self._close_worker_connection(pid) # Cancel consumer task if pid in self.worker_consumers: self.worker_consumers[pid].cancel() del self.worker_consumers[pid] # Remove queue self.worker_queues.pop(pid, None) # Save dead worker's apps for respawn BEFORE unregistering if pid in self.worker_app_map: dead_apps = list(self.worker_app_map[pid]) if dead_apps: self._pending_respawns.append(dead_apps) # Now safe to unregister the worker's apps self._unregister_worker(pid) worker = self.workers.pop(pid, None) if worker: self.cfg.dirty_worker_exit(self, worker) socket_path = self.worker_sockets.pop(pid, None) if socket_path and os.path.exists(socket_path): try: os.unlink(socket_path) except OSError: pass async def murder_workers(self): """Kill workers that have timed out.""" if not self.cfg.dirty_timeout: return for pid, worker in list(self.workers.items()): try: if time.monotonic() - worker.tmp.last_update() <= self.cfg.dirty_timeout: continue except (OSError, ValueError): continue if not worker.aborted: self.log.critical("DIRTY WORKER TIMEOUT (pid:%s)", pid) worker.aborted = True self.kill_worker(pid, signal.SIGABRT) else: self.kill_worker(pid, signal.SIGKILL) def reap_workers(self): """Reap dead worker processes.""" try: while True: wpid, status = os.waitpid(-1, os.WNOHANG) if not wpid: break exitcode = None if os.WIFEXITED(status): exitcode = os.WEXITSTATUS(status) elif os.WIFSIGNALED(status): sig = os.WTERMSIG(status) self.log.warning("Dirty worker (pid:%s) killed by signal %s", wpid, sig) if exitcode == self.WORKER_BOOT_ERROR: self.log.error("Dirty worker failed to boot (pid:%s)", wpid) self._cleanup_worker(wpid) self.log.info("Dirty worker exited (pid:%s)", wpid) except OSError as e: if e.errno != errno.ECHILD: raise async def reload(self): """Reload workers (SIGHUP handling).""" self.log.info("Reloading dirty workers") # Spawn new workers for _ in range(self.cfg.dirty_workers): self.spawn_worker() await asyncio.sleep(0.1) # Kill old workers old_workers = 
list(self.workers.keys()) for pid in old_workers[self.cfg.dirty_workers:]: self.kill_worker(pid, signal.SIGTERM) async def stop(self, graceful=True): """Stop all workers.""" # Cancel all consumer tasks for task in self.worker_consumers.values(): task.cancel() sig = signal.SIGTERM if graceful else signal.SIGQUIT limit = time.time() + self.cfg.dirty_graceful_timeout # Signal all workers for pid in list(self.workers.keys()): self.kill_worker(pid, sig) # Wait for workers to exit while self.workers and time.time() < limit: self.reap_workers() await asyncio.sleep(0.1) # Force kill remaining workers for pid in list(self.workers.keys()): self.kill_worker(pid, signal.SIGKILL) self.reap_workers() def _cleanup_sync(self): """Synchronous cleanup on exit.""" # Remove PID file if self.pidfile and os.path.exists(self.pidfile): try: os.unlink(self.pidfile) except OSError: pass # Clean up socket if os.path.exists(self.socket_path): try: os.unlink(self.socket_path) except OSError: pass # Clean up temp directory try: for f in os.listdir(self.tmpdir): os.unlink(os.path.join(self.tmpdir, f)) os.rmdir(self.tmpdir) except OSError: pass self.log.info("Dirty arbiter exiting (pid: %s)", self.pid) benoitc-gunicorn-f5fb19e/gunicorn/dirty/client.py000066400000000000000000000573661514360242400223130ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Dirty Client Client for HTTP workers to communicate with the dirty worker pool. Provides both sync and async APIs. """ import asyncio import contextvars import os import socket import threading import time import uuid from .errors import ( DirtyConnectionError, DirtyError, DirtyTimeoutError, ) from .protocol import ( DirtyProtocol, make_request, ) class DirtyClient: """ Client for calling dirty workers from HTTP workers. Provides both sync and async APIs. 
The sync API is for traditional sync workers (sync, gthread), while the async API is for async workers (asgi, gevent, eventlet). """ def __init__(self, socket_path, timeout=30.0): """ Initialize the dirty client. Args: socket_path: Path to the dirty arbiter's Unix socket timeout: Default timeout for operations in seconds """ self.socket_path = socket_path self.timeout = timeout self._sock = None self._reader = None self._writer = None self._lock = threading.Lock() # ------------------------------------------------------------------------- # Sync API (for sync HTTP workers) # ------------------------------------------------------------------------- def connect(self): """ Establish sync socket connection to arbiter. Raises: DirtyConnectionError: If connection fails """ if self._sock is not None: return try: self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self._sock.settimeout(self.timeout) self._sock.connect(self.socket_path) except (socket.error, OSError) as e: self._sock = None raise DirtyConnectionError( f"Failed to connect to dirty arbiter: {e}", socket_path=self.socket_path ) from e def execute(self, app_path, action, *args, **kwargs): """ Execute an action on a dirty app (sync/blocking). 
Args: app_path: Import path of the dirty app (e.g., 'myapp.ml:MLApp') action: Action to call on the app *args: Positional arguments **kwargs: Keyword arguments Returns: Result from the dirty app action Raises: DirtyConnectionError: If connection fails DirtyTimeoutError: If operation times out DirtyError: If execution fails """ with self._lock: return self._execute_locked(app_path, action, args, kwargs) def _execute_locked(self, app_path, action, args, kwargs): """Execute while holding the lock.""" # Ensure connected if self._sock is None: self.connect() # Build request request_id = str(uuid.uuid4()) request = make_request( request_id=request_id, app_path=app_path, action=action, args=args, kwargs=kwargs ) try: # Send request DirtyProtocol.write_message(self._sock, request) # Receive response response = DirtyProtocol.read_message(self._sock) # Handle response return self._handle_response(response) except socket.timeout: self._close_socket() raise DirtyTimeoutError( "Timeout waiting for dirty app response", timeout=self.timeout ) except Exception as e: self._close_socket() if isinstance(e, DirtyError): raise raise DirtyConnectionError(f"Communication error: {e}") from e def stream(self, app_path, action, *args, **kwargs): """ Stream results from a dirty app action (sync). This method returns an iterator that yields chunks from a streaming response. Use this for actions that return generators. 
Args: app_path: Import path of the dirty app (e.g., 'myapp.ml:MLApp') action: Action to call on the app *args: Positional arguments **kwargs: Keyword arguments Yields: Chunks of data from the streaming response Raises: DirtyConnectionError: If connection fails DirtyTimeoutError: If operation times out DirtyError: If execution fails Example:: for chunk in client.stream("myapp.llm:LLMApp", "generate", prompt): print(chunk, end="", flush=True) """ return DirtyStreamIterator(self, app_path, action, args, kwargs) def _handle_response(self, response): """Handle response message, extracting result or raising error.""" msg_type = response.get("type") if msg_type == DirtyProtocol.MSG_TYPE_RESPONSE: return response.get("result") elif msg_type == DirtyProtocol.MSG_TYPE_ERROR: error_info = response.get("error", {}) error = DirtyError.from_dict(error_info) raise error else: raise DirtyError(f"Unknown response type: {msg_type}") def _close_socket(self): """Close the socket connection.""" if self._sock is not None: try: self._sock.close() except Exception: pass self._sock = None def close(self): """Close the sync connection.""" with self._lock: self._close_socket() # ------------------------------------------------------------------------- # Async API (for async HTTP workers) # ------------------------------------------------------------------------- async def connect_async(self): """ Establish async connection to arbiter. 
Raises: DirtyConnectionError: If connection fails """ if self._writer is not None: return try: self._reader, self._writer = await asyncio.wait_for( asyncio.open_unix_connection(self.socket_path), timeout=self.timeout ) except asyncio.TimeoutError: raise DirtyTimeoutError( "Timeout connecting to dirty arbiter", timeout=self.timeout ) except (OSError, ConnectionError) as e: raise DirtyConnectionError( f"Failed to connect to dirty arbiter: {e}", socket_path=self.socket_path ) from e async def execute_async(self, app_path, action, *args, **kwargs): """ Execute an action on a dirty app (async/non-blocking). Args: app_path: Import path of the dirty app action: Action to call on the app *args: Positional arguments **kwargs: Keyword arguments Returns: Result from the dirty app action Raises: DirtyConnectionError: If connection fails DirtyTimeoutError: If operation times out DirtyError: If execution fails """ # Ensure connected if self._writer is None: await self.connect_async() # Build request request_id = str(uuid.uuid4()) request = make_request( request_id=request_id, app_path=app_path, action=action, args=args, kwargs=kwargs ) try: # Send request await DirtyProtocol.write_message_async(self._writer, request) # Receive response with timeout response = await asyncio.wait_for( DirtyProtocol.read_message_async(self._reader), timeout=self.timeout ) # Handle response return self._handle_response(response) except asyncio.TimeoutError: await self._close_async() raise DirtyTimeoutError( "Timeout waiting for dirty app response", timeout=self.timeout ) except Exception as e: await self._close_async() if isinstance(e, DirtyError): raise raise DirtyConnectionError(f"Communication error: {e}") from e def stream_async(self, app_path, action, *args, **kwargs): """ Stream results from a dirty app action (async). This method returns an async iterator that yields chunks from a streaming response. Use this for actions that return generators. 
Args: app_path: Import path of the dirty app (e.g., 'myapp.ml:MLApp') action: Action to call on the app *args: Positional arguments **kwargs: Keyword arguments Yields: Chunks of data from the streaming response Raises: DirtyConnectionError: If connection fails DirtyTimeoutError: If operation times out DirtyError: If execution fails Example:: async for chunk in client.stream_async("myapp.llm:LLMApp", "generate", prompt): await response.write(chunk) """ return DirtyAsyncStreamIterator(self, app_path, action, args, kwargs) async def _close_async(self): """Close the async connection.""" if self._writer is not None: try: self._writer.close() await self._writer.wait_closed() except Exception: pass self._writer = None self._reader = None async def close_async(self): """Close the async connection.""" await self._close_async() # ------------------------------------------------------------------------- # Context managers # ------------------------------------------------------------------------- def __enter__(self): self.connect() return self def __exit__(self, exc_type, exc_val, exc_tb): self.close() async def __aenter__(self): await self.connect_async() return self async def __aexit__(self, exc_type, exc_val, exc_tb): await self.close_async() # ============================================================================= # Stream Iterator classes # ============================================================================= class DirtyStreamIterator: """ Iterator for streaming responses from dirty workers (sync). This class is returned by `DirtyClient.stream()` and yields chunks from a streaming response until the end message is received. 
Uses a deadline-based timeout approach: - Total stream timeout: limits entire stream duration - Idle timeout: limits gap between chunks (defaults to total timeout) """ # Default idle timeout between chunks (seconds) DEFAULT_IDLE_TIMEOUT = 30.0 # Threshold for applying per-read timeout (seconds) # When remaining time is above this, use a larger timeout for efficiency _TIMEOUT_THRESHOLD = 5.0 def __init__(self, client, app_path, action, args, kwargs, idle_timeout=None): self.client = client self.app_path = app_path self.action = action self.args = args self.kwargs = kwargs self._started = False self._exhausted = False self._request_id = None self._deadline = None self._last_chunk_time = None # Idle timeout: max time between chunks self._idle_timeout = ( idle_timeout if idle_timeout is not None else min(self.DEFAULT_IDLE_TIMEOUT, client.timeout) ) def __iter__(self): return self def __next__(self): if self._exhausted: raise StopIteration if not self._started: self._start_request() self._started = True return self._read_next_chunk() def _start_request(self): """Send the initial request to the arbiter.""" with self.client._lock: if self.client._sock is None: self.client.connect() # Set deadline for entire stream now = time.monotonic() self._deadline = now + self.client.timeout self._last_chunk_time = now self._request_id = str(uuid.uuid4()) request = make_request( self._request_id, self.app_path, self.action, args=self.args, kwargs=self.kwargs, ) DirtyProtocol.write_message(self.client._sock, request) def _read_next_chunk(self): """Read the next message from the stream.""" with self.client._lock: # Check total stream deadline now = time.monotonic() if now >= self._deadline: self._exhausted = True raise DirtyTimeoutError( "Stream exceeded total timeout", timeout=self.client.timeout ) remaining = self._deadline - now # Set socket timeout based on remaining time # Fast path: use larger timeout when plenty of time remains if remaining > self._TIMEOUT_THRESHOLD: read_timeout 
= self._TIMEOUT_THRESHOLD else: read_timeout = min(remaining, self._idle_timeout) try: self.client._sock.settimeout(read_timeout) response = DirtyProtocol.read_message(self.client._sock) except socket.timeout: # Check which timeout was hit now = time.monotonic() if now >= self._deadline: self._exhausted = True raise DirtyTimeoutError( "Stream exceeded total timeout", timeout=self.client.timeout ) idle_duration = now - self._last_chunk_time self._exhausted = True raise DirtyTimeoutError( f"Timeout waiting for next chunk (idle {idle_duration:.1f}s)", timeout=self._idle_timeout ) except Exception as e: self._exhausted = True self.client._close_socket() raise DirtyConnectionError(f"Communication error: {e}") from e # Update last chunk time for idle tracking self._last_chunk_time = time.monotonic() msg_type = response.get("type") # Chunk message - return the data if msg_type == DirtyProtocol.MSG_TYPE_CHUNK: return response.get("data") # End message - stop iteration if msg_type == DirtyProtocol.MSG_TYPE_END: self._exhausted = True raise StopIteration # Error message - raise exception if msg_type == DirtyProtocol.MSG_TYPE_ERROR: self._exhausted = True error_info = response.get("error", {}) raise DirtyError.from_dict(error_info) # Regular response - shouldn't happen for streaming, but handle it if msg_type == DirtyProtocol.MSG_TYPE_RESPONSE: self._exhausted = True # Return the result as the only chunk then stop raise StopIteration # Unknown type self._exhausted = True raise DirtyError(f"Unknown message type: {msg_type}") class DirtyAsyncStreamIterator: """ Async iterator for streaming responses from dirty workers. This class is returned by `DirtyClient.stream_async()` and yields chunks from a streaming response until the end message is received. 
Uses a deadline-based timeout approach for efficiency: - Total stream timeout: limits entire stream duration - Idle timeout: limits gap between chunks (defaults to total timeout) This avoids the overhead of asyncio.wait_for() on every chunk read. """ # Default idle timeout between chunks (seconds) DEFAULT_IDLE_TIMEOUT = 30.0 def __init__(self, client, app_path, action, args, kwargs, idle_timeout=None): self.client = client self.app_path = app_path self.action = action self.args = args self.kwargs = kwargs self._started = False self._exhausted = False self._request_id = None self._deadline = None self._last_chunk_time = None # Idle timeout: max time between chunks self._idle_timeout = ( idle_timeout if idle_timeout is not None else min(self.DEFAULT_IDLE_TIMEOUT, client.timeout) ) def __aiter__(self): return self async def __anext__(self): if self._exhausted: raise StopAsyncIteration if not self._started: await self._start_request() self._started = True return await self._read_next_chunk() async def _start_request(self): """Send the initial request to the arbiter.""" if self.client._writer is None: await self.client.connect_async() # Set deadline for entire stream now = time.monotonic() self._deadline = now + self.client.timeout self._last_chunk_time = now self._request_id = str(uuid.uuid4()) request = make_request( self._request_id, self.app_path, self.action, args=self.args, kwargs=self.kwargs, ) await DirtyProtocol.write_message_async(self.client._writer, request) # Threshold for applying timeout wrapper (seconds) # When remaining time is above this, skip timeout for performance _TIMEOUT_THRESHOLD = 5.0 async def _read_next_chunk(self): """Read the next message from the stream.""" # Calculate remaining time until deadline now = time.monotonic() # Check total stream deadline if now >= self._deadline: self._exhausted = True raise DirtyTimeoutError( "Stream exceeded total timeout", timeout=self.client.timeout ) remaining = self._deadline - now try: # Fast path: skip 
timeout wrapper when we have plenty of time # This avoids asyncio.wait_for() overhead for most chunks if remaining > self._TIMEOUT_THRESHOLD: response = await DirtyProtocol.read_message_async( self.client._reader ) else: # Near deadline: apply timeout protection read_timeout = min(remaining, self._idle_timeout) response = await asyncio.wait_for( DirtyProtocol.read_message_async(self.client._reader), timeout=read_timeout ) except asyncio.TimeoutError: self._exhausted = True now = time.monotonic() if now >= self._deadline: raise DirtyTimeoutError( "Stream exceeded total timeout", timeout=self.client.timeout ) idle_duration = now - self._last_chunk_time raise DirtyTimeoutError( f"Timeout waiting for next chunk (idle {idle_duration:.1f}s)", timeout=self._idle_timeout ) except Exception as e: self._exhausted = True await self.client._close_async() raise DirtyConnectionError(f"Communication error: {e}") from e # Update last chunk time for idle tracking self._last_chunk_time = time.monotonic() msg_type = response.get("type") # Chunk message - return the data if msg_type == DirtyProtocol.MSG_TYPE_CHUNK: return response.get("data") # End message - stop iteration if msg_type == DirtyProtocol.MSG_TYPE_END: self._exhausted = True raise StopAsyncIteration # Error message - raise exception if msg_type == DirtyProtocol.MSG_TYPE_ERROR: self._exhausted = True error_info = response.get("error", {}) raise DirtyError.from_dict(error_info) # Regular response - shouldn't happen for streaming if msg_type == DirtyProtocol.MSG_TYPE_RESPONSE: self._exhausted = True raise StopAsyncIteration # Unknown type self._exhausted = True raise DirtyError(f"Unknown message type: {msg_type}") # ============================================================================= # Thread-local and context-local client management # ============================================================================= # Thread-local storage for sync workers _thread_local = threading.local() # Context var for async workers 
_async_client_var: contextvars.ContextVar[DirtyClient] = contextvars.ContextVar( 'dirty_client' ) # Global socket path (set by arbiter) _dirty_socket_path = None def set_dirty_socket_path(path): """Set the global dirty socket path (called during initialization).""" global _dirty_socket_path # pylint: disable=global-statement _dirty_socket_path = path # Also set the stash socket path (uses same arbiter socket) from .stash import set_stash_socket_path set_stash_socket_path(path) def get_dirty_socket_path(): """Get the dirty socket path.""" if _dirty_socket_path is None: # Check environment variable path = os.environ.get('GUNICORN_DIRTY_SOCKET') if path: return path raise DirtyError( "Dirty socket path not configured. " "Make sure dirty_workers > 0 and dirty_apps are configured." ) return _dirty_socket_path def get_dirty_client(timeout=30.0) -> DirtyClient: """ Get or create a thread-local sync client. This is the recommended way to get a client in sync HTTP workers. Args: timeout: Timeout for operations in seconds Returns: DirtyClient: Thread-local client instance Example:: from gunicorn.dirty import get_dirty_client def my_view(request): client = get_dirty_client() result = client.execute("myapp.ml:MLApp", "inference", data) return result """ client = getattr(_thread_local, 'dirty_client', None) if client is None: socket_path = get_dirty_socket_path() client = DirtyClient(socket_path, timeout=timeout) _thread_local.dirty_client = client return client async def get_dirty_client_async(timeout=30.0) -> DirtyClient: """ Get or create a context-local async client. This is the recommended way to get a client in async HTTP workers. 
Args: timeout: Timeout for operations in seconds Returns: DirtyClient: Context-local client instance Example:: from gunicorn.dirty import get_dirty_client_async async def my_view(request): client = await get_dirty_client_async() result = await client.execute_async("myapp.ml:MLApp", "inference", data) return result """ try: client = _async_client_var.get() except LookupError: socket_path = get_dirty_socket_path() client = DirtyClient(socket_path, timeout=timeout) _async_client_var.set(client) return client def close_dirty_client(): """Close the thread-local client (call on worker exit).""" client = getattr(_thread_local, 'dirty_client', None) if client is not None: client.close() _thread_local.dirty_client = None async def close_dirty_client_async(): """Close the context-local async client.""" try: client = _async_client_var.get() await client.close_async() except LookupError: pass benoitc-gunicorn-f5fb19e/gunicorn/dirty/errors.py000066400000000000000000000144011514360242400223300ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Dirty Arbiters Error Classes Exception hierarchy for dirty worker pool operations. """ class DirtyError(Exception): """Base exception for all dirty arbiter errors.""" def __init__(self, message, details=None): self.message = message self.details = details or {} super().__init__(message) def __str__(self): if self.details: return f"{self.message}: {self.details}" return self.message def to_dict(self): """Serialize error for protocol transmission.""" return { "error_type": self.__class__.__name__, "message": self.message, "details": self.details, } @classmethod def from_dict(cls, data): """Deserialize error from protocol transmission. Creates an error instance from a serialized dict. The returned error will be an instance of the appropriate subclass based on the error_type field, but constructed using the base DirtyError __init__ to preserve all details. 
""" error_classes = { "DirtyError": DirtyError, "DirtyTimeoutError": DirtyTimeoutError, "DirtyConnectionError": DirtyConnectionError, "DirtyWorkerError": DirtyWorkerError, "DirtyAppError": DirtyAppError, "DirtyAppNotFoundError": DirtyAppNotFoundError, "DirtyNoWorkersAvailableError": DirtyNoWorkersAvailableError, "DirtyProtocolError": DirtyProtocolError, } error_type = data.get("error_type", "DirtyError") error_class = error_classes.get(error_type, DirtyError) # Create instance and set attributes directly to bypass # subclass __init__ complexity while preserving error type error = Exception.__new__(error_class) error.message = data.get("message", "Unknown error") error.details = data.get("details") or {} Exception.__init__(error, error.message) # Set subclass-specific attributes from details if error_class == DirtyTimeoutError: error.timeout = error.details.get("timeout") elif error_class == DirtyConnectionError: error.socket_path = error.details.get("socket_path") elif error_class == DirtyWorkerError: error.worker_id = error.details.get("worker_id") error.traceback = error.details.get("traceback") elif error_class in (DirtyAppError, DirtyAppNotFoundError): error.app_path = error.details.get("app_path") error.action = error.details.get("action") error.traceback = error.details.get("traceback") elif error_class == DirtyNoWorkersAvailableError: error.app_path = error.details.get("app_path") return error class DirtyTimeoutError(DirtyError): """Raised when a dirty operation times out.""" def __init__(self, message="Operation timed out", timeout=None): details = {"timeout": timeout} if timeout else {} super().__init__(message, details) self.timeout = timeout class DirtyConnectionError(DirtyError): """Raised when connection to dirty arbiter fails.""" def __init__(self, message="Connection failed", socket_path=None): details = {"socket_path": socket_path} if socket_path else {} super().__init__(message, details) self.socket_path = socket_path class 
DirtyWorkerError(DirtyError): """Raised when a dirty worker encounters an error.""" def __init__(self, message, worker_id=None, traceback=None): details = {} if worker_id is not None: details["worker_id"] = worker_id if traceback: details["traceback"] = traceback super().__init__(message, details) self.worker_id = worker_id self.traceback = traceback class DirtyAppError(DirtyError): """Raised when a dirty app encounters an error during execution.""" def __init__(self, message, app_path=None, action=None, traceback=None): details = {} if app_path: details["app_path"] = app_path if action: details["action"] = action if traceback: details["traceback"] = traceback super().__init__(message, details) self.app_path = app_path self.action = action self.traceback = traceback class DirtyAppNotFoundError(DirtyAppError): """Raised when a dirty app is not found.""" def __init__(self, app_path): super().__init__(f"Dirty app not found: {app_path}", app_path=app_path) class DirtyNoWorkersAvailableError(DirtyError): """ Raised when no workers are available for the requested app. This exception is raised when a request targets an app that has worker limits configured, and no workers with that app are currently available (e.g., all workers for that app crashed and haven't been respawned yet). Web applications can catch this exception to provide graceful degradation, such as queuing requests for retry or showing a maintenance page. 
Example:: from gunicorn.dirty import get_dirty_client from gunicorn.dirty.errors import DirtyNoWorkersAvailableError def my_view(request): client = get_dirty_client() try: result = client.execute("myapp.ml:HeavyModel", "predict", data) except DirtyNoWorkersAvailableError as e: return {"error": "Service temporarily unavailable", "app": e.app_path} """ def __init__(self, app_path, message=None): if message is None: message = f"No workers available for app: {app_path}" super().__init__(message, details={"app_path": app_path}) self.app_path = app_path class DirtyProtocolError(DirtyError): """Raised when there is a protocol-level error.""" def __init__(self, message="Protocol error", raw_data=None): details = {} if raw_data is not None: # Truncate raw data for safety if isinstance(raw_data, bytes): raw_data = raw_data[:100].hex() details["raw_data"] = str(raw_data)[:200] super().__init__(message, details) benoitc-gunicorn-f5fb19e/gunicorn/dirty/protocol.py000066400000000000000000000604361514360242400226660ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Dirty Worker Binary Protocol Binary message framing over Unix sockets, inspired by OpenBSD msgctl/msgsnd. Replaces JSON protocol for efficient binary data transfer. Header Format (16 bytes): +--------+--------+--------+--------+--------+--------+--------+--------+ | Magic (2B) | Ver(1) | MType | Payload Length (4B) | +--------+--------+--------+--------+--------+--------+--------+--------+ | Request ID (8 bytes) | +--------+--------+--------+--------+--------+--------+--------+--------+ - Magic: 0x47 0x44 ("GD" for Gunicorn Dirty) - Version: 0x01 - MType: Message type (REQUEST, RESPONSE, ERROR, CHUNK, END) - Length: Payload size (big-endian uint32, max 64MB) - Request ID: uint64 (replaces UUID string) Payload is TLV-encoded (see tlv.py). 
""" import asyncio import socket import struct from .errors import DirtyProtocolError from .tlv import TLVEncoder # Protocol constants MAGIC = b"GD" # 0x47 0x44 VERSION = 0x01 # Message types (1 byte) MSG_TYPE_REQUEST = 0x01 MSG_TYPE_RESPONSE = 0x02 MSG_TYPE_ERROR = 0x03 MSG_TYPE_CHUNK = 0x04 MSG_TYPE_END = 0x05 MSG_TYPE_STASH = 0x10 # Stash operations (shared state between workers) MSG_TYPE_STATUS = 0x11 # Status query for arbiter/workers MSG_TYPE_MANAGE = 0x12 # Worker management (add/remove workers) # Message type names (for backwards compatibility with old API) MSG_TYPE_REQUEST_STR = "request" MSG_TYPE_RESPONSE_STR = "response" MSG_TYPE_ERROR_STR = "error" MSG_TYPE_CHUNK_STR = "chunk" MSG_TYPE_END_STR = "end" MSG_TYPE_STASH_STR = "stash" MSG_TYPE_STATUS_STR = "status" MSG_TYPE_MANAGE_STR = "manage" # Map int types to string names MSG_TYPE_TO_STR = { MSG_TYPE_REQUEST: MSG_TYPE_REQUEST_STR, MSG_TYPE_RESPONSE: MSG_TYPE_RESPONSE_STR, MSG_TYPE_ERROR: MSG_TYPE_ERROR_STR, MSG_TYPE_CHUNK: MSG_TYPE_CHUNK_STR, MSG_TYPE_END: MSG_TYPE_END_STR, MSG_TYPE_STASH: MSG_TYPE_STASH_STR, MSG_TYPE_STATUS: MSG_TYPE_STATUS_STR, MSG_TYPE_MANAGE: MSG_TYPE_MANAGE_STR, } # Map string names to int types MSG_TYPE_FROM_STR = {v: k for k, v in MSG_TYPE_TO_STR.items()} # Stash operation codes STASH_OP_PUT = 1 STASH_OP_GET = 2 STASH_OP_DELETE = 3 STASH_OP_KEYS = 4 STASH_OP_CLEAR = 5 STASH_OP_INFO = 6 STASH_OP_ENSURE = 7 STASH_OP_DELETE_TABLE = 8 STASH_OP_TABLES = 9 STASH_OP_EXISTS = 10 # Manage operation codes MANAGE_OP_ADD = 1 # Add/spawn workers MANAGE_OP_REMOVE = 2 # Remove/kill workers # Header format: Magic (2) + Version (1) + Type (1) + Length (4) + RequestID (8) = 16 HEADER_FORMAT = ">2sBBIQ" HEADER_SIZE = struct.calcsize(HEADER_FORMAT) # Maximum message size (64 MB) MAX_MESSAGE_SIZE = 64 * 1024 * 1024 class BinaryProtocol: """Binary message protocol for dirty worker IPC.""" # Export constants for external use HEADER_SIZE = HEADER_SIZE MAX_MESSAGE_SIZE = MAX_MESSAGE_SIZE MSG_TYPE_REQUEST 
= MSG_TYPE_REQUEST_STR MSG_TYPE_RESPONSE = MSG_TYPE_RESPONSE_STR MSG_TYPE_ERROR = MSG_TYPE_ERROR_STR MSG_TYPE_CHUNK = MSG_TYPE_CHUNK_STR MSG_TYPE_END = MSG_TYPE_END_STR MSG_TYPE_STASH = MSG_TYPE_STASH_STR MSG_TYPE_STATUS = MSG_TYPE_STATUS_STR MSG_TYPE_MANAGE = MSG_TYPE_MANAGE_STR @staticmethod def encode_header(msg_type: int, request_id: int, payload_length: int) -> bytes: """ Encode the 16-byte message header. Args: msg_type: Message type (MSG_TYPE_REQUEST, etc.) request_id: Unique request identifier (uint64) payload_length: Length of the TLV-encoded payload Returns: bytes: 16-byte header """ return struct.pack(HEADER_FORMAT, MAGIC, VERSION, msg_type, payload_length, request_id) @staticmethod def decode_header(data: bytes) -> tuple: """ Decode the 16-byte message header. Args: data: 16 bytes of header data Returns: tuple: (msg_type, request_id, payload_length) Raises: DirtyProtocolError: If header is invalid """ if len(data) < HEADER_SIZE: raise DirtyProtocolError( f"Header too short: {len(data)} bytes, expected {HEADER_SIZE}", raw_data=data ) magic, version, msg_type, length, request_id = struct.unpack( HEADER_FORMAT, data[:HEADER_SIZE] ) if magic != MAGIC: raise DirtyProtocolError( f"Invalid magic: {magic!r}, expected {MAGIC!r}", raw_data=data[:20] ) if version != VERSION: raise DirtyProtocolError( f"Unsupported protocol version: {version}, expected {VERSION}", raw_data=data[:20] ) if msg_type not in MSG_TYPE_TO_STR: raise DirtyProtocolError( f"Unknown message type: 0x{msg_type:02x}", raw_data=data[:20] ) if length > MAX_MESSAGE_SIZE: raise DirtyProtocolError( f"Message too large: {length} bytes (max: {MAX_MESSAGE_SIZE})" ) return msg_type, request_id, length @staticmethod def encode_request(request_id: int, app_path: str, action: str, args: tuple = None, kwargs: dict = None) -> bytes: """ Encode a request message. 
Args: request_id: Unique request identifier (uint64) app_path: Import path of the dirty app action: Action to call on the app args: Positional arguments kwargs: Keyword arguments Returns: bytes: Complete message (header + payload) """ payload_dict = { "app_path": app_path, "action": action, "args": list(args) if args else [], "kwargs": kwargs or {}, } payload = TLVEncoder.encode(payload_dict) header = BinaryProtocol.encode_header(MSG_TYPE_REQUEST, request_id, len(payload)) return header + payload @staticmethod def encode_response(request_id: int, result) -> bytes: """ Encode a success response message. Args: request_id: Request identifier this responds to result: Result value (must be TLV-serializable) Returns: bytes: Complete message (header + payload) """ payload_dict = {"result": result} payload = TLVEncoder.encode(payload_dict) header = BinaryProtocol.encode_header(MSG_TYPE_RESPONSE, request_id, len(payload)) return header + payload @staticmethod def encode_error(request_id: int, error) -> bytes: """ Encode an error response message. Args: request_id: Request identifier this responds to error: DirtyError instance, dict, or Exception Returns: bytes: Complete message (header + payload) """ from .errors import DirtyError if isinstance(error, DirtyError): error_dict = error.to_dict() elif isinstance(error, dict): error_dict = error else: error_dict = { "error_type": type(error).__name__, "message": str(error), "details": {}, } payload_dict = {"error": error_dict} payload = TLVEncoder.encode(payload_dict) header = BinaryProtocol.encode_header(MSG_TYPE_ERROR, request_id, len(payload)) return header + payload @staticmethod def encode_chunk(request_id: int, data) -> bytes: """ Encode a chunk message for streaming responses. 
Args: request_id: Request identifier this chunk belongs to data: Chunk data (must be TLV-serializable) Returns: bytes: Complete message (header + payload) """ payload_dict = {"data": data} payload = TLVEncoder.encode(payload_dict) header = BinaryProtocol.encode_header(MSG_TYPE_CHUNK, request_id, len(payload)) return header + payload @staticmethod def encode_end(request_id: int) -> bytes: """ Encode an end-of-stream message. Args: request_id: Request identifier this ends Returns: bytes: Complete message (header + empty payload) """ # End message has empty payload header = BinaryProtocol.encode_header(MSG_TYPE_END, request_id, 0) return header @staticmethod def encode_status(request_id: int) -> bytes: """ Encode a status query message. Args: request_id: Request identifier Returns: bytes: Complete message (header + empty payload) """ # Status query has empty payload header = BinaryProtocol.encode_header(MSG_TYPE_STATUS, request_id, 0) return header @staticmethod def encode_manage(request_id: int, op: int, count: int = 1) -> bytes: """ Encode a worker management message. Args: request_id: Request identifier op: Management operation (MANAGE_OP_ADD or MANAGE_OP_REMOVE) count: Number of workers to add/remove Returns: bytes: Complete message (header + payload) """ payload_dict = { "op": op, "count": count, } payload = TLVEncoder.encode(payload_dict) header = BinaryProtocol.encode_header(MSG_TYPE_MANAGE, request_id, len(payload)) return header + payload @staticmethod def encode_stash(request_id: int, op: int, table: str, key=None, value=None, pattern=None) -> bytes: """ Encode a stash operation message. 
Args: request_id: Unique request identifier (uint64) op: Stash operation code (STASH_OP_*) table: Table name key: Optional key for put/get/delete operations value: Optional value for put operation pattern: Optional pattern for keys operation Returns: bytes: Complete message (header + payload) """ payload_dict = { "op": op, "table": table, } if key is not None: payload_dict["key"] = key if value is not None: payload_dict["value"] = value if pattern is not None: payload_dict["pattern"] = pattern payload = TLVEncoder.encode(payload_dict) header = BinaryProtocol.encode_header(MSG_TYPE_STASH, request_id, len(payload)) return header + payload @staticmethod def decode_message(data: bytes) -> tuple: """ Decode a complete message (header + payload). Args: data: Complete message bytes Returns: tuple: (msg_type_str, request_id, payload_dict) msg_type_str is the string name (e.g., "request") payload_dict is the decoded TLV payload as a dict Raises: DirtyProtocolError: If message is malformed """ msg_type, request_id, length = BinaryProtocol.decode_header(data) if len(data) < HEADER_SIZE + length: raise DirtyProtocolError( f"Incomplete message: expected {HEADER_SIZE + length} bytes, " f"got {len(data)}", raw_data=data[:50] ) if length == 0: # End message has empty payload payload_dict = {} else: payload_data = data[HEADER_SIZE:HEADER_SIZE + length] try: payload_dict = TLVEncoder.decode_full(payload_data) except DirtyProtocolError: raise except Exception as e: raise DirtyProtocolError( f"Failed to decode TLV payload: {e}", raw_data=payload_data[:50] ) # Convert to dict format similar to old JSON protocol msg_type_str = MSG_TYPE_TO_STR[msg_type] return msg_type_str, request_id, payload_dict # ------------------------------------------------------------------------- # Async API (primary - for DirtyArbiter and DirtyWorker) # ------------------------------------------------------------------------- @staticmethod async def read_message_async(reader: asyncio.StreamReader) -> dict: """ 
Read a complete binary message from async stream. Args: reader: asyncio StreamReader Returns: dict: Message dict with 'type', 'id', and payload fields Raises: DirtyProtocolError: If read fails or message is malformed asyncio.IncompleteReadError: If connection closed mid-read """ # Read header try: header = await reader.readexactly(HEADER_SIZE) except asyncio.IncompleteReadError as e: if len(e.partial) == 0: # Clean close - no data was read raise raise DirtyProtocolError( f"Incomplete header: got {len(e.partial)} bytes, " f"expected {HEADER_SIZE}", raw_data=e.partial ) msg_type, request_id, length = BinaryProtocol.decode_header(header) # Read payload if length > 0: try: payload_data = await reader.readexactly(length) except asyncio.IncompleteReadError as e: raise DirtyProtocolError( f"Incomplete payload: got {len(e.partial)} bytes, " f"expected {length}", raw_data=e.partial ) try: payload_dict = TLVEncoder.decode_full(payload_data) except DirtyProtocolError: raise except Exception as e: raise DirtyProtocolError( f"Failed to decode TLV payload: {e}", raw_data=payload_data[:50] ) else: payload_dict = {} # Build response dict msg_type_str = MSG_TYPE_TO_STR[msg_type] result = {"type": msg_type_str, "id": request_id} result.update(payload_dict) return result @staticmethod async def write_message_async(writer: asyncio.StreamWriter, message: dict) -> None: """ Write a message to async stream. Accepts dict format for backwards compatibility. 
Args: writer: asyncio StreamWriter message: Message dict with 'type', 'id', and payload fields Raises: DirtyProtocolError: If encoding fails ConnectionError: If write fails """ data = BinaryProtocol._encode_from_dict(message) writer.write(data) await writer.drain() # ------------------------------------------------------------------------- # Sync API (for HTTP workers that may not be async) # ------------------------------------------------------------------------- @staticmethod def _recv_exactly(sock: socket.socket, n: int) -> bytes: """ Receive exactly n bytes from a socket. Args: sock: Socket to read from n: Number of bytes to read Returns: bytes: Received data Raises: DirtyProtocolError: If read fails or connection closed """ data = b"" while len(data) < n: chunk = sock.recv(n - len(data)) if not chunk: if len(data) == 0: raise DirtyProtocolError("Connection closed") raise DirtyProtocolError( f"Connection closed after {len(data)} bytes, expected {n}", raw_data=data ) data += chunk return data @staticmethod def read_message(sock: socket.socket) -> dict: """ Read a complete message from socket (sync). 
Args: sock: Socket to read from Returns: dict: Message dict with 'type', 'id', and payload fields Raises: DirtyProtocolError: If read fails or message is malformed """ # Read header header = BinaryProtocol._recv_exactly(sock, HEADER_SIZE) msg_type, request_id, length = BinaryProtocol.decode_header(header) # Read payload if length > 0: payload_data = BinaryProtocol._recv_exactly(sock, length) try: payload_dict = TLVEncoder.decode_full(payload_data) except DirtyProtocolError: raise except Exception as e: raise DirtyProtocolError( f"Failed to decode TLV payload: {e}", raw_data=payload_data[:50] ) else: payload_dict = {} # Build response dict msg_type_str = MSG_TYPE_TO_STR[msg_type] result = {"type": msg_type_str, "id": request_id} result.update(payload_dict) return result @staticmethod def write_message(sock: socket.socket, message: dict) -> None: """ Write a message to socket (sync). Args: sock: Socket to write to message: Message dict with 'type', 'id', and payload fields Raises: DirtyProtocolError: If encoding fails OSError: If write fails """ data = BinaryProtocol._encode_from_dict(message) sock.sendall(data) @staticmethod def _encode_from_dict(message: dict) -> bytes: # pylint: disable=too-many-return-statements """ Encode a message dict to binary format. Supports the old dict-based API for backwards compatibility. 
Args: message: Message dict with 'type', 'id', and payload fields Returns: bytes: Complete encoded message """ msg_type_str = message.get("type") request_id = message.get("id", 0) # Handle string or int request IDs if isinstance(request_id, str): # For backwards compat with UUID strings, hash to int request_id = hash(request_id) & 0xFFFFFFFFFFFFFFFF msg_type = MSG_TYPE_FROM_STR.get(msg_type_str) if msg_type is None: raise DirtyProtocolError(f"Unknown message type: {msg_type_str}") if msg_type == MSG_TYPE_REQUEST: return BinaryProtocol.encode_request( request_id, message.get("app_path", ""), message.get("action", ""), message.get("args"), message.get("kwargs") ) elif msg_type == MSG_TYPE_RESPONSE: return BinaryProtocol.encode_response( request_id, message.get("result") ) elif msg_type == MSG_TYPE_ERROR: return BinaryProtocol.encode_error( request_id, message.get("error", {}) ) elif msg_type == MSG_TYPE_CHUNK: return BinaryProtocol.encode_chunk( request_id, message.get("data") ) elif msg_type == MSG_TYPE_END: return BinaryProtocol.encode_end(request_id) elif msg_type == MSG_TYPE_STASH: return BinaryProtocol.encode_stash( request_id, message.get("op"), message.get("table", ""), message.get("key"), message.get("value"), message.get("pattern") ) elif msg_type == MSG_TYPE_STATUS: return BinaryProtocol.encode_status(request_id) elif msg_type == MSG_TYPE_MANAGE: return BinaryProtocol.encode_manage( request_id, message.get("op"), message.get("count", 1) ) else: raise DirtyProtocolError(f"Unhandled message type: {msg_type}") # ============================================================================= # Backwards Compatibility Aliases # ============================================================================= # Alias BinaryProtocol as DirtyProtocol for drop-in replacement DirtyProtocol = BinaryProtocol # Message builder helpers (backwards compatible with old API) def make_request(request_id, app_path: str, action: str, args: tuple = None, kwargs: dict = None) -> dict: 
""" Build a request message dict. Args: request_id: Unique request identifier (int or str) app_path: Import path of the dirty app (e.g., 'myapp.ml:MLApp') action: Action to call on the app args: Positional arguments kwargs: Keyword arguments Returns: dict: Request message dict """ return { "type": DirtyProtocol.MSG_TYPE_REQUEST, "id": request_id, "app_path": app_path, "action": action, "args": list(args) if args else [], "kwargs": kwargs or {}, } def make_response(request_id, result) -> dict: """ Build a success response message dict. Args: request_id: Request identifier this responds to result: Result value Returns: dict: Response message dict """ return { "type": DirtyProtocol.MSG_TYPE_RESPONSE, "id": request_id, "result": result, } def make_error_response(request_id, error) -> dict: """ Build an error response message dict. Args: request_id: Request identifier this responds to error: DirtyError instance or dict with error info Returns: dict: Error response message dict """ from .errors import DirtyError if isinstance(error, DirtyError): error_dict = error.to_dict() elif isinstance(error, dict): error_dict = error else: error_dict = { "error_type": type(error).__name__, "message": str(error), "details": {}, } return { "type": DirtyProtocol.MSG_TYPE_ERROR, "id": request_id, "error": error_dict, } def make_chunk_message(request_id, data) -> dict: """ Build a chunk message dict for streaming responses. Args: request_id: Request identifier this chunk belongs to data: Chunk data Returns: dict: Chunk message dict """ return { "type": DirtyProtocol.MSG_TYPE_CHUNK, "id": request_id, "data": data, } def make_end_message(request_id) -> dict: """ Build an end-of-stream message dict. Args: request_id: Request identifier this ends Returns: dict: End message dict """ return { "type": DirtyProtocol.MSG_TYPE_END, "id": request_id, } def make_stash_message(request_id, op: int, table: str, key=None, value=None, pattern=None) -> dict: """ Build a stash operation message dict. 
Args: request_id: Unique request identifier (int or str) op: Stash operation code (STASH_OP_*) table: Table name key: Optional key for put/get/delete operations value: Optional value for put operation pattern: Optional pattern for keys operation Returns: dict: Stash message dict """ msg = { "type": DirtyProtocol.MSG_TYPE_STASH, "id": request_id, "op": op, "table": table, } if key is not None: msg["key"] = key if value is not None: msg["value"] = value if pattern is not None: msg["pattern"] = pattern return msg def make_manage_message(request_id, op: int, count: int = 1) -> dict: """ Build a worker management message dict. Args: request_id: Unique request identifier (int or str) op: Management operation (MANAGE_OP_ADD or MANAGE_OP_REMOVE) count: Number of workers to add/remove Returns: dict: Manage message dict """ return { "type": DirtyProtocol.MSG_TYPE_MANAGE, "id": request_id, "op": op, "count": count, } benoitc-gunicorn-f5fb19e/gunicorn/dirty/stash.py000066400000000000000000000324161514360242400221440ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Stash - Global Shared State for Dirty Workers Provides simple key-value tables stored in the arbiter process. All workers can read and write to the same tables. 
Usage:: from gunicorn.dirty import stash # Basic operations - table is auto-created on first access stash.put("sessions", "user:1", {"name": "Alice", "role": "admin"}) user = stash.get("sessions", "user:1") stash.delete("sessions", "user:1") # Dict-like interface sessions = stash.table("sessions") sessions["user:1"] = {"name": "Alice"} user = sessions["user:1"] del sessions["user:1"] # Query operations keys = stash.keys("sessions") keys = stash.keys("sessions", pattern="user:*") # Table management stash.ensure("cache") # Explicit creation (idempotent) stash.clear("sessions") # Delete all entries stash.delete_table("sessions") # Delete the table itself tables = stash.tables() # List all tables Declarative usage in DirtyApp:: class MyApp(DirtyApp): stashes = ["sessions", "cache"] # Auto-created on arbiter start def __call__(self, action, *args, **kwargs): # Tables are ready to use stash.put("sessions", "key", "value") Note: Tables are stored in the arbiter process and are ephemeral. If the arbiter restarts, all data is lost. """ import threading import uuid from .errors import DirtyError from .protocol import ( DirtyProtocol, STASH_OP_PUT, STASH_OP_GET, STASH_OP_DELETE, STASH_OP_KEYS, STASH_OP_CLEAR, STASH_OP_INFO, STASH_OP_ENSURE, STASH_OP_DELETE_TABLE, STASH_OP_TABLES, STASH_OP_EXISTS, make_stash_message, ) class StashError(DirtyError): """Base exception for stash operations.""" class StashTableNotFoundError(StashError): """Raised when a table does not exist.""" def __init__(self, table_name): self.table_name = table_name super().__init__(f"Stash table not found: {table_name}") class StashKeyNotFoundError(StashError): """Raised when a key does not exist in a table.""" def __init__(self, table_name, key): self.table_name = table_name self.key = key super().__init__(f"Key not found in {table_name}: {key}") class StashClient: """ Client for stash operations. Communicates with the arbiter which stores all tables in memory. 
""" def __init__(self, socket_path, timeout=30.0): """ Initialize the stash client. Args: socket_path: Path to the dirty arbiter's Unix socket timeout: Default timeout for operations in seconds """ self.socket_path = socket_path self.timeout = timeout self._sock = None self._lock = threading.Lock() def _get_request_id(self): """Generate a unique request ID.""" return str(uuid.uuid4()) def _connect(self): """Establish connection to arbiter.""" import socket if self._sock is not None: return try: self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self._sock.settimeout(self.timeout) self._sock.connect(self.socket_path) except (socket.error, OSError) as e: self._sock = None raise StashError(f"Failed to connect to arbiter: {e}") from e def _close(self): """Close the connection.""" if self._sock is not None: try: self._sock.close() except Exception: pass self._sock = None def _execute(self, op, table, key=None, value=None, pattern=None): """ Execute a stash operation. Args: op: Operation code (STASH_OP_*) table: Table name key: Optional key value: Optional value pattern: Optional pattern for keys operation Returns: Result from the operation """ with self._lock: if self._sock is None: self._connect() request_id = self._get_request_id() message = make_stash_message( request_id, op, table, key=key, value=value, pattern=pattern ) try: DirtyProtocol.write_message(self._sock, message) response = DirtyProtocol.read_message(self._sock) msg_type = response.get("type") if msg_type == DirtyProtocol.MSG_TYPE_RESPONSE: return response.get("result") elif msg_type == DirtyProtocol.MSG_TYPE_ERROR: error_info = response.get("error", {}) error_type = error_info.get("error_type", "StashError") error_msg = error_info.get("message", "Unknown error") if error_type == "StashTableNotFoundError": raise StashTableNotFoundError(table) if error_type == "StashKeyNotFoundError": raise StashKeyNotFoundError(table, key) raise StashError(error_msg) else: raise StashError(f"Unexpected 
response type: {msg_type}") except Exception as e: self._close() if isinstance(e, StashError): raise raise StashError(f"Stash operation failed: {e}") from e # ------------------------------------------------------------------------- # Public API # ------------------------------------------------------------------------- def put(self, table, key, value): """ Store a value in a table. The table is automatically created if it doesn't exist. Args: table: Table name key: Key to store under value: Value to store (must be serializable) """ self._execute(STASH_OP_PUT, table, key=key, value=value) def get(self, table, key, default=None): """ Retrieve a value from a table. Args: table: Table name key: Key to retrieve default: Default value if key not found Returns: The stored value, or default if not found """ try: return self._execute(STASH_OP_GET, table, key=key) except StashKeyNotFoundError: return default def delete(self, table, key): """ Delete a key from a table. Args: table: Table name key: Key to delete Returns: True if key was deleted, False if it didn't exist """ return self._execute(STASH_OP_DELETE, table, key=key) def keys(self, table, pattern=None): """ Get all keys in a table, optionally filtered by pattern. Args: table: Table name pattern: Optional glob pattern (e.g., "user:*") Returns: List of keys """ return self._execute(STASH_OP_KEYS, table, pattern=pattern) def clear(self, table): """ Delete all entries in a table. Args: table: Table name """ self._execute(STASH_OP_CLEAR, table) def info(self, table): """ Get information about a table. Args: table: Table name Returns: Dict with table info (size, etc.) """ return self._execute(STASH_OP_INFO, table) def ensure(self, table): """ Ensure a table exists (create if not exists). This is idempotent - calling it multiple times is safe. Args: table: Table name """ self._execute(STASH_OP_ENSURE, table) def exists(self, table, key=None): """ Check if a table or key exists. 
Args: table: Table name key: Optional key to check within the table Returns: True if exists, False otherwise """ return self._execute(STASH_OP_EXISTS, table, key=key) def delete_table(self, table): """ Delete an entire table. Args: table: Table name """ self._execute(STASH_OP_DELETE_TABLE, table) def tables(self): """ List all tables. Returns: List of table names """ return self._execute(STASH_OP_TABLES, "") def table(self, name): """ Get a dict-like interface to a table. Args: name: Table name Returns: StashTable instance """ return StashTable(self, name) def close(self): """Close the client connection.""" with self._lock: self._close() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.close() class StashTable: """ Dict-like interface to a stash table. Example:: sessions = stash.table("sessions") sessions["user:1"] = {"name": "Alice"} user = sessions["user:1"] del sessions["user:1"] # Iteration for key in sessions: print(key, sessions[key]) """ def __init__(self, client, name): self._client = client self._name = name @property def name(self): """Table name.""" return self._name def __getitem__(self, key): result = self._client.get(self._name, key) if result is None: # Check if key actually exists with None value if not self._client.exists(self._name, key): raise KeyError(key) return result def __setitem__(self, key, value): self._client.put(self._name, key, value) def __delitem__(self, key): if not self._client.delete(self._name, key): raise KeyError(key) def __contains__(self, key): return self._client.exists(self._name, key) def __iter__(self): return iter(self._client.keys(self._name)) def __len__(self): info = self._client.info(self._name) return info.get("size", 0) def get(self, key, default=None): """Get value with default.""" return self._client.get(self._name, key, default) def keys(self, pattern=None): """Get all keys, optionally filtered by pattern.""" return self._client.keys(self._name, pattern=pattern) def 
clear(self): """Delete all entries.""" self._client.clear(self._name) def items(self): """Iterate over (key, value) pairs.""" for key in self._client.keys(self._name): yield key, self._client.get(self._name, key) def values(self): """Iterate over values.""" for key in self._client.keys(self._name): yield self._client.get(self._name, key) # ============================================================================= # Global stash instance (module-level API) # ============================================================================= # Thread-local storage for stash clients _thread_local = threading.local() # Global socket path _stash_socket_path = None def set_stash_socket_path(path): """Set the global stash socket path (called during initialization).""" global _stash_socket_path # pylint: disable=global-statement _stash_socket_path = path def get_stash_socket_path(): """Get the stash socket path.""" import os if _stash_socket_path is None: # Check environment variable path = os.environ.get('GUNICORN_DIRTY_SOCKET') if path: return path raise StashError( "Stash socket path not configured. " "Make sure dirty_workers > 0 and dirty_apps are configured." 
) return _stash_socket_path def _get_client(): """Get or create a thread-local stash client.""" client = getattr(_thread_local, 'stash_client', None) if client is None: socket_path = get_stash_socket_path() client = StashClient(socket_path) _thread_local.stash_client = client return client # Module-level functions that use the thread-local client def put(table, key, value): """Store a value in a table.""" _get_client().put(table, key, value) def get(table, key, default=None): """Retrieve a value from a table.""" return _get_client().get(table, key, default) def delete(table, key): """Delete a key from a table.""" return _get_client().delete(table, key) def keys(table, pattern=None): """Get all keys in a table.""" return _get_client().keys(table, pattern) def clear(table): """Delete all entries in a table.""" _get_client().clear(table) def info(table): """Get information about a table.""" return _get_client().info(table) def ensure(table): """Ensure a table exists.""" _get_client().ensure(table) def exists(table, key=None): """Check if a table or key exists.""" return _get_client().exists(table, key) def delete_table(table): """Delete an entire table.""" _get_client().delete_table(table) def tables(): """List all tables.""" return _get_client().tables() def table(name): """Get a dict-like interface to a table.""" return _get_client().table(name) benoitc-gunicorn-f5fb19e/gunicorn/dirty/tlv.py000066400000000000000000000242741514360242400216320ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ TLV (Type-Length-Value) Binary Encoder/Decoder Provides efficient binary serialization for dirty worker protocol messages. Inspired by OpenBSD msgctl/msgsnd message format. 
Type Codes: 0x00: None (no value bytes) 0x01: bool (1 byte: 0x00 or 0x01) 0x05: int64 (8 bytes big-endian signed) 0x06: float64 (8 bytes IEEE 754) 0x10: bytes (4-byte length + raw bytes) 0x11: string (4-byte length + UTF-8 encoded) 0x20: list (4-byte count + encoded elements) 0x21: dict (4-byte count + encoded key-value pairs) """ import struct from .errors import DirtyProtocolError # Type codes TYPE_NONE = 0x00 TYPE_BOOL = 0x01 TYPE_INT64 = 0x05 TYPE_FLOAT64 = 0x06 TYPE_BYTES = 0x10 TYPE_STRING = 0x11 TYPE_LIST = 0x20 TYPE_DICT = 0x21 # Maximum sizes for safety MAX_STRING_SIZE = 64 * 1024 * 1024 # 64 MB MAX_BYTES_SIZE = 64 * 1024 * 1024 # 64 MB MAX_LIST_SIZE = 1024 * 1024 # 1 million items MAX_DICT_SIZE = 1024 * 1024 # 1 million items class TLVEncoder: """ TLV binary encoder/decoder. Encodes Python values to binary TLV format and decodes back. Supports: None, bool, int, float, bytes, str, list, dict. """ @staticmethod def encode(value) -> bytes: # pylint: disable=too-many-return-statements """ Encode a Python value to TLV binary format. 
Args: value: Python value to encode (None, bool, int, float, bytes, str, list, or dict) Returns: bytes: TLV-encoded binary data Raises: DirtyProtocolError: If value type is not supported """ if value is None: return bytes([TYPE_NONE]) if isinstance(value, bool): # bool must come before int since bool is a subclass of int return bytes([TYPE_BOOL, 0x01 if value else 0x00]) if isinstance(value, int): return bytes([TYPE_INT64]) + struct.pack(">q", value) if isinstance(value, float): return bytes([TYPE_FLOAT64]) + struct.pack(">d", value) if isinstance(value, bytes): if len(value) > MAX_BYTES_SIZE: raise DirtyProtocolError( f"Bytes too large: {len(value)} bytes " f"(max: {MAX_BYTES_SIZE})" ) return bytes([TYPE_BYTES]) + struct.pack(">I", len(value)) + value if isinstance(value, str): encoded = value.encode("utf-8") if len(encoded) > MAX_STRING_SIZE: raise DirtyProtocolError( f"String too large: {len(encoded)} bytes " f"(max: {MAX_STRING_SIZE})" ) return bytes([TYPE_STRING]) + struct.pack(">I", len(encoded)) + encoded if isinstance(value, (list, tuple)): if len(value) > MAX_LIST_SIZE: raise DirtyProtocolError( f"List too large: {len(value)} items " f"(max: {MAX_LIST_SIZE})" ) parts = [bytes([TYPE_LIST]), struct.pack(">I", len(value))] for item in value: parts.append(TLVEncoder.encode(item)) return b"".join(parts) if isinstance(value, dict): if len(value) > MAX_DICT_SIZE: raise DirtyProtocolError( f"Dict too large: {len(value)} items " f"(max: {MAX_DICT_SIZE})" ) parts = [bytes([TYPE_DICT]), struct.pack(">I", len(value))] for k, v in value.items(): # Convert keys to strings (like JSON) if not isinstance(k, str): k = str(k) parts.append(TLVEncoder.encode(k)) parts.append(TLVEncoder.encode(v)) return b"".join(parts) raise DirtyProtocolError( f"Unsupported type for TLV encoding: {type(value).__name__}" ) @staticmethod def decode(data: bytes, offset: int = 0) -> tuple: # pylint: disable=too-many-return-statements """ Decode a TLV-encoded value from binary data. 
Args: data: Binary data to decode offset: Starting offset in the data Returns: tuple: (decoded_value, new_offset) Raises: DirtyProtocolError: If data is malformed or truncated """ if offset >= len(data): raise DirtyProtocolError( "Truncated TLV data: no type byte", raw_data=data[offset:offset + 20] ) type_code = data[offset] offset += 1 if type_code == TYPE_NONE: return None, offset if type_code == TYPE_BOOL: if offset >= len(data): raise DirtyProtocolError( "Truncated TLV data: missing bool value", raw_data=data[offset - 1:offset + 20] ) value = data[offset] != 0x00 return value, offset + 1 if type_code == TYPE_INT64: if offset + 8 > len(data): raise DirtyProtocolError( "Truncated TLV data: incomplete int64", raw_data=data[offset - 1:offset + 20] ) value = struct.unpack(">q", data[offset:offset + 8])[0] return value, offset + 8 if type_code == TYPE_FLOAT64: if offset + 8 > len(data): raise DirtyProtocolError( "Truncated TLV data: incomplete float64", raw_data=data[offset - 1:offset + 20] ) value = struct.unpack(">d", data[offset:offset + 8])[0] return value, offset + 8 if type_code == TYPE_BYTES: if offset + 4 > len(data): raise DirtyProtocolError( "Truncated TLV data: incomplete bytes length", raw_data=data[offset - 1:offset + 20] ) length = struct.unpack(">I", data[offset:offset + 4])[0] offset += 4 if length > MAX_BYTES_SIZE: raise DirtyProtocolError( f"Bytes too large: {length} bytes (max: {MAX_BYTES_SIZE})" ) if offset + length > len(data): raise DirtyProtocolError( f"Truncated TLV data: expected {length} bytes, " f"got {len(data) - offset}", raw_data=data[offset - 5:offset + 20] ) value = data[offset:offset + length] return value, offset + length if type_code == TYPE_STRING: if offset + 4 > len(data): raise DirtyProtocolError( "Truncated TLV data: incomplete string length", raw_data=data[offset - 1:offset + 20] ) length = struct.unpack(">I", data[offset:offset + 4])[0] offset += 4 if length > MAX_STRING_SIZE: raise DirtyProtocolError( f"String too large: 
{length} bytes (max: {MAX_STRING_SIZE})" ) if offset + length > len(data): raise DirtyProtocolError( f"Truncated TLV data: expected {length} bytes for string, " f"got {len(data) - offset}", raw_data=data[offset - 5:offset + 20] ) try: value = data[offset:offset + length].decode("utf-8") except UnicodeDecodeError as e: raise DirtyProtocolError( f"Invalid UTF-8 in string: {e}", raw_data=data[offset:offset + min(length, 20)] ) return value, offset + length if type_code == TYPE_LIST: if offset + 4 > len(data): raise DirtyProtocolError( "Truncated TLV data: incomplete list count", raw_data=data[offset - 1:offset + 20] ) count = struct.unpack(">I", data[offset:offset + 4])[0] offset += 4 if count > MAX_LIST_SIZE: raise DirtyProtocolError( f"List too large: {count} items (max: {MAX_LIST_SIZE})" ) items = [] for _ in range(count): item, offset = TLVEncoder.decode(data, offset) items.append(item) return items, offset if type_code == TYPE_DICT: if offset + 4 > len(data): raise DirtyProtocolError( "Truncated TLV data: incomplete dict count", raw_data=data[offset - 1:offset + 20] ) count = struct.unpack(">I", data[offset:offset + 4])[0] offset += 4 if count > MAX_DICT_SIZE: raise DirtyProtocolError( f"Dict too large: {count} items (max: {MAX_DICT_SIZE})" ) result = {} for _ in range(count): key, offset = TLVEncoder.decode(data, offset) if not isinstance(key, str): raise DirtyProtocolError( f"Dict key must be string, got {type(key).__name__}" ) value, offset = TLVEncoder.decode(data, offset) result[key] = value return result, offset raise DirtyProtocolError( f"Unknown TLV type code: 0x{type_code:02x}", raw_data=data[offset - 1:offset + 20] ) @staticmethod def decode_full(data: bytes): """ Decode a complete TLV-encoded value, ensuring all data is consumed. 
Args: data: Binary data to decode Returns: Decoded Python value Raises: DirtyProtocolError: If data is malformed or has trailing bytes """ value, offset = TLVEncoder.decode(data, 0) if offset != len(data): raise DirtyProtocolError( f"Trailing data after TLV: {len(data) - offset} bytes", raw_data=data[offset:offset + 20] ) return value benoitc-gunicorn-f5fb19e/gunicorn/dirty/worker.py000066400000000000000000000421041514360242400223260ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Dirty Worker Process Asyncio-based worker that loads dirty apps and handles requests from the DirtyArbiter. Threading Model --------------- Each dirty worker runs an asyncio event loop in the main thread for: - Handling connections from the arbiter - Managing heartbeat updates - Coordinating task execution Actual app execution runs in a ThreadPoolExecutor (separate threads): - The number of threads is controlled by ``dirty_threads`` config (default: 1) - Each thread can execute one app action at a time - The asyncio event loop is NOT blocked by task execution State and Global Objects ------------------------ Apps can maintain persistent state because: 1. Apps are loaded ONCE when the worker starts (in ``load_apps()``) 2. The same app instances are reused for ALL requests 3. App state (instance variables, loaded models, etc.) persists Example:: class MLApp(DirtyApp): def init(self): self.model = load_heavy_model() # Loaded once, reused self.cache = {} # Persistent cache def predict(self, data): return self.model.predict(data) # Uses loaded model Thread Safety: - With ``dirty_threads=1`` (default): No concurrent access, thread-safe by design - With ``dirty_threads > 1``: Multiple threads share the same app instances, apps MUST be thread-safe (use locks, thread-local storage, etc.) Heartbeat and Liveness ---------------------- The worker sends heartbeat updates to prove it's alive: 1. 
A dedicated asyncio task (``_heartbeat_loop``) runs independently 2. It updates the heartbeat file every ``dirty_timeout / 2`` seconds 3. Since tasks run in executor threads, they do NOT block heartbeats 4. The arbiter kills workers that miss heartbeat updates Timeout Control --------------- Execution timeout is enforced at two levels: 1. **Worker level**: Each task execution has a timeout (``dirty_timeout``). If exceeded, the worker returns a timeout error but the thread may continue running (Python threads cannot be cancelled). 2. **Arbiter level**: The arbiter also enforces timeout when waiting for worker response. Workers that don't respond are killed via SIGABRT. Note: Since Python threads cannot be forcibly cancelled, a truly stuck operation will continue until the worker is killed by the arbiter. """ import asyncio import inspect import os import signal import traceback import uuid from gunicorn import util from gunicorn.workers.workertmp import WorkerTmp from .app import load_dirty_apps from .errors import ( DirtyAppError, DirtyAppNotFoundError, DirtyTimeoutError, DirtyWorkerError, ) from .protocol import ( DirtyProtocol, make_response, make_error_response, make_chunk_message, make_end_message, ) class DirtyWorker: """ Dirty worker process that loads dirty apps and handles requests. Each worker runs its own asyncio event loop and listens on a worker-specific Unix socket for requests from the DirtyArbiter. """ SIGNALS = [getattr(signal, "SIG%s" % x) for x in "ABRT HUP QUIT INT TERM USR1".split()] def __init__(self, age, ppid, app_paths, cfg, log, socket_path): """ Initialize a dirty worker. 
Args: age: Worker age (for identifying workers) ppid: Parent process ID app_paths: List of dirty app import paths cfg: Gunicorn config log: Logger socket_path: Path to this worker's Unix socket """ self.age = age self.pid = "[booting]" self.ppid = ppid self.app_paths = app_paths self.cfg = cfg self.log = log self.socket_path = socket_path self.booted = False self.aborted = False self.alive = True self.tmp = WorkerTmp(cfg) self.apps = {} self._server = None self._loop = None self._executor = None def __str__(self): return f"" def notify(self): """Update heartbeat timestamp.""" self.tmp.notify() def init_process(self): """ Initialize the worker process after fork. This is called in the child process after fork. It sets up the environment, loads apps, and starts the main run loop. """ # Set environment variables if self.cfg.env: for k, v in self.cfg.env.items(): os.environ[k] = v util.set_owner_process(self.cfg.uid, self.cfg.gid, initgroups=self.cfg.initgroups) # Reseed random number generator util.seed() # Prevent fd inheritance util.close_on_exec(self.tmp.fileno()) self.log.close_on_exec() # Set up signals self.init_signals() # Load dirty apps self.load_apps() # Call hook self.pid = os.getpid() self.cfg.dirty_worker_init(self) # Enter main run loop self.booted = True self.run() def init_signals(self): """Set up signal handlers.""" # Reset signal handlers from parent for sig in self.SIGNALS: signal.signal(sig, signal.SIG_DFL) # Handle graceful shutdown signal.signal(signal.SIGTERM, self._signal_handler) signal.signal(signal.SIGQUIT, self._signal_handler) signal.signal(signal.SIGINT, self._signal_handler) # Handle abort (timeout) signal.signal(signal.SIGABRT, self._signal_handler) # Handle USR1 (reopen logs) signal.signal(signal.SIGUSR1, self._signal_handler) def _signal_handler(self, sig, frame): """Handle signals by setting alive = False.""" if sig == signal.SIGUSR1: self.log.reopen_files() return self.alive = False if self._loop: 
self._loop.call_soon_threadsafe(self._shutdown) def _shutdown(self): """Initiate async shutdown.""" if self._server: self._server.close() def load_apps(self): """Load all configured dirty apps.""" try: self.apps = load_dirty_apps(self.app_paths) for path, app in self.apps.items(): self.log.debug("Loaded dirty app: %s", path) try: app.init() self.log.info("Initialized dirty app: %s", path) except Exception as e: self.log.error("Failed to initialize dirty app %s: %s", path, e) raise except Exception as e: self.log.error("Failed to load dirty apps: %s", e) raise def run(self): """Run the main asyncio event loop.""" # Lazy import for gevent compatibility (see #3482) from concurrent.futures import ThreadPoolExecutor # Create thread pool for executing app actions num_threads = self.cfg.dirty_threads self._executor = ThreadPoolExecutor( max_workers=num_threads, thread_name_prefix=f"dirty-worker-{self.pid}-" ) self.log.debug("Created thread pool with %d threads", num_threads) try: self._loop = asyncio.new_event_loop() asyncio.set_event_loop(self._loop) self._loop.run_until_complete(self._run_async()) except Exception as e: self.log.error("Worker error: %s", e) finally: self._cleanup() async def _run_async(self): """Main async loop - start server and handle connections.""" # Remove socket if it exists if os.path.exists(self.socket_path): os.unlink(self.socket_path) # Start Unix socket server self._server = await asyncio.start_unix_server( self.handle_connection, path=self.socket_path ) # Make socket accessible os.chmod(self.socket_path, 0o600) self.log.info("Dirty worker %s listening on %s", self.pid, self.socket_path) # Start heartbeat task heartbeat_task = asyncio.create_task(self._heartbeat_loop()) try: async with self._server: await self._server.serve_forever() except asyncio.CancelledError: pass finally: heartbeat_task.cancel() try: await heartbeat_task except asyncio.CancelledError: pass async def _heartbeat_loop(self): """Periodically update heartbeat.""" while 
self.alive: self.notify() await asyncio.sleep(self.cfg.dirty_timeout / 2.0) async def handle_connection(self, reader, writer): """ Handle a connection from the arbiter. Each connection can send multiple requests. """ self.log.debug("New connection from arbiter") try: while self.alive: try: message = await DirtyProtocol.read_message_async(reader) except asyncio.IncompleteReadError: # Connection closed break # Handle the request - pass writer for streaming support await self.handle_request(message, writer) except Exception as e: self.log.error("Connection error: %s", e) finally: writer.close() try: await writer.wait_closed() except Exception: pass async def handle_request(self, message, writer): """ Handle a single request message. Supports both regular (non-streaming) and streaming responses. For streaming, detects if the result is a generator and sends chunk messages followed by an end message. Args: message: Request dict from protocol writer: StreamWriter for sending responses """ request_id = message.get("id", str(uuid.uuid4())) msg_type = message.get("type") if msg_type != DirtyProtocol.MSG_TYPE_REQUEST: response = make_error_response( request_id, DirtyWorkerError(f"Unknown message type: {msg_type}") ) await DirtyProtocol.write_message_async(writer, response) return app_path = message.get("app_path") action = message.get("action") args = message.get("args", []) kwargs = message.get("kwargs", {}) # Update heartbeat before executing self.notify() try: result = await self.execute(app_path, action, args, kwargs) # Check if result is a generator (streaming) if inspect.isgenerator(result): await self._stream_sync_generator(request_id, result, writer) elif inspect.isasyncgen(result): await self._stream_async_generator(request_id, result, writer) else: # Regular non-streaming response response = make_response(request_id, result) await DirtyProtocol.write_message_async(writer, response) except Exception as e: tb = traceback.format_exc() self.log.error("Error executing 
%s.%s: %s\n%s", app_path, action, e, tb) response = make_error_response( request_id, DirtyAppError(str(e), app_path=app_path, action=action, traceback=tb) ) await DirtyProtocol.write_message_async(writer, response) async def _stream_sync_generator(self, request_id, gen, writer): """ Stream chunks from a synchronous generator. Args: request_id: Request ID for the messages gen: Sync generator to iterate writer: StreamWriter for sending messages """ # Sentinel value to detect end of generator # (StopIteration cannot be raised into a Future in Python 3.7+) _EXHAUSTED = object() def _get_next(): try: return next(gen) except StopIteration: return _EXHAUSTED try: loop = asyncio.get_running_loop() while True: # Run next() in executor to avoid blocking event loop chunk = await loop.run_in_executor(self._executor, _get_next) if chunk is _EXHAUSTED: break # Send chunk message await DirtyProtocol.write_message_async( writer, make_chunk_message(request_id, chunk) ) # Update heartbeat during long streams self.notify() # Send end message await DirtyProtocol.write_message_async( writer, make_end_message(request_id) ) except Exception as e: # Error during streaming - send error message tb = traceback.format_exc() self.log.error("Error during streaming: %s\n%s", e, tb) response = make_error_response( request_id, DirtyAppError(str(e), traceback=tb) ) await DirtyProtocol.write_message_async(writer, response) finally: gen.close() async def _stream_async_generator(self, request_id, gen, writer): """ Stream chunks from an asynchronous generator. 
Args: request_id: Request ID for the messages gen: Async generator to iterate writer: StreamWriter for sending messages """ try: async for chunk in gen: # Send chunk message await DirtyProtocol.write_message_async( writer, make_chunk_message(request_id, chunk) ) # Update heartbeat during long streams self.notify() # Send end message await DirtyProtocol.write_message_async( writer, make_end_message(request_id) ) except Exception as e: # Error during streaming - send error message tb = traceback.format_exc() self.log.error("Error during streaming: %s\n%s", e, tb) response = make_error_response( request_id, DirtyAppError(str(e), traceback=tb) ) await DirtyProtocol.write_message_async(writer, response) finally: await gen.aclose() async def execute(self, app_path, action, args, kwargs): """ Execute an action on a dirty app. The action runs in a thread pool executor to avoid blocking the asyncio event loop. Execution timeout is enforced using ``dirty_timeout`` config. Args: app_path: Import path of the dirty app action: Action name to execute args: Positional arguments kwargs: Keyword arguments Returns: Result from the app action Raises: DirtyAppNotFoundError: If app is not loaded DirtyTimeoutError: If execution exceeds timeout DirtyAppError: If execution fails """ if app_path not in self.apps: raise DirtyAppNotFoundError(app_path) app = self.apps[app_path] timeout = self.cfg.dirty_timeout if self.cfg.dirty_timeout > 0 else None # Run the app call in the thread pool to avoid blocking # the event loop for CPU-bound operations loop = asyncio.get_running_loop() try: result = await asyncio.wait_for( loop.run_in_executor( self._executor, lambda: app(action, *args, **kwargs) ), timeout=timeout ) return result except asyncio.TimeoutError: # Note: The thread continues running - we just stop waiting self.log.warning( "Execution timeout for %s.%s after %ds", app_path, action, timeout ) raise DirtyTimeoutError( f"Execution of {app_path}.{action} timed out", timeout=timeout ) def 
_cleanup(self): """Clean up resources on shutdown.""" # Shutdown thread pool executor if self._executor: self._executor.shutdown(wait=False, cancel_futures=True) self._executor = None # Close all apps for path, app in self.apps.items(): try: app.close() self.log.debug("Closed dirty app: %s", path) except Exception as e: self.log.error("Error closing dirty app %s: %s", path, e) # Close temp file try: self.tmp.close() except Exception: pass # Remove socket file try: if os.path.exists(self.socket_path): os.unlink(self.socket_path) except Exception: pass self.log.info("Dirty worker %s exiting", self.pid) benoitc-gunicorn-f5fb19e/gunicorn/errors.py000066400000000000000000000016011514360242400211730ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # We don't need to call super() in __init__ methods of our # BaseException and Exception classes because we also define # our own __str__ methods so there is no need to pass 'message' # to the base class to get a meaningful output from 'str(exc)'. # pylint: disable=super-init-not-called # we inherit from BaseException here to make sure to not be caught # at application level class HaltServer(BaseException): def __init__(self, reason, exit_status=1): self.reason = reason self.exit_status = exit_status def __str__(self): return "" % (self.reason, self.exit_status) class ConfigError(Exception): """ Exception raised on config error """ class AppImportError(Exception): """ Exception raised when loading an application """ benoitc-gunicorn-f5fb19e/gunicorn/glogging.py000066400000000000000000000357131514360242400214670ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
import base64 import binascii import json import time import logging logging.Logger.manager.emittedNoHandlerWarning = 1 # noqa from logging.config import dictConfig from logging.config import fileConfig import os import socket import sys import threading import traceback from gunicorn import util # syslog facility codes SYSLOG_FACILITIES = { "auth": 4, "authpriv": 10, "cron": 9, "daemon": 3, "ftp": 11, "kern": 0, "lpr": 6, "mail": 2, "news": 7, "security": 4, # DEPRECATED "syslog": 5, "user": 1, "uucp": 8, "local0": 16, "local1": 17, "local2": 18, "local3": 19, "local4": 20, "local5": 21, "local6": 22, "local7": 23 } CONFIG_DEFAULTS = { "version": 1, "disable_existing_loggers": False, "root": {"level": "INFO", "handlers": ["console"]}, "loggers": { "gunicorn.error": { "level": "INFO", "handlers": ["error_console"], "propagate": True, "qualname": "gunicorn.error" }, "gunicorn.access": { "level": "INFO", "handlers": ["console"], "propagate": True, "qualname": "gunicorn.access" } }, "handlers": { "console": { "class": "logging.StreamHandler", "formatter": "generic", "stream": "ext://sys.stdout" }, "error_console": { "class": "logging.StreamHandler", "formatter": "generic", "stream": "ext://sys.stderr" }, }, "formatters": { "generic": { "format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s", "datefmt": "[%Y-%m-%d %H:%M:%S %z]", "class": "logging.Formatter" } } } def loggers(): """ get list of all loggers """ root = logging.root existing = list(root.manager.loggerDict.keys()) return [logging.getLogger(name) for name in existing] class SafeAtoms(dict): def __init__(self, atoms): dict.__init__(self) for key, value in atoms.items(): if isinstance(value, str): self[key] = value.replace('"', '\\"') else: self[key] = value def __getitem__(self, k): if k.startswith("{"): kl = k.lower() if kl in self: return super().__getitem__(kl) else: return "-" if k in self: return super().__getitem__(k) else: return '-' def parse_syslog_address(addr): # unix domain socket type 
depends on backend # SysLogHandler will try both when given None if addr.startswith("unix://"): sock_type = None # set socket type only if explicitly requested parts = addr.split("#", 1) if len(parts) == 2: addr = parts[0] if parts[1] == "dgram": sock_type = socket.SOCK_DGRAM return (sock_type, addr.split("unix://")[1]) if addr.startswith("udp://"): addr = addr.split("udp://")[1] socktype = socket.SOCK_DGRAM elif addr.startswith("tcp://"): addr = addr.split("tcp://")[1] socktype = socket.SOCK_STREAM else: raise RuntimeError("invalid syslog address") if '[' in addr and ']' in addr: host = addr.split(']')[0][1:].lower() elif ':' in addr: host = addr.split(':')[0].lower() elif addr == "": host = "localhost" else: host = addr.lower() addr = addr.split(']')[-1] if ":" in addr: port = addr.split(':', 1)[1] if not port.isdigit(): raise RuntimeError("%r is not a valid port number." % port) port = int(port) else: port = 514 return (socktype, (host, port)) class Logger: LOG_LEVELS = { "critical": logging.CRITICAL, "error": logging.ERROR, "warning": logging.WARNING, "info": logging.INFO, "debug": logging.DEBUG } loglevel = logging.INFO error_fmt = r"%(asctime)s [%(process)d] [%(levelname)s] %(message)s" datefmt = r"[%Y-%m-%d %H:%M:%S %z]" access_fmt = "%(message)s" syslog_fmt = "[%(process)d] %(message)s" atoms_wrapper_class = SafeAtoms def __init__(self, cfg): self.error_log = logging.getLogger("gunicorn.error") self.error_log.propagate = False self.access_log = logging.getLogger("gunicorn.access") self.access_log.propagate = False self.error_handlers = [] self.access_handlers = [] self.logfile = None self.lock = threading.Lock() self.cfg = cfg self.setup(cfg) def setup(self, cfg): self.loglevel = self.LOG_LEVELS.get(cfg.loglevel.lower(), logging.INFO) self.error_log.setLevel(self.loglevel) self.access_log.setLevel(logging.INFO) # set gunicorn.error handler if self.cfg.capture_output and cfg.errorlog != "-": for stream in sys.stdout, sys.stderr: stream.flush() self.logfile = 
open(cfg.errorlog, 'a+') os.dup2(self.logfile.fileno(), sys.stdout.fileno()) os.dup2(self.logfile.fileno(), sys.stderr.fileno()) self._set_handler(self.error_log, cfg.errorlog, logging.Formatter(self.error_fmt, self.datefmt)) # set gunicorn.access handler if cfg.accesslog is not None: self._set_handler( self.access_log, cfg.accesslog, fmt=logging.Formatter(self.access_fmt), stream=sys.stdout ) # set syslog handler if cfg.syslog: self._set_syslog_handler( self.error_log, cfg, self.syslog_fmt, "error" ) if not cfg.disable_redirect_access_to_syslog: self._set_syslog_handler( self.access_log, cfg, self.syslog_fmt, "access" ) if cfg.logconfig_dict: config = CONFIG_DEFAULTS.copy() config.update(cfg.logconfig_dict) try: dictConfig(config) except ( AttributeError, ImportError, ValueError, TypeError ) as exc: raise RuntimeError(str(exc)) from exc elif cfg.logconfig_json: config = CONFIG_DEFAULTS.copy() if os.path.exists(cfg.logconfig_json): try: config_json = json.load(open(cfg.logconfig_json)) config.update(config_json) dictConfig(config) except ( json.JSONDecodeError, AttributeError, ImportError, ValueError, TypeError ) as exc: raise RuntimeError(str(exc)) from exc elif cfg.logconfig: if os.path.exists(cfg.logconfig): defaults = CONFIG_DEFAULTS.copy() defaults['__file__'] = cfg.logconfig defaults['here'] = os.path.dirname(cfg.logconfig) fileConfig(cfg.logconfig, defaults=defaults, disable_existing_loggers=False) else: msg = "Error: log config '%s' not found" raise RuntimeError(msg % cfg.logconfig) def critical(self, msg, *args, **kwargs): self.error_log.critical(msg, *args, **kwargs) def error(self, msg, *args, **kwargs): self.error_log.error(msg, *args, **kwargs) def warning(self, msg, *args, **kwargs): self.error_log.warning(msg, *args, **kwargs) def info(self, msg, *args, **kwargs): self.error_log.info(msg, *args, **kwargs) def debug(self, msg, *args, **kwargs): self.error_log.debug(msg, *args, **kwargs) def exception(self, msg, *args, **kwargs): 
self.error_log.exception(msg, *args, **kwargs) def log(self, lvl, msg, *args, **kwargs): if isinstance(lvl, str): lvl = self.LOG_LEVELS.get(lvl.lower(), logging.INFO) self.error_log.log(lvl, msg, *args, **kwargs) def atoms(self, resp, req, environ, request_time): """ Gets atoms for log formatting. """ status = resp.status if isinstance(status, str): status = status.split(None, 1)[0] atoms = { 'h': environ.get('REMOTE_ADDR', '-'), 'l': '-', 'u': self._get_user(environ) or '-', 't': self.now(), 'r': "%s %s %s" % (environ['REQUEST_METHOD'], environ['RAW_URI'], environ["SERVER_PROTOCOL"]), 's': status, 'm': environ.get('REQUEST_METHOD'), 'U': environ.get('PATH_INFO'), 'q': environ.get('QUERY_STRING'), 'H': environ.get('SERVER_PROTOCOL'), 'b': getattr(resp, 'sent', None) is not None and str(resp.sent) or '-', 'B': getattr(resp, 'sent', None), 'f': environ.get('HTTP_REFERER', '-'), 'a': environ.get('HTTP_USER_AGENT', '-'), 'T': request_time.seconds, 'D': (request_time.seconds * 1000000) + request_time.microseconds, 'M': (request_time.seconds * 1000) + int(request_time.microseconds / 1000), 'L': "%d.%06d" % (request_time.seconds, request_time.microseconds), 'p': "<%s>" % os.getpid() } # add request headers if hasattr(req, 'headers'): req_headers = req.headers else: req_headers = req if hasattr(req_headers, "items"): req_headers = req_headers.items() atoms.update({"{%s}i" % k.lower(): v for k, v in req_headers}) resp_headers = resp.headers if hasattr(resp_headers, "items"): resp_headers = resp_headers.items() # add response headers atoms.update({"{%s}o" % k.lower(): v for k, v in resp_headers}) # add environ variables environ_variables = environ.items() atoms.update({"{%s}e" % k.lower(): v for k, v in environ_variables}) return atoms def access(self, resp, req, environ, request_time): """ See http://httpd.apache.org/docs/2.0/logs.html#combined for format details """ if not (self.cfg.accesslog or self.cfg.logconfig or self.cfg.logconfig_dict or self.cfg.logconfig_json or 
(self.cfg.syslog and not self.cfg.disable_redirect_access_to_syslog)): return # wrap atoms: # - make sure atoms will be test case insensitively # - if atom doesn't exist replace it by '-' safe_atoms = self.atoms_wrapper_class( self.atoms(resp, req, environ, request_time) ) try: self.access_log.info(self.cfg.access_log_format, safe_atoms) except Exception: self.error(traceback.format_exc()) def now(self): """ return date in Apache Common Log Format """ return time.strftime('[%d/%b/%Y:%H:%M:%S %z]') def reopen_files(self): if self.cfg.capture_output and self.cfg.errorlog != "-": for stream in sys.stdout, sys.stderr: stream.flush() with self.lock: if self.logfile is not None: self.logfile.close() self.logfile = open(self.cfg.errorlog, 'a+') os.dup2(self.logfile.fileno(), sys.stdout.fileno()) os.dup2(self.logfile.fileno(), sys.stderr.fileno()) for log in loggers(): for handler in log.handlers: if isinstance(handler, logging.FileHandler): handler.acquire() try: if handler.stream: handler.close() handler.stream = handler._open() finally: handler.release() def close_on_exec(self): for log in loggers(): for handler in log.handlers: if isinstance(handler, logging.FileHandler): handler.acquire() try: if handler.stream: util.close_on_exec(handler.stream.fileno()) finally: handler.release() def _get_gunicorn_handler(self, log): for h in log.handlers: if getattr(h, "_gunicorn", False): return h def _set_handler(self, log, output, fmt, stream=None): # remove previous gunicorn log handler h = self._get_gunicorn_handler(log) if h: log.handlers.remove(h) if output is not None: if output == "-": h = logging.StreamHandler(stream) else: util.check_is_writable(output) h = logging.FileHandler(output) # make sure the user can reopen the file try: os.chown(h.baseFilename, self.cfg.user, self.cfg.group) except OSError: # it's probably OK there, we assume the user has given # /dev/null as a parameter. 
pass h.setFormatter(fmt) h._gunicorn = True log.addHandler(h) def _set_syslog_handler(self, log, cfg, fmt, name): # setup format prefix = cfg.syslog_prefix or cfg.proc_name.replace(":", ".") prefix = "gunicorn.%s.%s" % (prefix, name) # set format fmt = logging.Formatter(r"%s: %s" % (prefix, fmt)) # syslog facility try: facility = SYSLOG_FACILITIES[cfg.syslog_facility.lower()] except KeyError as exc: raise RuntimeError("unknown facility name") from exc # parse syslog address socktype, addr = parse_syslog_address(cfg.syslog_addr) # finally setup the syslog handler h = logging.handlers.SysLogHandler(address=addr, facility=facility, socktype=socktype) h.setFormatter(fmt) h._gunicorn = True log.addHandler(h) def _get_user(self, environ): user = None http_auth = environ.get("HTTP_AUTHORIZATION") if http_auth and http_auth.lower().startswith('basic'): auth = http_auth.split(" ", 1) if len(auth) == 2: try: # b64decode doesn't accept unicode in Python < 3.3 # so we need to convert it to a byte string auth = base64.b64decode(auth[1].strip().encode('utf-8')) # b64decode returns a byte string user = auth.split(b":", 1)[0].decode("UTF-8") except (TypeError, binascii.Error, UnicodeDecodeError) as exc: self.debug("Couldn't get username: %s", exc) return user benoitc-gunicorn-f5fb19e/gunicorn/http/000077500000000000000000000000001514360242400202665ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/gunicorn/http/__init__.py000066400000000000000000000022121514360242400223740ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.message import Message, Request from gunicorn.http.parser import RequestParser def get_parser(cfg, source, source_addr, http2_connection=False): """Get appropriate parser based on protocol config. 
Args: cfg: Gunicorn config object source: Socket or iterable source source_addr: Source address tuple or None http2_connection: If True, create HTTP/2 connection handler Returns: Parser instance (RequestParser, UWSGIParser, or HTTP2ServerConnection) """ # HTTP/2 connection if http2_connection: from gunicorn.http2.connection import HTTP2ServerConnection return HTTP2ServerConnection(cfg, source, source_addr) # uWSGI protocol protocol = getattr(cfg, 'protocol', 'http') if protocol == 'uwsgi': from gunicorn.uwsgi.parser import UWSGIParser return UWSGIParser(cfg, source, source_addr) # Default HTTP/1.x return RequestParser(cfg, source, source_addr) __all__ = ['Message', 'Request', 'RequestParser', 'get_parser'] benoitc-gunicorn-f5fb19e/gunicorn/http/body.py000066400000000000000000000166601514360242400216060ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import io import sys from gunicorn.http.errors import (NoMoreData, ChunkMissingTerminator, InvalidChunkSize) class ChunkedReader: def __init__(self, req, unreader): self.req = req self.parser = self.parse_chunked(unreader) self.buf = io.BytesIO() def read(self, size): if not isinstance(size, int): raise TypeError("size must be an integer type") if size < 0: raise ValueError("Size must be positive.") if size == 0: return b"" if self.parser: while self.buf.tell() < size: try: self.buf.write(next(self.parser)) except StopIteration: self.parser = None break data = self.buf.getvalue() ret, rest = data[:size], data[size:] self.buf = io.BytesIO() self.buf.write(rest) return ret def parse_trailers(self, unreader, data): buf = io.BytesIO() buf.write(data) idx = buf.getvalue().find(b"\r\n\r\n") done = buf.getvalue()[:2] == b"\r\n" while idx < 0 and not done: self.get_data(unreader, buf) idx = buf.getvalue().find(b"\r\n\r\n") done = buf.getvalue()[:2] == b"\r\n" if done: unreader.unread(buf.getvalue()[2:]) return b"" self.req.trailers = 
self.req.parse_headers(buf.getvalue()[:idx], from_trailer=True) unreader.unread(buf.getvalue()[idx + 4:]) def parse_chunked(self, unreader): (size, rest) = self.parse_chunk_size(unreader) while size > 0: while size > len(rest): size -= len(rest) yield rest rest = unreader.read() if not rest: raise NoMoreData() yield rest[:size] # Remove \r\n after chunk rest = rest[size:] while len(rest) < 2: new_data = unreader.read() if not new_data: break rest += new_data if rest[:2] != b'\r\n': raise ChunkMissingTerminator(rest[:2]) (size, rest) = self.parse_chunk_size(unreader, data=rest[2:]) def parse_chunk_size(self, unreader, data=None): buf = io.BytesIO() if data is not None: buf.write(data) idx = buf.getvalue().find(b"\r\n") while idx < 0: self.get_data(unreader, buf) idx = buf.getvalue().find(b"\r\n") data = buf.getvalue() line, rest_chunk = data[:idx], data[idx + 2:] # RFC9112 7.1.1: BWS before chunk-ext - but ONLY then chunk_size, *chunk_ext = line.split(b";", 1) if chunk_ext: chunk_size = chunk_size.rstrip(b" \t") if any(n not in b"0123456789abcdefABCDEF" for n in chunk_size): raise InvalidChunkSize(chunk_size) if len(chunk_size) == 0: raise InvalidChunkSize(chunk_size) chunk_size = int(chunk_size, 16) if chunk_size == 0: try: self.parse_trailers(unreader, rest_chunk) except NoMoreData: pass return (0, None) return (chunk_size, rest_chunk) def get_data(self, unreader, buf): data = unreader.read() if not data: raise NoMoreData() buf.write(data) class LengthReader: def __init__(self, unreader, length): self.unreader = unreader self.length = length def read(self, size): if not isinstance(size, int): raise TypeError("size must be an integral type") size = min(self.length, size) if size < 0: raise ValueError("Size must be positive.") if size == 0: return b"" buf = io.BytesIO() data = self.unreader.read() while data: buf.write(data) if buf.tell() >= size: break data = self.unreader.read() buf = buf.getvalue() ret, rest = buf[:size], buf[size:] self.unreader.unread(rest) 
self.length -= size return ret class EOFReader: def __init__(self, unreader): self.unreader = unreader self.buf = io.BytesIO() self.finished = False def read(self, size): if not isinstance(size, int): raise TypeError("size must be an integral type") if size < 0: raise ValueError("Size must be positive.") if size == 0: return b"" if self.finished: data = self.buf.getvalue() ret, rest = data[:size], data[size:] self.buf = io.BytesIO() self.buf.write(rest) return ret data = self.unreader.read() while data: self.buf.write(data) if self.buf.tell() > size: break data = self.unreader.read() if not data: self.finished = True data = self.buf.getvalue() ret, rest = data[:size], data[size:] self.buf = io.BytesIO() self.buf.write(rest) return ret class Body: def __init__(self, reader): self.reader = reader self.buf = io.BytesIO() def __iter__(self): return self def __next__(self): ret = self.readline() if not ret: raise StopIteration() return ret next = __next__ def getsize(self, size): if size is None: return sys.maxsize elif not isinstance(size, int): raise TypeError("size must be an integral type") elif size < 0: return sys.maxsize return size def read(self, size=None): size = self.getsize(size) if size == 0: return b"" if size < self.buf.tell(): data = self.buf.getvalue() ret, rest = data[:size], data[size:] self.buf = io.BytesIO() self.buf.write(rest) return ret while size > self.buf.tell(): data = self.reader.read(1024) if not data: break self.buf.write(data) data = self.buf.getvalue() ret, rest = data[:size], data[size:] self.buf = io.BytesIO() self.buf.write(rest) return ret def readline(self, size=None): size = self.getsize(size) if size == 0: return b"" data = self.buf.getvalue() self.buf = io.BytesIO() ret = [] while 1: idx = data.find(b"\n", 0, size) idx = idx + 1 if idx >= 0 else size if len(data) >= size else 0 if idx: ret.append(data[:idx]) self.buf.write(data[idx:]) break ret.append(data) size -= len(data) data = self.reader.read(min(1024, size)) if not data: 
break return b"".join(ret) def readlines(self, size=None): ret = [] data = self.read() while data: pos = data.find(b"\n") if pos < 0: ret.append(data) data = b"" else: line, data = data[:pos + 1], data[pos + 1:] ret.append(line) return ret benoitc-gunicorn-f5fb19e/gunicorn/http/errors.py000066400000000000000000000073731514360242400221660ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # We don't need to call super() in __init__ methods of our # BaseException and Exception classes because we also define # our own __str__ methods so there is no need to pass 'message' # to the base class to get a meaningful output from 'str(exc)'. # pylint: disable=super-init-not-called class ParseException(Exception): pass class NoMoreData(IOError): def __init__(self, buf=None): self.buf = buf def __str__(self): return "No more data after: %r" % self.buf class ConfigurationProblem(ParseException): def __init__(self, info): self.info = info self.code = 500 def __str__(self): return "Configuration problem: %s" % self.info class InvalidRequestLine(ParseException): def __init__(self, req): self.req = req self.code = 400 def __str__(self): return "Invalid HTTP request line: %r" % self.req class InvalidRequestMethod(ParseException): def __init__(self, method): self.method = method def __str__(self): return "Invalid HTTP method: %r" % self.method class ExpectationFailed(ParseException): def __init__(self, expect): self.expect = expect def __str__(self): return "Unable to comply with expectation: %r" % (self.expect, ) class InvalidHTTPVersion(ParseException): def __init__(self, version): self.version = version def __str__(self): return "Invalid HTTP Version: %r" % (self.version,) class InvalidHeader(ParseException): def __init__(self, hdr, req=None): self.hdr = hdr self.req = req def __str__(self): return "Invalid HTTP Header: %r" % self.hdr class ObsoleteFolding(ParseException): def __init__(self, hdr): 
self.hdr = hdr def __str__(self): return "Obsolete line folding is unacceptable: %r" % (self.hdr, ) class InvalidHeaderName(ParseException): def __init__(self, hdr): self.hdr = hdr def __str__(self): return "Invalid HTTP header name: %r" % self.hdr class UnsupportedTransferCoding(ParseException): def __init__(self, hdr): self.hdr = hdr self.code = 501 def __str__(self): return "Unsupported transfer coding: %r" % self.hdr class InvalidChunkSize(IOError): def __init__(self, data): self.data = data def __str__(self): return "Invalid chunk size: %r" % self.data class ChunkMissingTerminator(IOError): def __init__(self, term): self.term = term def __str__(self): return "Invalid chunk terminator is not '\\r\\n': %r" % self.term class LimitRequestLine(ParseException): def __init__(self, size, max_size): self.size = size self.max_size = max_size def __str__(self): return "Request Line is too large (%s > %s)" % (self.size, self.max_size) class LimitRequestHeaders(ParseException): def __init__(self, msg): self.msg = msg def __str__(self): return self.msg class InvalidProxyLine(ParseException): def __init__(self, line): self.line = line self.code = 400 def __str__(self): return "Invalid PROXY line: %r" % self.line class InvalidProxyHeader(ParseException): def __init__(self, msg): self.msg = msg self.code = 400 def __str__(self): return "Invalid PROXY header: %s" % self.msg class ForbiddenProxyRequest(ParseException): def __init__(self, host): self.host = host self.code = 403 def __str__(self): return "Proxy request from %r not allowed" % self.host class InvalidSchemeHeaders(ParseException): def __str__(self): return "Contradictory scheme headers" benoitc-gunicorn-f5fb19e/gunicorn/http/message.py000066400000000000000000000575701514360242400223020ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from enum import IntEnum import ipaddress import re import socket import struct from gunicorn.http.body import ChunkedReader, LengthReader, EOFReader, Body from gunicorn.http.errors import ( InvalidHeader, InvalidHeaderName, NoMoreData, InvalidRequestLine, InvalidRequestMethod, InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders, UnsupportedTransferCoding, ObsoleteFolding, ExpectationFailed, ) from gunicorn.http.errors import InvalidProxyLine, InvalidProxyHeader, ForbiddenProxyRequest from gunicorn.http.errors import InvalidSchemeHeaders from gunicorn.util import bytes_to_str, split_request_uri # PROXY protocol v2 constants PP_V2_SIGNATURE = b"\x0D\x0A\x0D\x0A\x00\x0D\x0A\x51\x55\x49\x54\x0A" class PPCommand(IntEnum): """PROXY protocol v2 commands.""" LOCAL = 0x0 PROXY = 0x1 class PPFamily(IntEnum): """PROXY protocol v2 address families.""" UNSPEC = 0x0 INET = 0x1 # IPv4 INET6 = 0x2 # IPv6 UNIX = 0x3 class PPProtocol(IntEnum): """PROXY protocol v2 transport protocols.""" UNSPEC = 0x0 STREAM = 0x1 # TCP DGRAM = 0x2 # UDP MAX_REQUEST_LINE = 8190 MAX_HEADERS = 32768 DEFAULT_MAX_HEADERFIELD_SIZE = 8190 # verbosely on purpose, avoid backslash ambiguity RFC9110_5_6_2_TOKEN_SPECIALS = r"!#$%&'*+-.^_`|~" TOKEN_RE = re.compile(r"[%s0-9a-zA-Z]+" % (re.escape(RFC9110_5_6_2_TOKEN_SPECIALS))) METHOD_BADCHAR_RE = re.compile("[a-z#]") # usually 1.0 or 1.1 - RFC9112 permits restricting to single-digit versions VERSION_RE = re.compile(r"HTTP/(\d)\.(\d)") RFC9110_5_5_INVALID_AND_DANGEROUS = re.compile(r"[\0\r\n]") def _ip_in_allow_list(ip_str, allow_list, networks): """Check if IP address is in the allow list. 
Args: ip_str: The IP address string to check allow_list: The original allow list (strings, may contain "*") networks: Pre-computed ipaddress.ip_network objects from config """ if '*' in allow_list: return True try: ip = ipaddress.ip_address(ip_str) except ValueError: return False for network in networks: if ip in network: return True return False class Message: def __init__(self, cfg, unreader, peer_addr): self.cfg = cfg self.unreader = unreader self.peer_addr = peer_addr self.remote_addr = peer_addr self.version = None self.headers = [] self.trailers = [] self.body = None self.scheme = "https" if cfg.is_ssl else "http" self.must_close = False self._expected_100_continue = False # set headers limits self.limit_request_fields = cfg.limit_request_fields if (self.limit_request_fields <= 0 or self.limit_request_fields > MAX_HEADERS): self.limit_request_fields = MAX_HEADERS self.limit_request_field_size = cfg.limit_request_field_size if self.limit_request_field_size < 0: self.limit_request_field_size = DEFAULT_MAX_HEADERFIELD_SIZE # set max header buffer size max_header_field_size = self.limit_request_field_size or DEFAULT_MAX_HEADERFIELD_SIZE self.max_buffer_headers = self.limit_request_fields * \ (max_header_field_size + 2) + 4 unused = self.parse(self.unreader) self.unreader.unread(unused) self.set_body_reader() def force_close(self): self.must_close = True def parse(self, unreader): raise NotImplementedError() def parse_headers(self, data, from_trailer=False): cfg = self.cfg headers = [] # Split lines on \r\n lines = [bytes_to_str(line) for line in data.split(b"\r\n")] # handle scheme headers scheme_header = False secure_scheme_headers = {} forwarder_headers = [] if from_trailer: # nonsense. either a request is https from the beginning # .. 
or we are just behind a proxy who does not remove conflicting trailers pass elif (not isinstance(self.peer_addr, tuple) or _ip_in_allow_list(self.peer_addr[0], cfg.forwarded_allow_ips, cfg.forwarded_allow_networks())): secure_scheme_headers = cfg.secure_scheme_headers forwarder_headers = cfg.forwarder_headers # Parse headers into key/value pairs paying attention # to continuation lines. while lines: if len(headers) >= self.limit_request_fields: raise LimitRequestHeaders("limit request headers fields") # Parse initial header name: value pair. curr = lines.pop(0) header_length = len(curr) + len("\r\n") if curr.find(":") <= 0: raise InvalidHeader(curr) name, value = curr.split(":", 1) if self.cfg.strip_header_spaces: name = name.rstrip(" \t") if not TOKEN_RE.fullmatch(name): raise InvalidHeaderName(name) # this is still a dangerous place to do this # but it is more correct than doing it before the pattern match: # after we entered Unicode wonderland, 8bits could case-shift into ASCII: # b"\xDF".decode("latin-1").upper().encode("ascii") == b"SS" name = name.upper() value = [value.strip(" \t")] # Consume value continuation lines.. while lines and lines[0].startswith((" ", "\t")): # .. which is obsolete here, and no longer done by default if not self.cfg.permit_obsolete_folding: raise ObsoleteFolding(name) curr = lines.pop(0) header_length += len(curr) + len("\r\n") if header_length > self.limit_request_field_size > 0: raise LimitRequestHeaders("limit request headers " "fields size") value.append(curr.strip("\t ")) value = " ".join(value) if RFC9110_5_5_INVALID_AND_DANGEROUS.search(value): raise InvalidHeader(name) if header_length > self.limit_request_field_size > 0: raise LimitRequestHeaders("limit request headers fields size") if not from_trailer and name == "EXPECT": # https://datatracker.ietf.org/doc/html/rfc9110#section-10.1.1 # "The Expect field value is case-insensitive." 
if value.lower() == "100-continue": if self.version < (1, 1): # https://datatracker.ietf.org/doc/html/rfc9110#section-10.1.1-12 # "A server that receives a 100-continue expectation # in an HTTP/1.0 request MUST ignore that expectation." pass else: self._expected_100_continue = True # N.B. understood but ignored expect header does not return 417 else: raise ExpectationFailed(value) if name in secure_scheme_headers: secure = value == secure_scheme_headers[name] scheme = "https" if secure else "http" if scheme_header: if scheme != self.scheme: raise InvalidSchemeHeaders() else: scheme_header = True self.scheme = scheme # ambiguous mapping allows fooling downstream, e.g. merging non-identical headers: # X-Forwarded-For: 2001:db8::ha:cc:ed # X_Forwarded_For: 127.0.0.1,::1 # HTTP_X_FORWARDED_FOR = 2001:db8::ha:cc:ed,127.0.0.1,::1 # Only modify after fixing *ALL* header transformations; network to wsgi env if "_" in name: if name in forwarder_headers or "*" in forwarder_headers: # This forwarder may override our environment pass elif self.cfg.header_map == "dangerous": # as if we did not know we cannot safely map this pass elif self.cfg.header_map == "drop": # almost as if it never had been there # but still counts against resource limits continue else: # fail-safe fallthrough: refuse raise InvalidHeaderName(name) headers.append((name, value)) return headers def set_body_reader(self): chunked = False content_length = None for (name, value) in self.headers: if name == "CONTENT-LENGTH": if content_length is not None: raise InvalidHeader("CONTENT-LENGTH", req=self) content_length = value elif name == "TRANSFER-ENCODING": # T-E can be a list # https://datatracker.ietf.org/doc/html/rfc9112#name-transfer-encoding vals = [v.strip() for v in value.split(',')] for val in vals: if val.lower() == "chunked": # DANGER: transfer codings stack, and stacked chunking is never intended if chunked: raise InvalidHeader("TRANSFER-ENCODING", req=self) chunked = True elif val.lower() == 
"identity": # does not do much, could still plausibly desync from what the proxy does # safe option: nuke it, its never needed if chunked: raise InvalidHeader("TRANSFER-ENCODING", req=self) elif val.lower() in ('compress', 'deflate', 'gzip'): # chunked should be the last one if chunked: raise InvalidHeader("TRANSFER-ENCODING", req=self) self.force_close() else: raise UnsupportedTransferCoding(value) if chunked: # two potentially dangerous cases: # a) CL + TE (TE overrides CL.. only safe if the recipient sees it that way too) # b) chunked HTTP/1.0 (always faulty) if self.version < (1, 1): # framing wonky, see RFC 9112 Section 6.1 raise InvalidHeader("TRANSFER-ENCODING", req=self) if content_length is not None: # we cannot be certain the message framing we understood matches proxy intent # -> whatever happens next, remaining input must not be trusted raise InvalidHeader("CONTENT-LENGTH", req=self) self.body = Body(ChunkedReader(self, self.unreader)) elif content_length is not None: try: if str(content_length).isnumeric(): content_length = int(content_length) else: raise InvalidHeader("CONTENT-LENGTH", req=self) except ValueError: raise InvalidHeader("CONTENT-LENGTH", req=self) if content_length < 0: raise InvalidHeader("CONTENT-LENGTH", req=self) self.body = Body(LengthReader(self.unreader, content_length)) else: self.body = Body(EOFReader(self.unreader)) def should_close(self): if self.must_close: return True for (h, v) in self.headers: if h == "CONNECTION": v = v.lower().strip(" \t") if v == "close": return True elif v == "keep-alive": return False break return self.version <= (1, 0) class Request(Message): def __init__(self, cfg, unreader, peer_addr, req_number=1): self.method = None self.uri = None self.path = None self.query = None self.fragment = None # get max request line size self.limit_request_line = cfg.limit_request_line if (self.limit_request_line < 0 or self.limit_request_line >= MAX_REQUEST_LINE): self.limit_request_line = MAX_REQUEST_LINE 
self.req_number = req_number self.proxy_protocol_info = None super().__init__(cfg, unreader, peer_addr) def get_data(self, unreader, buf, stop=False): data = unreader.read() if not data: if stop: raise StopIteration() raise NoMoreData(buf.getvalue()) buf.write(data) def parse(self, unreader): buf = bytearray() self.read_into(unreader, buf, stop=True) # Handle proxy protocol if enabled and this is the first request mode = self.cfg.proxy_protocol if mode != "off" and self.req_number == 1: buf = self._handle_proxy_protocol(unreader, buf, mode) # Get request line line, buf = self.read_line(unreader, buf, self.limit_request_line) self.parse_request_line(line) # Headers data = bytes(buf) done = data[:2] == b"\r\n" while True: idx = data.find(b"\r\n\r\n") done = data[:2] == b"\r\n" if idx < 0 and not done: self.read_into(unreader, buf) data = bytes(buf) if len(data) > self.max_buffer_headers: raise LimitRequestHeaders("max buffer headers") else: break if done: self.unreader.unread(data[2:]) return b"" self.headers = self.parse_headers(data[:idx], from_trailer=False) ret = data[idx + 4:] return ret def read_into(self, unreader, buf, stop=False): """Read data from unreader and append to bytearray buffer.""" data = unreader.read() if not data: if stop: raise StopIteration() raise NoMoreData(bytes(buf)) buf.extend(data) def read_line(self, unreader, buf, limit=0): """Read a line from buffer, returning (line, remaining_buffer).""" data = bytes(buf) while True: idx = data.find(b"\r\n") if idx >= 0: # check if the request line is too large if idx > limit > 0: raise LimitRequestLine(idx, limit) break if len(data) - 2 > limit > 0: raise LimitRequestLine(len(data), limit) self.read_into(unreader, buf) data = bytes(buf) return (data[:idx], # request line, bytearray(data[idx + 2:])) # residue in the buffer, skip \r\n def read_bytes(self, unreader, buf, count): """Read exactly count bytes from buffer/unreader.""" while len(buf) < count: self.read_into(unreader, buf) return 
bytes(buf[:count]), bytearray(buf[count:]) def _handle_proxy_protocol(self, unreader, buf, mode): """Handle PROXY protocol detection and parsing. Returns the buffer with proxy protocol data consumed. """ # Ensure we have enough data to detect v2 signature (12 bytes) while len(buf) < 12: self.read_into(unreader, buf) # Check for v2 signature first if mode in ("v2", "auto") and buf[:12] == PP_V2_SIGNATURE: self.proxy_protocol_access_check() return self._parse_proxy_protocol_v2(unreader, buf) # Check for v1 prefix if mode in ("v1", "auto") and buf[:6] == b"PROXY ": self.proxy_protocol_access_check() return self._parse_proxy_protocol_v1(unreader, buf) # Not proxy protocol - return buffer unchanged return buf def proxy_protocol_access_check(self): """Check if proxy protocol is allowed from this peer.""" if (isinstance(self.peer_addr, tuple) and not _ip_in_allow_list(self.peer_addr[0], self.cfg.proxy_allow_ips, self.cfg.proxy_allow_networks())): raise ForbiddenProxyRequest(self.peer_addr[0]) def _parse_proxy_protocol_v1(self, unreader, buf): """Parse PROXY protocol v1 (text format). Returns buffer with v1 header consumed. 
""" # Read until we find \r\n data = bytes(buf) while b"\r\n" not in data: self.read_into(unreader, buf) data = bytes(buf) idx = data.find(b"\r\n") line = bytes_to_str(data[:idx]) remaining = bytearray(data[idx + 2:]) bits = line.split(" ") if len(bits) != 6: raise InvalidProxyLine(line) # Extract data proto = bits[1] s_addr = bits[2] d_addr = bits[3] # Validation if proto not in ["TCP4", "TCP6"]: raise InvalidProxyLine("protocol '%s' not supported" % proto) if proto == "TCP4": try: socket.inet_pton(socket.AF_INET, s_addr) socket.inet_pton(socket.AF_INET, d_addr) except OSError: raise InvalidProxyLine(line) elif proto == "TCP6": try: socket.inet_pton(socket.AF_INET6, s_addr) socket.inet_pton(socket.AF_INET6, d_addr) except OSError: raise InvalidProxyLine(line) try: s_port = int(bits[4]) d_port = int(bits[5]) except ValueError: raise InvalidProxyLine("invalid port %s" % line) if not ((0 <= s_port <= 65535) and (0 <= d_port <= 65535)): raise InvalidProxyLine("invalid port %s" % line) # Set data self.proxy_protocol_info = { "proxy_protocol": proto, "client_addr": s_addr, "client_port": s_port, "proxy_addr": d_addr, "proxy_port": d_port } return remaining def _parse_proxy_protocol_v2(self, unreader, buf): """Parse PROXY protocol v2 (binary format). Returns buffer with v2 header consumed. 
""" # We need at least 16 bytes for the header (12 signature + 4 header) while len(buf) < 16: self.read_into(unreader, buf) # Parse header fields (after 12-byte signature) ver_cmd = buf[12] fam_proto = buf[13] length = struct.unpack(">H", bytes(buf[14:16]))[0] # Validate version (high nibble must be 0x2) version = (ver_cmd & 0xF0) >> 4 if version != 2: raise InvalidProxyHeader("unsupported version %d" % version) # Extract command (low nibble) command = ver_cmd & 0x0F if command not in (PPCommand.LOCAL, PPCommand.PROXY): raise InvalidProxyHeader("unsupported command %d" % command) # Ensure we have the complete header total_header_size = 16 + length while len(buf) < total_header_size: self.read_into(unreader, buf) # For LOCAL command, no address info is provided if command == PPCommand.LOCAL: self.proxy_protocol_info = { "proxy_protocol": "LOCAL", "client_addr": None, "client_port": None, "proxy_addr": None, "proxy_port": None } return bytearray(buf[total_header_size:]) # Extract address family and protocol family = (fam_proto & 0xF0) >> 4 protocol = fam_proto & 0x0F # We only support TCP (STREAM) if protocol != PPProtocol.STREAM: raise InvalidProxyHeader("only TCP protocol is supported") addr_data = bytes(buf[16:16 + length]) if family == PPFamily.INET: # IPv4 if length < 12: # 4+4+2+2 raise InvalidProxyHeader("insufficient address data for IPv4") s_addr = socket.inet_ntop(socket.AF_INET, addr_data[0:4]) d_addr = socket.inet_ntop(socket.AF_INET, addr_data[4:8]) s_port = struct.unpack(">H", addr_data[8:10])[0] d_port = struct.unpack(">H", addr_data[10:12])[0] proto = "TCP4" elif family == PPFamily.INET6: # IPv6 if length < 36: # 16+16+2+2 raise InvalidProxyHeader("insufficient address data for IPv6") s_addr = socket.inet_ntop(socket.AF_INET6, addr_data[0:16]) d_addr = socket.inet_ntop(socket.AF_INET6, addr_data[16:32]) s_port = struct.unpack(">H", addr_data[32:34])[0] d_port = struct.unpack(">H", addr_data[34:36])[0] proto = "TCP6" elif family == PPFamily.UNSPEC: # 
No address info provided with PROXY command self.proxy_protocol_info = { "proxy_protocol": "UNSPEC", "client_addr": None, "client_port": None, "proxy_addr": None, "proxy_port": None } return bytearray(buf[total_header_size:]) else: raise InvalidProxyHeader("unsupported address family %d" % family) # Set data self.proxy_protocol_info = { "proxy_protocol": proto, "client_addr": s_addr, "client_port": s_port, "proxy_addr": d_addr, "proxy_port": d_port } return bytearray(buf[total_header_size:]) def parse_request_line(self, line_bytes): bits = [bytes_to_str(bit) for bit in line_bytes.split(b" ", 2)] if len(bits) != 3: raise InvalidRequestLine(bytes_to_str(line_bytes)) # Method: RFC9110 Section 9 self.method = bits[0] # nonstandard restriction, suitable for all IANA registered methods # partially enforced in previous gunicorn versions if not self.cfg.permit_unconventional_http_method: if METHOD_BADCHAR_RE.search(self.method): raise InvalidRequestMethod(self.method) if not 3 <= len(bits[0]) <= 20: raise InvalidRequestMethod(self.method) # standard restriction: RFC9110 token if not TOKEN_RE.fullmatch(self.method): raise InvalidRequestMethod(self.method) # nonstandard and dangerous # methods are merely uppercase by convention, no case-insensitive treatment is intended if self.cfg.casefold_http_method: self.method = self.method.upper() # URI self.uri = bits[1] # Python stdlib explicitly tells us it will not perform validation. # https://docs.python.org/3/library/urllib.parse.html#url-parsing-security # There are *four* `request-target` forms in rfc9112, none of them can be empty: # 1. origin-form, which starts with a slash # 2. absolute-form, which starts with a non-empty scheme # 3. authority-form, (for CONNECT) which contains a colon after the host # 4. 
asterisk-form, which is an asterisk (`\x2A`) # => manually reject one always invalid URI: empty if len(self.uri) == 0: raise InvalidRequestLine(bytes_to_str(line_bytes)) try: parts = split_request_uri(self.uri) except ValueError: raise InvalidRequestLine(bytes_to_str(line_bytes)) self.path = parts.path or "" self.query = parts.query or "" self.fragment = parts.fragment or "" # Version match = VERSION_RE.fullmatch(bits[2]) if match is None: raise InvalidHTTPVersion(bits[2]) self.version = (int(match.group(1)), int(match.group(2))) if not (1, 0) <= self.version < (2, 0): # if ever relaxing this, carefully review Content-Encoding processing if not self.cfg.permit_unconventional_http_version: raise InvalidHTTPVersion(self.version) def set_body_reader(self): super().set_body_reader() if isinstance(self.body.reader, EOFReader): self.body = Body(LengthReader(self.unreader, 0)) benoitc-gunicorn-f5fb19e/gunicorn/http/parser.py000066400000000000000000000034151514360242400221370ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import ssl from gunicorn.http.message import Request from gunicorn.http.unreader import SocketUnreader, IterUnreader class Parser: mesg_class = None def __init__(self, cfg, source, source_addr): self.cfg = cfg if hasattr(source, "recv"): self.unreader = SocketUnreader(source) else: self.unreader = IterUnreader(source) self.mesg = None self.source_addr = source_addr # request counter (for keepalive connetions) self.req_count = 0 def __iter__(self): return self def finish_body(self): """Discard any unread body of the current message. This should be called before returning a keepalive connection to the poller to ensure the socket doesn't appear readable due to leftover body bytes. 
""" if self.mesg: try: data = self.mesg.body.read(1024) while data: data = self.mesg.body.read(1024) except ssl.SSLWantReadError: # SSL socket has no more application data available pass def __next__(self): # Stop if HTTP dictates a stop. if self.mesg and self.mesg.should_close(): raise StopIteration() # Discard any unread body of the previous message self.finish_body() # Parse the next request self.req_count += 1 self.mesg = self.mesg_class(self.cfg, self.unreader, self.source_addr, self.req_count) if not self.mesg: raise StopIteration() return self.mesg next = __next__ class RequestParser(Parser): mesg_class = Request benoitc-gunicorn-f5fb19e/gunicorn/http/unreader.py000066400000000000000000000036631514360242400224550ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import io import os # Classes that can undo reading data from # a given type of data source. class Unreader: def __init__(self): self.buf = io.BytesIO() def chunk(self): raise NotImplementedError() def read(self, size=None): if size is not None and not isinstance(size, int): raise TypeError("size parameter must be an int or long.") if size is not None: if size == 0: return b"" if size < 0: size = None self.buf.seek(0, os.SEEK_END) if size is None and self.buf.tell(): ret = self.buf.getvalue() self.buf = io.BytesIO() return ret if size is None: d = self.chunk() return d while self.buf.tell() < size: chunk = self.chunk() if not chunk: ret = self.buf.getvalue() self.buf = io.BytesIO() return ret self.buf.write(chunk) data = self.buf.getvalue() self.buf = io.BytesIO() self.buf.write(data[size:]) return data[:size] def unread(self, data): rest = self.buf.getvalue() self.buf = io.BytesIO() self.buf.write(data) self.buf.write(rest) class SocketUnreader(Unreader): def __init__(self, sock, max_chunk=8192): super().__init__() self.sock = sock self.mxchunk = max_chunk def chunk(self): return self.sock.recv(self.mxchunk) class 
IterUnreader(Unreader): def __init__(self, iterable): super().__init__() self.iter = iter(iterable) def chunk(self): if not self.iter: return b"" try: return next(self.iter) except StopIteration: self.iter = None return b"" benoitc-gunicorn-f5fb19e/gunicorn/http/wsgi.py000066400000000000000000000353141514360242400216170ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import io import logging import os import re import sys from gunicorn.http.message import TOKEN_RE from gunicorn.http.errors import ConfigurationProblem, InvalidHeader, InvalidHeaderName from gunicorn import SERVER_SOFTWARE, SERVER from gunicorn import util # Send files in at most 1GB blocks as some operating systems can have problems # with sending files in blocks over 2GB. BLKSIZE = 0x3FFFFFFF # RFC9110 5.5: field-vchar = VCHAR / obs-text # RFC4234 B.1: VCHAR = 0x21-x07E = printable ASCII HEADER_VALUE_RE = re.compile(r'[ \t\x21-\x7e\x80-\xff]*') log = logging.getLogger(__name__) class FileWrapper: def __init__(self, filelike, blksize=8192): self.filelike = filelike self.blksize = blksize if hasattr(filelike, 'close'): self.close = filelike.close def __getitem__(self, key): data = self.filelike.read(self.blksize) if data: return data raise IndexError class WSGIErrorsWrapper(io.RawIOBase): def __init__(self, cfg): # There is no public __init__ method for RawIOBase so # we don't need to call super() in the __init__ method. 
# pylint: disable=super-init-not-called errorlog = logging.getLogger("gunicorn.error") handlers = errorlog.handlers self.streams = [] if cfg.errorlog == "-": self.streams.append(sys.stderr) handlers = handlers[1:] for h in handlers: if hasattr(h, "stream"): self.streams.append(h.stream) def write(self, data): for stream in self.streams: try: stream.write(data) except UnicodeError: stream.write(data.encode("UTF-8")) stream.flush() def base_environ(cfg): return { "wsgi.errors": WSGIErrorsWrapper(cfg), "wsgi.version": (1, 0), "wsgi.multithread": False, "wsgi.multiprocess": (cfg.workers > 1), "wsgi.run_once": False, "wsgi.file_wrapper": FileWrapper, "wsgi.input_terminated": True, "SERVER_SOFTWARE": SERVER_SOFTWARE, } def default_environ(req, sock, cfg): env = base_environ(cfg) env.update({ "wsgi.input": req.body, "gunicorn.socket": sock, "REQUEST_METHOD": req.method, "QUERY_STRING": req.query, "RAW_URI": req.uri, "SERVER_PROTOCOL": "HTTP/%s" % ".".join([str(v) for v in req.version]) }) return env def proxy_environ(req): info = req.proxy_protocol_info if not info: return {} return { "PROXY_PROTOCOL": info["proxy_protocol"], "REMOTE_ADDR": info["client_addr"], "REMOTE_PORT": str(info["client_port"]), "PROXY_ADDR": info["proxy_addr"], "PROXY_PORT": str(info["proxy_port"]), } def _make_early_hints_callback(req, sock, resp): """Create a wsgi.early_hints callback for sending 103 Early Hints. This allows WSGI applications to send 103 Early Hints responses before the final response, enabling browsers to preload resources. Args: req: The request object sock: The socket to write to resp: The Response object to check if headers have been sent Returns: A callback function that accepts a list of (name, value) header tuples and sends a 103 Early Hints response. 
Note: - Early hints are only sent for HTTP/1.1 or later clients - HTTP/1.0 clients will silently ignore the callback - Multiple calls are allowed (sending multiple 103 responses) - Calls after response has started are silently ignored """ def send_early_hints(headers): """Send 103 Early Hints response. Args: headers: List of (name, value) header tuples, typically Link headers Example: [('Link', '; rel=preload; as=style')] """ # Don't send after response has started - would break framing if resp.headers_sent: return # Don't send to HTTP/1.0 clients - they don't support 1xx responses if req.version < (1, 1): return # Build 103 response response = b"HTTP/1.1 103 Early Hints\r\n" for name, value in headers: if isinstance(name, bytes): name = name.decode('latin-1') if isinstance(value, bytes): value = value.decode('latin-1') response += f"{name}: {value}\r\n".encode('latin-1') response += b"\r\n" util.write(sock, response) return send_early_hints def create(req, sock, client, server, cfg): resp = Response(req, sock, cfg) # set initial environ environ = default_environ(req, sock, cfg) # default variables host = None script_name = os.environ.get("SCRIPT_NAME", "") if req._expected_100_continue: sock.send(b"HTTP/1.1 100 Continue\r\n\r\n") # rfc9112: Expect MUST be forwarded if the request is forwarded # N.B. 
gunicorn just sends at most one - application might send another # add the headers to the environ for hdr_name, hdr_value in req.headers: if hdr_name == 'HOST': host = hdr_value elif hdr_name == "SCRIPT_NAME": script_name = hdr_value elif hdr_name == "CONTENT-TYPE": environ['CONTENT_TYPE'] = hdr_value continue elif hdr_name == "CONTENT-LENGTH": environ['CONTENT_LENGTH'] = hdr_value continue # do not change lightly, this is a common source of security problems # RFC9110 Section 17.10 discourages ambiguous or incomplete mappings key = 'HTTP_' + hdr_name.replace('-', '_') if key in environ: hdr_value = "%s,%s" % (environ[key], hdr_value) environ[key] = hdr_value # set the url scheme environ['wsgi.url_scheme'] = req.scheme # set the REMOTE_* keys in environ # authors should be aware that REMOTE_HOST and REMOTE_ADDR # may not qualify the remote addr: # http://www.ietf.org/rfc/rfc3875 if isinstance(client, str): environ['REMOTE_ADDR'] = client elif isinstance(client, bytes): environ['REMOTE_ADDR'] = client.decode() else: environ['REMOTE_ADDR'] = client[0] environ['REMOTE_PORT'] = str(client[1]) # handle the SERVER_* # Normally only the application should use the Host header but since the # WSGI spec doesn't support unix sockets, we are using it to create # viable SERVER_* if possible. if isinstance(server, str): server = server.split(":") if len(server) == 1: # unix socket if host: server = host.split(':') if len(server) == 1: if req.scheme == "http": server.append(80) elif req.scheme == "https": server.append(443) else: server.append('') else: # no host header given which means that we are not behind a # proxy, so append an empty port. 
server.append('') environ['SERVER_NAME'] = server[0] environ['SERVER_PORT'] = str(server[1]) # set the path and script name path_info = req.path if script_name: if not path_info.startswith(script_name): raise ConfigurationProblem( "Request path %r does not start with SCRIPT_NAME %r" % (path_info, script_name)) path_info = path_info[len(script_name):] environ['PATH_INFO'] = util.unquote_to_wsgi_str(path_info) environ['SCRIPT_NAME'] = script_name # override the environ with the correct remote and server address if # we are behind a proxy using the proxy protocol. environ.update(proxy_environ(req)) # Add wsgi.early_hints callback for sending 103 Early Hints environ['wsgi.early_hints'] = _make_early_hints_callback(req, sock, resp) # Add HTTP/2 stream priority if available if hasattr(req, 'priority_weight'): environ['gunicorn.http2.priority_weight'] = req.priority_weight environ['gunicorn.http2.priority_depends_on'] = req.priority_depends_on return resp, environ class Response: def __init__(self, req, sock, cfg): self.req = req self.sock = sock self.version = SERVER self.status = None self.chunked = False self.must_close = False self.headers = [] self.headers_sent = False self.response_length = None self.sent = 0 self.upgrade = False self.cfg = cfg def force_close(self): self.must_close = True def should_close(self): if self.must_close or self.req.should_close(): return True if self.response_length is not None or self.chunked: return False if self.req.method == 'HEAD': return False if self.status_code < 200 or self.status_code in (204, 304): return False return True def start_response(self, status, headers, exc_info=None): if exc_info: try: if self.status and self.headers_sent: util.reraise(exc_info[0], exc_info[1], exc_info[2]) finally: exc_info = None elif self.status is not None: raise AssertionError("Response headers already set!") self.status = status # get the status code from the response here so we can use it to check # the need for the connection header later 
without parsing the string # each time. try: self.status_code = int(self.status.split()[0]) except ValueError: self.status_code = None self.process_headers(headers) self.chunked = self.is_chunked() return self.write def process_headers(self, headers): for name, value in headers: if not isinstance(name, str): raise TypeError('%r is not a string' % name) if not TOKEN_RE.fullmatch(name): raise InvalidHeaderName('%r' % name) if not isinstance(value, str): raise TypeError('%r is not a string' % value) if not HEADER_VALUE_RE.fullmatch(value): raise InvalidHeader('%r' % value) # RFC9110 5.5 value = value.strip(" \t") lname = name.lower() if lname == "content-length": self.response_length = int(value) elif util.is_hoppish(name): if lname == "connection": # handle websocket if value.lower() == "upgrade": self.upgrade = True elif lname == "upgrade": if value.lower() == "websocket": self.headers.append((name, value)) # ignore hopbyhop headers continue self.headers.append((name, value)) def is_chunked(self): # Only use chunked responses when the client is # speaking HTTP/1.1 or newer and there was # no Content-Length header set. if self.response_length is not None: return False elif self.req.version <= (1, 0): return False elif self.req.method == 'HEAD': # Responses to a HEAD request MUST NOT contain a response body. return False elif self.status_code in (204, 304): # Do not use chunked responses when the response is guaranteed to # not have a response body. 
return False return True def default_headers(self): # set the connection header if self.upgrade: connection = "upgrade" elif self.should_close(): connection = "close" else: connection = "keep-alive" headers = [ "HTTP/%s.%s %s\r\n" % (self.req.version[0], self.req.version[1], self.status), "Server: %s\r\n" % self.version, "Date: %s\r\n" % util.http_date(), "Connection: %s\r\n" % connection ] if self.chunked: headers.append("Transfer-Encoding: chunked\r\n") return headers def send_headers(self): if self.headers_sent: return tosend = self.default_headers() tosend.extend(["%s: %s\r\n" % (k, v) for k, v in self.headers]) header_str = "%s\r\n" % "".join(tosend) util.write(self.sock, util.to_bytestring(header_str, "latin-1")) self.headers_sent = True def write(self, arg): self.send_headers() if not isinstance(arg, bytes): raise TypeError('%r is not a byte' % arg) arglen = len(arg) tosend = arglen if self.response_length is not None: if self.sent >= self.response_length: # Never write more than self.response_length bytes return tosend = min(self.response_length - self.sent, tosend) if tosend < arglen: arg = arg[:tosend] # Sending an empty chunk signals the end of the # response and prematurely closes the response if self.chunked and tosend == 0: return self.sent += tosend util.write(self.sock, arg, self.chunked) def can_sendfile(self): return self.cfg.sendfile is not False def sendfile(self, respiter): if self.cfg.is_ssl or not self.can_sendfile(): return False if not util.has_fileno(respiter.filelike): return False fileno = respiter.filelike.fileno() try: offset = os.lseek(fileno, 0, os.SEEK_CUR) if self.response_length is None: filesize = os.fstat(fileno).st_size nbytes = filesize - offset else: nbytes = self.response_length except (OSError, io.UnsupportedOperation): return False self.send_headers() if self.is_chunked(): chunk_size = "%X\r\n" % nbytes self.sock.sendall(chunk_size.encode('utf-8')) if nbytes > 0: self.sock.sendfile(respiter.filelike, offset=offset, 
count=nbytes) if self.is_chunked(): self.sock.sendall(b"\r\n") os.lseek(fileno, offset, os.SEEK_SET) return True def write_file(self, respiter): if not self.sendfile(respiter): for item in respiter: self.write(item) def close(self): if not self.headers_sent: self.send_headers() if self.chunked: util.write_chunk(self.sock, b"") benoitc-gunicorn-f5fb19e/gunicorn/http2/000077500000000000000000000000001514360242400203505ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/gunicorn/http2/__init__.py000066400000000000000000000042641514360242400224670ustar00rootroot00000000000000# -*- coding: utf-8 - # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ HTTP/2 support for Gunicorn. This module provides HTTP/2 protocol support using the hyper-h2 library. HTTP/2 requires TLS with ALPN negotiation. """ H2_MIN_VERSION = (4, 1, 0) _h2_available = None _h2_version = None def is_http2_available(): """Check if HTTP/2 support is available. Returns: bool: True if the h2 library is installed with minimum required version. """ global _h2_available, _h2_version # pylint: disable=global-statement if _h2_available is not None: return _h2_available try: import h2 version_str = getattr(h2, '__version__', '0.0.0') version_parts = tuple(int(x) for x in version_str.split('.')[:3]) _h2_version = version_parts _h2_available = version_parts >= H2_MIN_VERSION except ImportError: _h2_available = False _h2_version = None return _h2_available def get_h2_version(): """Get the installed h2 library version. Returns: tuple: Version tuple (major, minor, patch) or None if not installed. """ if _h2_version is None: is_http2_available() # Populate _h2_version return _h2_version def get_http2_connection_class(): """Get the HTTP2ServerConnection class if h2 is available. 
Returns: HTTP2ServerConnection class, or raises HTTP2NotAvailable """ if not is_http2_available(): from .errors import HTTP2NotAvailable raise HTTP2NotAvailable() from .connection import HTTP2ServerConnection return HTTP2ServerConnection def get_async_http2_connection_class(): """Get the AsyncHTTP2Connection class if h2 is available. Returns: AsyncHTTP2Connection class, or raises HTTP2NotAvailable """ if not is_http2_available(): from .errors import HTTP2NotAvailable raise HTTP2NotAvailable() from .async_connection import AsyncHTTP2Connection return AsyncHTTP2Connection __all__ = [ 'is_http2_available', 'get_h2_version', 'get_http2_connection_class', 'get_async_http2_connection_class', 'H2_MIN_VERSION', ] benoitc-gunicorn-f5fb19e/gunicorn/http2/async_connection.py000066400000000000000000000502411514360242400242600ustar00rootroot00000000000000# -*- coding: utf-8 - # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Async HTTP/2 server connection implementation for ASGI workers. Uses the hyper-h2 library for HTTP/2 protocol handling with asyncio for non-blocking I/O. """ import asyncio from .errors import ( HTTP2Error, HTTP2ProtocolError, HTTP2ConnectionError, HTTP2NotAvailable, HTTP2ErrorCode, ) from .stream import HTTP2Stream from .request import HTTP2Request # Import h2 lazily to allow graceful fallback _h2 = None _h2_config = None _h2_events = None _h2_exceptions = None _h2_settings = None def _import_h2(): """Lazily import h2 library components.""" global _h2, _h2_config, _h2_events, _h2_exceptions, _h2_settings # pylint: disable=global-statement if _h2 is not None: return try: import h2.connection as _h2 import h2.config as _h2_config import h2.events as _h2_events import h2.exceptions as _h2_exceptions import h2.settings as _h2_settings except ImportError: raise HTTP2NotAvailable() class AsyncHTTP2Connection: """Async HTTP/2 server-side connection handler for ASGI. 
Manages the HTTP/2 connection state and multiplexed streams using asyncio for non-blocking I/O operations. """ # Default buffer size for socket reads READ_BUFFER_SIZE = 65536 def __init__(self, cfg, reader, writer, client_addr): """Initialize an async HTTP/2 server connection. Args: cfg: Gunicorn configuration object reader: asyncio StreamReader writer: asyncio StreamWriter client_addr: Client address tuple (host, port) Raises: HTTP2NotAvailable: If h2 library is not installed """ _import_h2() self.cfg = cfg self.reader = reader self.writer = writer self.client_addr = client_addr # Active streams indexed by stream ID self.streams = {} # Queue of completed requests for the worker self._request_queue = asyncio.Queue() # Connection settings from config self.initial_window_size = cfg.http2_initial_window_size self.max_concurrent_streams = cfg.http2_max_concurrent_streams self.max_frame_size = cfg.http2_max_frame_size self.max_header_list_size = cfg.http2_max_header_list_size # Initialize h2 connection config = _h2_config.H2Configuration( client_side=False, header_encoding='utf-8', ) self.h2_conn = _h2.H2Connection(config=config) # Connection state self._closed = False self._initialized = False self._receive_task = None async def initiate_connection(self): """Send initial HTTP/2 settings to client. Should be called after the SSL handshake completes and before processing any data. """ if self._initialized: return # Update local settings before initiating self.h2_conn.update_settings({ _h2_settings.SettingCodes.MAX_CONCURRENT_STREAMS: self.max_concurrent_streams, _h2_settings.SettingCodes.INITIAL_WINDOW_SIZE: self.initial_window_size, _h2_settings.SettingCodes.MAX_FRAME_SIZE: self.max_frame_size, _h2_settings.SettingCodes.MAX_HEADER_LIST_SIZE: self.max_header_list_size, }) self.h2_conn.initiate_connection() await self._send_pending_data() self._initialized = True async def receive_data(self, timeout=None): """Receive data and return completed requests. 
Args: timeout: Optional timeout in seconds for read operation Returns: list: List of HTTP2Request objects for completed requests Raises: HTTP2ConnectionError: On protocol or connection errors asyncio.TimeoutError: If timeout expires """ try: if timeout is not None: data = await asyncio.wait_for( self.reader.read(self.READ_BUFFER_SIZE), timeout=timeout ) else: data = await self.reader.read(self.READ_BUFFER_SIZE) except (OSError, IOError) as e: raise HTTP2ConnectionError(f"Socket read error: {e}") if not data: # Connection closed by peer self._closed = True return [] # Feed data to h2 # Note: Specific exceptions must come before ProtocolError (their parent class) try: events = self.h2_conn.receive_data(data) except _h2_exceptions.FlowControlError as e: # Send GOAWAY with FLOW_CONTROL_ERROR await self.close(error_code=HTTP2ErrorCode.FLOW_CONTROL_ERROR) raise HTTP2ProtocolError(str(e)) except _h2_exceptions.FrameTooLargeError as e: # Send GOAWAY with FRAME_SIZE_ERROR await self.close(error_code=HTTP2ErrorCode.FRAME_SIZE_ERROR) raise HTTP2ProtocolError(str(e)) except _h2_exceptions.InvalidSettingsValueError as e: # Use error_code from h2 exception (RFC 7540 Section 6.5.2): # INITIAL_WINDOW_SIZE > 2^31-1 gives FLOW_CONTROL_ERROR # Other invalid settings give PROTOCOL_ERROR error_code = getattr(e, 'error_code', None) if error_code is not None: await self.close(error_code=error_code) else: await self.close(error_code=HTTP2ErrorCode.PROTOCOL_ERROR) raise HTTP2ProtocolError(str(e)) except _h2_exceptions.TooManyStreamsError as e: # Send GOAWAY with REFUSED_STREAM await self.close(error_code=HTTP2ErrorCode.REFUSED_STREAM) raise HTTP2ProtocolError(str(e)) except _h2_exceptions.ProtocolError as e: # Send GOAWAY with PROTOCOL_ERROR before raising await self.close(error_code=HTTP2ErrorCode.PROTOCOL_ERROR) raise HTTP2ProtocolError(str(e)) # Process events completed_requests = [] for event in events: request = self._handle_event(event) if request is not None: 
completed_requests.append(request) # Send any pending data (WINDOW_UPDATE, etc.) await self._send_pending_data() return completed_requests def _handle_event(self, event): """Handle a single h2 event. Args: event: h2 event object Returns: HTTP2Request if a request is complete, None otherwise """ if isinstance(event, _h2_events.RequestReceived): return self._handle_request_received(event) elif isinstance(event, _h2_events.DataReceived): return self._handle_data_received(event) elif isinstance(event, _h2_events.StreamEnded): return self._handle_stream_ended(event) elif isinstance(event, _h2_events.StreamReset): self._handle_stream_reset(event) elif isinstance(event, _h2_events.WindowUpdated): pass # Flow control update, handled by h2 elif isinstance(event, _h2_events.PriorityUpdated): self._handle_priority_updated(event) elif isinstance(event, _h2_events.SettingsAcknowledged): pass # Settings ACK received elif isinstance(event, _h2_events.ConnectionTerminated): self._handle_connection_terminated(event) elif isinstance(event, _h2_events.TrailersReceived): return self._handle_trailers_received(event) return None def _handle_request_received(self, event): """Handle RequestReceived event (HEADERS frame).""" stream_id = event.stream_id headers = event.headers # Create new stream stream = HTTP2Stream(stream_id, self) self.streams[stream_id] = stream # Process headers stream.receive_headers(headers, end_stream=False) def _handle_data_received(self, event): """Handle DataReceived event.""" stream_id = event.stream_id data = event.data stream = self.streams.get(stream_id) if stream is None: return None stream.receive_data(data, end_stream=False) # Increment flow control windows (only if data received) if len(data) > 0: try: # Update stream-level window self.h2_conn.increment_flow_control_window(len(data), stream_id=stream_id) # Update connection-level window self.h2_conn.increment_flow_control_window(len(data), stream_id=None) except (ValueError, 
_h2_exceptions.FlowControlError): # Window overflow - prepare GOAWAY with FLOW_CONTROL_ERROR # (will be sent by receive_data's _send_pending_data call) self._closed = True try: self.h2_conn.close_connection(error_code=HTTP2ErrorCode.FLOW_CONTROL_ERROR) except Exception: pass return None def _handle_stream_ended(self, event): """Handle StreamEnded event.""" stream_id = event.stream_id stream = self.streams.get(stream_id) if stream is None: return None stream.request_complete = True return HTTP2Request(stream, self.cfg, self.client_addr) def _handle_stream_reset(self, event): """Handle StreamReset event.""" stream_id = event.stream_id stream = self.streams.get(stream_id) if stream is not None: stream.reset(event.error_code) def _handle_connection_terminated(self, event): """Handle ConnectionTerminated event.""" self._closed = True def _handle_trailers_received(self, event): """Handle TrailersReceived event.""" stream_id = event.stream_id stream = self.streams.get(stream_id) if stream is None: return None stream.receive_trailers(event.headers) return HTTP2Request(stream, self.cfg, self.client_addr) def _handle_priority_updated(self, event): """Handle PriorityUpdated event (PRIORITY frame). Args: event: PriorityUpdated event with priority info """ stream = self.streams.get(event.stream_id) if stream is not None: stream.update_priority( weight=event.weight, depends_on=event.depends_on, exclusive=event.exclusive ) async def send_informational(self, stream_id, status, headers): """Send an informational response (1xx) on a stream. This is used for 103 Early Hints and other 1xx responses. Informational responses are sent before the final response and do not end the stream. 
Args: stream_id: The stream ID status: HTTP status code (100-199) headers: List of (name, value) header tuples Raises: HTTP2Error: If status is not in 1xx range """ if status < 100 or status >= 200: raise HTTP2Error(f"Invalid informational status: {status}") stream = self.streams.get(stream_id) if stream is None: raise HTTP2Error(f"Stream {stream_id} not found") # Build headers with :status pseudo-header response_headers = [(':status', str(status))] for name, value in headers: # HTTP/2 headers must be lowercase response_headers.append((name.lower(), str(value))) # Send headers with end_stream=False (informational, more to follow) self.h2_conn.send_headers(stream_id, response_headers, end_stream=False) await self._send_pending_data() async def send_response(self, stream_id, status, headers, body=None): """Send a response on a stream. Args: stream_id: The stream ID to respond on status: HTTP status code (int) headers: List of (name, value) header tuples body: Optional response body bytes Returns: bool: True if response sent, False if stream was already closed """ stream = self.streams.get(stream_id) if stream is None: # Stream was already cleaned up (reset/closed) - return gracefully return False # Build response headers with :status pseudo-header response_headers = [(':status', str(status))] for name, value in headers: response_headers.append((name.lower(), str(value))) end_stream = body is None or len(body) == 0 try: # Send headers self.h2_conn.send_headers(stream_id, response_headers, end_stream=end_stream) stream.send_headers(response_headers, end_stream=end_stream) await self._send_pending_data() # Send body if present if body and len(body) > 0: await self.send_data(stream_id, body, end_stream=True) return True except _h2_exceptions.StreamClosedError: # Stream was reset by client - clean up gracefully stream.close() self.cleanup_stream(stream_id) return False async def _wait_for_flow_control_window(self, stream_id): """Wait for flow control window to become 
positive. Returns: int: Available window size, or -1 if waiting failed """ max_wait_attempts = 50 # ~5 seconds at 100ms per attempt for _ in range(max_wait_attempts): available = self.h2_conn.local_flow_control_window(stream_id) if available > 0: return available # Read more data from connection (may receive WINDOW_UPDATE) try: incoming = await asyncio.wait_for( self.reader.read(self.READ_BUFFER_SIZE), timeout=0.1 ) if incoming: events = self.h2_conn.receive_data(incoming) # Process events but don't create new requests for event in events: if isinstance(event, _h2_events.StreamReset): if event.stream_id == stream_id: return -1 elif isinstance(event, _h2_events.ConnectionTerminated): self._closed = True return -1 await self._send_pending_data() else: # Connection closed self._closed = True return -1 except asyncio.TimeoutError: continue except _h2_exceptions.ProtocolError: return -1 return self.h2_conn.local_flow_control_window(stream_id) async def send_data(self, stream_id, data, end_stream=False): """Send data on a stream. 
Args: stream_id: The stream ID data: Body data bytes end_stream: Whether this ends the stream Returns: bool: True if data sent, False if stream was already closed """ stream = self.streams.get(stream_id) if stream is None: return False data_to_send = data try: while data_to_send: available = self.h2_conn.local_flow_control_window(stream_id) chunk_size = min(available, self.max_frame_size, len(data_to_send)) if chunk_size <= 0: # Wait for WINDOW_UPDATE per RFC 7540 Section 6.9.2 await self._send_pending_data() available = await self._wait_for_flow_control_window(stream_id) if available <= 0: return False chunk_size = min(available, self.max_frame_size, len(data_to_send)) chunk = data_to_send[:chunk_size] data_to_send = data_to_send[chunk_size:] is_final = end_stream and len(data_to_send) == 0 self.h2_conn.send_data(stream_id, chunk, end_stream=is_final) await self._send_pending_data() stream.send_data(data, end_stream=end_stream) return True except (_h2_exceptions.StreamClosedError, _h2_exceptions.FlowControlError): stream.close() self.cleanup_stream(stream_id) return False async def send_trailers(self, stream_id, trailers): """Send trailing headers on a stream. Trailers are headers sent after the response body, commonly used for gRPC status codes, checksums, and timing information. 
Args: stream_id: The stream ID trailers: List of (name, value) trailer tuples Raises: HTTP2Error: If stream not found, headers not sent, or pseudo-headers used Returns: bool: True if trailers sent, False if stream was already closed """ stream = self.streams.get(stream_id) if stream is None: # Stream was already cleaned up (reset/closed) - return gracefully return False if not stream.response_headers_sent: # Can't send trailers without headers - return False return False # Validate and normalize trailer headers trailer_headers = [] for name, value in trailers: lname = name.lower() if lname.startswith(':'): raise HTTP2Error(f"Pseudo-header '{name}' not allowed in trailers") trailer_headers.append((lname, str(value))) try: # Send trailers with end_stream=True self.h2_conn.send_headers(stream_id, trailer_headers, end_stream=True) stream.send_trailers(trailer_headers) await self._send_pending_data() return True except _h2_exceptions.StreamClosedError: # Stream was reset by client - clean up gracefully stream.close() self.cleanup_stream(stream_id) return False async def send_error(self, stream_id, status_code, message=None): """Send an error response on a stream.""" body = message.encode() if message else b'' headers = [('content-length', str(len(body)))] if body: headers.append(('content-type', 'text/plain; charset=utf-8')) await self.send_response(stream_id, status_code, headers, body) async def reset_stream(self, stream_id, error_code=0x8): """Reset a stream with RST_STREAM.""" stream = self.streams.get(stream_id) if stream is not None: stream.reset(error_code) self.h2_conn.reset_stream(stream_id, error_code=error_code) await self._send_pending_data() async def close(self, error_code=0x0, last_stream_id=None): """Close the connection gracefully with GOAWAY.""" if self._closed: return self._closed = True if last_stream_id is None: last_stream_id = max(self.streams.keys()) if self.streams else 0 try: self.h2_conn.close_connection(error_code=error_code) await 
self._send_pending_data() except Exception: pass try: self.writer.close() await self.writer.wait_closed() except Exception: pass async def _send_pending_data(self): """Send any pending data from h2 to the socket.""" data = self.h2_conn.data_to_send() if data: try: self.writer.write(data) await self.writer.drain() except (OSError, IOError) as e: self._closed = True raise HTTP2ConnectionError(f"Socket write error: {e}") @property def is_closed(self): """Check if connection is closed.""" return self._closed def cleanup_stream(self, stream_id): """Remove a stream after processing is complete.""" self.streams.pop(stream_id, None) def __repr__(self): return ( f"" ) __all__ = ['AsyncHTTP2Connection'] benoitc-gunicorn-f5fb19e/gunicorn/http2/connection.py000066400000000000000000000534611514360242400230720ustar00rootroot00000000000000# -*- coding: utf-8 - # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ HTTP/2 server connection implementation. Uses the hyper-h2 library for HTTP/2 protocol handling. """ from io import BytesIO from .errors import ( HTTP2Error, HTTP2ProtocolError, HTTP2ConnectionError, HTTP2NotAvailable, HTTP2ErrorCode, ) from .stream import HTTP2Stream from .request import HTTP2Request # Import h2 lazily to allow graceful fallback _h2 = None _h2_config = None _h2_events = None _h2_exceptions = None _h2_settings = None def _import_h2(): """Lazily import h2 library components.""" global _h2, _h2_config, _h2_events, _h2_exceptions, _h2_settings # pylint: disable=global-statement if _h2 is not None: return try: import h2.connection as _h2 import h2.config as _h2_config import h2.events as _h2_events import h2.exceptions as _h2_exceptions import h2.settings as _h2_settings except ImportError: raise HTTP2NotAvailable() class HTTP2ServerConnection: """HTTP/2 server-side connection handler. Manages the HTTP/2 connection state and multiplexed streams. 
This class wraps the h2 library and provides a higher-level interface for gunicorn workers. """ # Default buffer size for socket reads READ_BUFFER_SIZE = 65536 def __init__(self, cfg, sock, client_addr): """Initialize an HTTP/2 server connection. Args: cfg: Gunicorn configuration object sock: SSL socket with completed handshake client_addr: Client address tuple (host, port) Raises: HTTP2NotAvailable: If h2 library is not installed """ _import_h2() self.cfg = cfg self.sock = sock self.client_addr = client_addr # Active streams indexed by stream ID self.streams = {} # Completed requests ready for processing self._pending_requests = [] # Connection settings from config self.initial_window_size = cfg.http2_initial_window_size self.max_concurrent_streams = cfg.http2_max_concurrent_streams self.max_frame_size = cfg.http2_max_frame_size self.max_header_list_size = cfg.http2_max_header_list_size # Initialize h2 connection config = _h2_config.H2Configuration( client_side=False, header_encoding='utf-8', ) self.h2_conn = _h2.H2Connection(config=config) # Read buffer for partial frames self._read_buffer = BytesIO() # Connection state self._closed = False self._initialized = False def initiate_connection(self): """Send initial HTTP/2 settings to client. Should be called after the SSL handshake completes and before processing any data. """ if self._initialized: return # Update local settings before initiating self.h2_conn.update_settings({ _h2_settings.SettingCodes.MAX_CONCURRENT_STREAMS: self.max_concurrent_streams, _h2_settings.SettingCodes.INITIAL_WINDOW_SIZE: self.initial_window_size, _h2_settings.SettingCodes.MAX_FRAME_SIZE: self.max_frame_size, _h2_settings.SettingCodes.MAX_HEADER_LIST_SIZE: self.max_header_list_size, }) self.h2_conn.initiate_connection() self._send_pending_data() self._initialized = True def receive_data(self, data=None): """Process received data and return completed requests. Args: data: Optional bytes to process. If None, reads from socket. 
Returns: list: List of HTTP2Request objects for completed requests Raises: HTTP2ConnectionError: On protocol or connection errors """ if data is None: try: data = self.sock.recv(self.READ_BUFFER_SIZE) except (OSError, IOError) as e: raise HTTP2ConnectionError(f"Socket read error: {e}") if not data: # Connection closed by peer self._closed = True return [] # Feed data to h2 # Note: Specific exceptions must come before ProtocolError (their parent class) try: events = self.h2_conn.receive_data(data) except _h2_exceptions.FlowControlError as e: # Send GOAWAY with FLOW_CONTROL_ERROR self.close(error_code=HTTP2ErrorCode.FLOW_CONTROL_ERROR) raise HTTP2ProtocolError(str(e)) except _h2_exceptions.FrameTooLargeError as e: # Send GOAWAY with FRAME_SIZE_ERROR self.close(error_code=HTTP2ErrorCode.FRAME_SIZE_ERROR) raise HTTP2ProtocolError(str(e)) except _h2_exceptions.InvalidSettingsValueError as e: # Use error_code from h2 exception (RFC 7540 Section 6.5.2): # INITIAL_WINDOW_SIZE > 2^31-1 gives FLOW_CONTROL_ERROR # Other invalid settings give PROTOCOL_ERROR error_code = getattr(e, 'error_code', None) if error_code is not None: self.close(error_code=error_code) else: self.close(error_code=HTTP2ErrorCode.PROTOCOL_ERROR) raise HTTP2ProtocolError(str(e)) except _h2_exceptions.TooManyStreamsError as e: # Send GOAWAY with REFUSED_STREAM self.close(error_code=HTTP2ErrorCode.REFUSED_STREAM) raise HTTP2ProtocolError(str(e)) except _h2_exceptions.ProtocolError as e: # Send GOAWAY with PROTOCOL_ERROR before raising self.close(error_code=HTTP2ErrorCode.PROTOCOL_ERROR) raise HTTP2ProtocolError(str(e)) # Process events completed_requests = [] for event in events: request = self._handle_event(event) if request is not None: completed_requests.append(request) # Send any pending data (WINDOW_UPDATE, etc.) self._send_pending_data() return completed_requests def _handle_event(self, event): """Handle a single h2 event. 
Args: event: h2 event object Returns: HTTP2Request if a request is complete, None otherwise """ if isinstance(event, _h2_events.RequestReceived): return self._handle_request_received(event) elif isinstance(event, _h2_events.DataReceived): return self._handle_data_received(event) elif isinstance(event, _h2_events.StreamEnded): return self._handle_stream_ended(event) elif isinstance(event, _h2_events.StreamReset): self._handle_stream_reset(event) elif isinstance(event, _h2_events.WindowUpdated): pass # Flow control update, handled by h2 elif isinstance(event, _h2_events.PriorityUpdated): self._handle_priority_updated(event) elif isinstance(event, _h2_events.SettingsAcknowledged): pass # Settings ACK received elif isinstance(event, _h2_events.ConnectionTerminated): self._handle_connection_terminated(event) elif isinstance(event, _h2_events.TrailersReceived): return self._handle_trailers_received(event) return None def _handle_request_received(self, event): """Handle RequestReceived event (HEADERS frame). Args: event: RequestReceived event with headers """ stream_id = event.stream_id headers = event.headers # Create new stream stream = HTTP2Stream(stream_id, self) self.streams[stream_id] = stream # Process headers # The StreamEnded event will come separately for GET/HEAD with no body stream.receive_headers(headers, end_stream=False) def _handle_data_received(self, event): """Handle DataReceived event. 
Args: event: DataReceived event with body data Returns: None (request completion handled by StreamEnded) """ stream_id = event.stream_id data = event.data stream = self.streams.get(stream_id) if stream is None: # Stream was reset or doesn't exist return None stream.receive_data(data, end_stream=False) # Increment flow control windows (only if data received) if len(data) > 0: try: # Update stream-level window self.h2_conn.increment_flow_control_window(len(data), stream_id=stream_id) # Update connection-level window self.h2_conn.increment_flow_control_window(len(data), stream_id=None) # Send WINDOW_UPDATE frames immediately self._send_pending_data() except (ValueError, _h2_exceptions.FlowControlError): # Window overflow - send FLOW_CONTROL_ERROR and close self.close(error_code=HTTP2ErrorCode.FLOW_CONTROL_ERROR) return None def _handle_stream_ended(self, event): """Handle StreamEnded event. Args: event: StreamEnded event Returns: HTTP2Request for the completed request """ stream_id = event.stream_id stream = self.streams.get(stream_id) if stream is None: return None # Mark stream as request complete stream.request_complete = True # Create request object return HTTP2Request(stream, self.cfg, self.client_addr) def _handle_stream_reset(self, event): """Handle StreamReset event (RST_STREAM frame). Args: event: StreamReset event """ stream_id = event.stream_id stream = self.streams.get(stream_id) if stream is not None: stream.reset(event.error_code) # Keep stream in dict for potential cleanup def _handle_connection_terminated(self, event): """Handle ConnectionTerminated event (GOAWAY frame). Args: event: ConnectionTerminated event """ self._closed = True # Could log event.error_code and event.additional_data def _handle_trailers_received(self, event): """Handle TrailersReceived event. 
Args: event: TrailersReceived event with trailer headers Returns: HTTP2Request if this completes the request """ stream_id = event.stream_id stream = self.streams.get(stream_id) if stream is None: return None stream.receive_trailers(event.headers) # Trailers always end the request return HTTP2Request(stream, self.cfg, self.client_addr) def _handle_priority_updated(self, event): """Handle PriorityUpdated event (PRIORITY frame). Args: event: PriorityUpdated event with priority info """ stream = self.streams.get(event.stream_id) if stream is not None: stream.update_priority( weight=event.weight, depends_on=event.depends_on, exclusive=event.exclusive ) def send_informational(self, stream_id, status, headers): """Send an informational response (1xx) on a stream. This is used for 103 Early Hints and other 1xx responses. Informational responses are sent before the final response and do not end the stream. Args: stream_id: The stream ID status: HTTP status code (100-199) headers: List of (name, value) header tuples Raises: HTTP2Error: If status is not in 1xx range """ if status < 100 or status >= 200: raise HTTP2Error(f"Invalid informational status: {status}") stream = self.streams.get(stream_id) if stream is None: raise HTTP2Error(f"Stream {stream_id} not found") # Build headers with :status pseudo-header response_headers = [(':status', str(status))] for name, value in headers: # HTTP/2 headers must be lowercase response_headers.append((name.lower(), str(value))) # Send headers with end_stream=False (informational, more to follow) self.h2_conn.send_headers(stream_id, response_headers, end_stream=False) self._send_pending_data() def send_response(self, stream_id, status, headers, body=None): """Send a response on a stream. 
Args: stream_id: The stream ID to respond on status: HTTP status code (int) headers: List of (name, value) header tuples body: Optional response body bytes Raises: HTTP2Error: If stream not found or in invalid state Returns: bool: True if response sent, False if stream was already closed """ stream = self.streams.get(stream_id) if stream is None: # Stream was already cleaned up (reset/closed) - return gracefully return False # Build response headers with :status pseudo-header response_headers = [(':status', str(status))] for name, value in headers: # HTTP/2 headers must be lowercase response_headers.append((name.lower(), str(value))) end_stream = body is None or len(body) == 0 try: # Send headers self.h2_conn.send_headers(stream_id, response_headers, end_stream=end_stream) stream.send_headers(response_headers, end_stream=end_stream) self._send_pending_data() # Send body if present if body and len(body) > 0: self.send_data(stream_id, body, end_stream=True) return True except _h2_exceptions.StreamClosedError: # Stream was reset by client - clean up gracefully stream.close() self.cleanup_stream(stream_id) return False def _wait_for_flow_control_window(self, stream_id): """Wait for flow control window to become positive. 
Returns: int: Available window size, or -1 if waiting failed """ import selectors max_wait_attempts = 50 # ~5 seconds at 100ms per attempt try: sel = selectors.DefaultSelector() sel.register(self.sock, selectors.EVENT_READ) except (TypeError, ValueError): # Socket doesn't support selectors (e.g., mock socket) return -1 result = -1 try: for _ in range(max_wait_attempts): available = self.h2_conn.local_flow_control_window(stream_id) if available > 0: result = available break ready = sel.select(timeout=0.1) if ready: try: incoming = self.sock.recv(self.READ_BUFFER_SIZE) except (OSError, IOError, _h2_exceptions.ProtocolError): break if not incoming: self._closed = True break try: events = self.h2_conn.receive_data(incoming) except _h2_exceptions.ProtocolError: break for event in events: if isinstance(event, _h2_events.StreamReset): if event.stream_id == stream_id: result = -1 break elif isinstance(event, _h2_events.ConnectionTerminated): self._closed = True result = -1 break else: self._send_pending_data() continue break # Break outer loop if inner loop broke else: # Loop completed without break - check final window result = self.h2_conn.local_flow_control_window(stream_id) finally: sel.close() return result def send_data(self, stream_id, data, end_stream=False): """Send data on a stream. 
Args: stream_id: The stream ID data: Body data bytes end_stream: Whether this ends the stream Returns: bool: True if data sent, False if stream was already closed """ stream = self.streams.get(stream_id) if stream is None: return False data_to_send = data try: while data_to_send: available = self.h2_conn.local_flow_control_window(stream_id) chunk_size = min(available, self.max_frame_size, len(data_to_send)) if chunk_size <= 0: # Wait for WINDOW_UPDATE per RFC 7540 Section 6.9.2 self._send_pending_data() available = self._wait_for_flow_control_window(stream_id) if available <= 0: return False chunk_size = min(available, self.max_frame_size, len(data_to_send)) chunk = data_to_send[:chunk_size] data_to_send = data_to_send[chunk_size:] is_final = end_stream and len(data_to_send) == 0 self.h2_conn.send_data(stream_id, chunk, end_stream=is_final) self._send_pending_data() stream.send_data(data, end_stream=end_stream) return True except (_h2_exceptions.StreamClosedError, _h2_exceptions.FlowControlError): # Stream was reset by client or flow control error - clean up gracefully stream.close() self.cleanup_stream(stream_id) return False def send_trailers(self, stream_id, trailers): """Send trailing headers on a stream. Trailers are headers sent after the response body, commonly used for gRPC status codes, checksums, and timing information. 
Args: stream_id: The stream ID trailers: List of (name, value) trailer tuples Raises: HTTP2Error: If stream not found, headers not sent, or pseudo-headers used Returns: bool: True if trailers sent, False if stream was already closed """ stream = self.streams.get(stream_id) if stream is None: # Stream was already cleaned up (reset/closed) - return gracefully return False if not stream.response_headers_sent: # Can't send trailers without headers - return False return False # Validate and normalize trailer headers trailer_headers = [] for name, value in trailers: lname = name.lower() if lname.startswith(':'): raise HTTP2Error(f"Pseudo-header '{name}' not allowed in trailers") trailer_headers.append((lname, str(value))) try: # Send trailers with end_stream=True self.h2_conn.send_headers(stream_id, trailer_headers, end_stream=True) stream.send_trailers(trailer_headers) self._send_pending_data() return True except _h2_exceptions.StreamClosedError: # Stream was reset by client - clean up gracefully stream.close() self.cleanup_stream(stream_id) return False def send_error(self, stream_id, status_code, message=None): """Send an error response on a stream. Args: stream_id: The stream ID status_code: HTTP status code message: Optional error message body """ body = message.encode() if message else b'' headers = [('content-length', str(len(body)))] if body: headers.append(('content-type', 'text/plain; charset=utf-8')) self.send_response(stream_id, status_code, headers, body) def reset_stream(self, stream_id, error_code=0x8): """Reset a stream with RST_STREAM. Args: stream_id: The stream ID to reset error_code: HTTP/2 error code (default: CANCEL) """ stream = self.streams.get(stream_id) if stream is not None: stream.reset(error_code) self.h2_conn.reset_stream(stream_id, error_code=error_code) self._send_pending_data() def close(self, error_code=0x0, last_stream_id=None): """Close the connection gracefully with GOAWAY. 
Args: error_code: HTTP/2 error code (default: NO_ERROR) last_stream_id: Last processed stream ID (default: highest) """ if self._closed: return self._closed = True if last_stream_id is None: # Use highest stream ID we've seen last_stream_id = max(self.streams.keys()) if self.streams else 0 try: self.h2_conn.close_connection(error_code=error_code) self._send_pending_data() except Exception: pass # Best effort def _send_pending_data(self): """Send any pending data from h2 to the socket.""" data = self.h2_conn.data_to_send() if data: try: self.sock.sendall(data) except (OSError, IOError) as e: self._closed = True raise HTTP2ConnectionError(f"Socket write error: {e}") @property def is_closed(self): """Check if connection is closed.""" return self._closed def cleanup_stream(self, stream_id): """Remove a stream after processing is complete. Args: stream_id: The stream ID to clean up """ self.streams.pop(stream_id, None) def __repr__(self): return ( f"" ) __all__ = ['HTTP2ServerConnection'] benoitc-gunicorn-f5fb19e/gunicorn/http2/errors.py000066400000000000000000000073611514360242400222450ustar00rootroot00000000000000# -*- coding: utf-8 - # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ HTTP/2 specific exceptions. These exceptions map to HTTP/2 error codes defined in RFC 7540. 
""" class HTTP2ErrorCode: """HTTP/2 Error Codes (RFC 7540 Section 7).""" NO_ERROR = 0x0 PROTOCOL_ERROR = 0x1 INTERNAL_ERROR = 0x2 FLOW_CONTROL_ERROR = 0x3 SETTINGS_TIMEOUT = 0x4 STREAM_CLOSED = 0x5 FRAME_SIZE_ERROR = 0x6 REFUSED_STREAM = 0x7 CANCEL = 0x8 COMPRESSION_ERROR = 0x9 CONNECT_ERROR = 0xa ENHANCE_YOUR_CALM = 0xb INADEQUATE_SECURITY = 0xc HTTP_1_1_REQUIRED = 0xd class HTTP2Error(Exception): """Base exception for HTTP/2 errors.""" error_code = 0x0 # NO_ERROR def __init__(self, message=None, error_code=None): self.message = message or self.__class__.__doc__ if error_code is not None: self.error_code = error_code super().__init__(self.message) class HTTP2ProtocolError(HTTP2Error): """Protocol error detected.""" error_code = 0x1 # PROTOCOL_ERROR class HTTP2InternalError(HTTP2Error): """Internal error occurred.""" error_code = 0x2 # INTERNAL_ERROR class HTTP2FlowControlError(HTTP2Error): """Flow control limits exceeded.""" error_code = 0x3 # FLOW_CONTROL_ERROR class HTTP2SettingsTimeout(HTTP2Error): """Settings acknowledgment timeout.""" error_code = 0x4 # SETTINGS_TIMEOUT class HTTP2StreamClosed(HTTP2Error): """Stream was closed.""" error_code = 0x5 # STREAM_CLOSED class HTTP2FrameSizeError(HTTP2Error): """Frame size is incorrect.""" error_code = 0x6 # FRAME_SIZE_ERROR class HTTP2RefusedStream(HTTP2Error): """Stream was refused.""" error_code = 0x7 # REFUSED_STREAM class HTTP2Cancel(HTTP2Error): """Stream was cancelled.""" error_code = 0x8 # CANCEL class HTTP2CompressionError(HTTP2Error): """Compression state error.""" error_code = 0x9 # COMPRESSION_ERROR class HTTP2ConnectError(HTTP2Error): """Connection error during CONNECT.""" error_code = 0xa # CONNECT_ERROR class HTTP2EnhanceYourCalm(HTTP2Error): """Peer is generating excessive load.""" error_code = 0xb # ENHANCE_YOUR_CALM class HTTP2InadequateSecurity(HTTP2Error): """Transport security is inadequate.""" error_code = 0xc # INADEQUATE_SECURITY class HTTP2RequiresHTTP11(HTTP2Error): """HTTP/1.1 is required 
for this request.""" error_code = 0xd # HTTP_1_1_REQUIRED class HTTP2StreamError(HTTP2Error): """Error specific to a single stream.""" def __init__(self, stream_id, message=None, error_code=None): self.stream_id = stream_id super().__init__(message, error_code) def __str__(self): return f"Stream {self.stream_id}: {self.message}" class HTTP2ConnectionError(HTTP2Error): """Error affecting the entire connection.""" class HTTP2ConfigurationError(HTTP2Error): """Invalid HTTP/2 configuration.""" class HTTP2NotAvailable(HTTP2Error): """HTTP/2 support is not available (h2 library not installed).""" def __init__(self, message=None): message = message or "HTTP/2 requires the h2 library: pip install gunicorn[http2]" super().__init__(message) __all__ = [ 'HTTP2ErrorCode', 'HTTP2Error', 'HTTP2ProtocolError', 'HTTP2InternalError', 'HTTP2FlowControlError', 'HTTP2SettingsTimeout', 'HTTP2StreamClosed', 'HTTP2FrameSizeError', 'HTTP2RefusedStream', 'HTTP2Cancel', 'HTTP2CompressionError', 'HTTP2ConnectError', 'HTTP2EnhanceYourCalm', 'HTTP2InadequateSecurity', 'HTTP2RequiresHTTP11', 'HTTP2StreamError', 'HTTP2ConnectionError', 'HTTP2ConfigurationError', 'HTTP2NotAvailable', ] benoitc-gunicorn-f5fb19e/gunicorn/http2/request.py000066400000000000000000000145301514360242400224150ustar00rootroot00000000000000# -*- coding: utf-8 - # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ HTTP/2 request wrapper. Provides a Request-compatible interface for HTTP/2 streams. """ from io import BytesIO from gunicorn.util import split_request_uri class HTTP2Body: """Body wrapper for HTTP/2 request data. Provides a file-like interface to the request body, compatible with gunicorn's Body class expectations. """ def __init__(self, data): """Initialize with body data. Args: data: bytes containing the request body """ self._data = BytesIO(data) self._len = len(data) def read(self, size=None): """Read data from the body. 
Args: size: Number of bytes to read, or None for all remaining Returns: bytes: The requested data """ if size is None: return self._data.read() return self._data.read(size) def readline(self, size=None): """Read a line from the body. Args: size: Maximum bytes to read Returns: bytes: A line of data """ if size is None: return self._data.readline() return self._data.readline(size) def readlines(self, hint=None): """Read all lines from the body. Args: hint: Approximate byte count hint Returns: list: List of lines """ return self._data.readlines(hint) def __iter__(self): """Iterate over lines in the body.""" return iter(self._data) def __len__(self): """Return the content length.""" return self._len def close(self): """Close the body stream.""" self._data.close() class HTTP2Request: """HTTP/2 request wrapper compatible with gunicorn Request interface. Wraps an HTTP2Stream to provide the same interface as the HTTP/1.x Request class, allowing workers to handle HTTP/2 requests using existing code paths. """ def __init__(self, stream, cfg, peer_addr): """Initialize from an HTTP/2 stream. 
Args: stream: HTTP2Stream instance with received headers/body cfg: Gunicorn configuration object peer_addr: Client address tuple (host, port) """ self.stream = stream self.cfg = cfg self.peer_addr = peer_addr self.remote_addr = peer_addr # HTTP/2 version tuple self.version = (2, 0) # Parse pseudo-headers pseudo = stream.get_pseudo_headers() self.method = pseudo.get(':method', 'GET') self.scheme = pseudo.get(':scheme', 'https') authority = pseudo.get(':authority', '') path = pseudo.get(':path', '/') # Parse the path into components self.uri = path try: parts = split_request_uri(path) self.path = parts.path or "" self.query = parts.query or "" self.fragment = parts.fragment or "" except ValueError: self.path = path self.query = "" self.fragment = "" # Store authority for Host header equivalent self._authority = authority # Convert HTTP/2 headers to HTTP/1.1 style # HTTP/2 headers are lowercase, convert to uppercase for WSGI self.headers = [] for name, value in stream.get_regular_headers(): # Convert to uppercase for WSGI compatibility self.headers.append((name.upper(), value)) # Set Host header from :authority (RFC 9113 section 8.3.1) # :authority MUST take precedence over Host header if authority: self.headers = [(n, v) for n, v in self.headers if n != 'HOST'] self.headers.append(('HOST', authority)) # Trailers (if any) self.trailers = [] if stream.trailers: self.trailers = [ (name.upper(), value) for name, value in stream.trailers ] # Body - HTTP/2 streams have complete body data body_data = stream.get_request_body() self.body = HTTP2Body(body_data) # Connection state self.must_close = False self._expected_100_continue = False # Request numbering (for logging) self.req_number = stream.stream_id # HTTP/2 does not use proxy protocol through the data stream self.proxy_protocol_info = None # Stream priority (RFC 7540 Section 5.3) self.priority_weight = stream.priority_weight self.priority_depends_on = stream.priority_depends_on def force_close(self): """Force the 
connection to close after this request.""" self.must_close = True def should_close(self): """Check if connection should close after this request. HTTP/2 connections are persistent by design, but we may still need to close if explicitly requested. Returns: bool: True if connection should close """ if self.must_close: return True # HTTP/2 connections are persistent, don't close by default return False def get_header(self, name): """Get a header value by name. Args: name: Header name (case-insensitive) Returns: str: Header value, or None if not found """ name = name.upper() for h_name, h_value in self.headers: if h_name == name: return h_value return None @property def content_length(self): """Get the Content-Length header value. Returns: int: Content length, or None if not set """ cl = self.get_header('CONTENT-LENGTH') if cl is not None: try: return int(cl) except ValueError: pass return None @property def content_type(self): """Get the Content-Type header value. Returns: str: Content type, or None if not set """ return self.get_header('CONTENT-TYPE') def __repr__(self): return ( f"" ) __all__ = ['HTTP2Request', 'HTTP2Body'] benoitc-gunicorn-f5fb19e/gunicorn/http2/stream.py000066400000000000000000000223351514360242400222220ustar00rootroot00000000000000# -*- coding: utf-8 - # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ HTTP/2 stream state management. Each HTTP/2 stream represents a single request/response exchange. """ from enum import Enum, auto from io import BytesIO from .errors import HTTP2StreamError class StreamState(Enum): """HTTP/2 stream states as defined in RFC 7540 Section 5.1.""" IDLE = auto() RESERVED_LOCAL = auto() RESERVED_REMOTE = auto() OPEN = auto() HALF_CLOSED_LOCAL = auto() HALF_CLOSED_REMOTE = auto() CLOSED = auto() class HTTP2Stream: """Represents a single HTTP/2 stream. Manages stream state, headers, and body data for a single request/response exchange within an HTTP/2 connection. 
""" def __init__(self, stream_id, connection): """Initialize an HTTP/2 stream. Args: stream_id: The unique stream identifier (odd for client-initiated) connection: The parent HTTP2ServerConnection """ self.stream_id = stream_id self.connection = connection # Stream state self.state = StreamState.IDLE # Request data self.request_headers = [] self.request_body = BytesIO() self.request_complete = False # Response data self.response_started = False self.response_headers_sent = False self.response_complete = False # Flow control self.window_size = connection.initial_window_size # Request trailers self.trailers = None # Response trailers self.response_trailers = None # Stream priority (RFC 7540 Section 5.3) self.priority_weight = 16 self.priority_depends_on = 0 self.priority_exclusive = False @property def is_client_stream(self): """Check if this is a client-initiated stream (odd stream ID).""" return self.stream_id % 2 == 1 @property def is_server_stream(self): """Check if this is a server-initiated stream (even stream ID).""" return self.stream_id % 2 == 0 @property def can_receive(self): """Check if this stream can receive data.""" return self.state in ( StreamState.OPEN, StreamState.HALF_CLOSED_LOCAL, ) @property def can_send(self): """Check if this stream can send data.""" return self.state in ( StreamState.OPEN, StreamState.HALF_CLOSED_REMOTE, ) def receive_headers(self, headers, end_stream=False): """Process received HEADERS frame. 
Args: headers: List of (name, value) tuples end_stream: True if END_STREAM flag is set Raises: HTTP2StreamError: If headers received in invalid state """ if self.state == StreamState.IDLE: self.state = StreamState.OPEN elif self.state not in (StreamState.OPEN, StreamState.HALF_CLOSED_LOCAL): raise HTTP2StreamError( self.stream_id, f"Cannot receive headers in state {self.state.name}" ) self.request_headers.extend(headers) if end_stream: self._half_close_remote() self.request_complete = True def receive_data(self, data, end_stream=False): """Process received DATA frame. Args: data: Bytes received end_stream: True if END_STREAM flag is set Raises: HTTP2StreamError: If data received in invalid state """ if not self.can_receive: raise HTTP2StreamError( self.stream_id, f"Cannot receive data in state {self.state.name}" ) self.request_body.write(data) if end_stream: self._half_close_remote() self.request_complete = True def receive_trailers(self, trailers): """Process received trailing headers. Args: trailers: List of (name, value) tuples """ if not self.can_receive: raise HTTP2StreamError( self.stream_id, f"Cannot receive trailers in state {self.state.name}" ) self.trailers = trailers self._half_close_remote() self.request_complete = True def send_headers(self, headers, end_stream=False): """Mark headers as sent. Args: headers: List of (name, value) tuples to send end_stream: True if this completes the response Raises: HTTP2StreamError: If headers cannot be sent in current state """ if not self.can_send: raise HTTP2StreamError( self.stream_id, f"Cannot send headers in state {self.state.name}" ) self.response_started = True self.response_headers_sent = True if end_stream: self._half_close_local() self.response_complete = True def send_data(self, data, end_stream=False): """Mark data as sent. 
Args: data: Bytes to send end_stream: True if this completes the response Raises: HTTP2StreamError: If data cannot be sent in current state """ if not self.can_send: raise HTTP2StreamError( self.stream_id, f"Cannot send data in state {self.state.name}" ) if end_stream: self._half_close_local() self.response_complete = True def send_trailers(self, trailers): """Mark trailers as sent and close the stream. Args: trailers: List of (name, value) trailer tuples Raises: HTTP2StreamError: If trailers cannot be sent in current state """ if not self.can_send: raise HTTP2StreamError( self.stream_id, f"Cannot send trailers in state {self.state.name}" ) self.response_trailers = trailers self._half_close_local() self.response_complete = True def reset(self, error_code=0x8): """Reset this stream with RST_STREAM. Args: error_code: HTTP/2 error code (default: CANCEL) """ self.state = StreamState.CLOSED self.response_complete = True self.request_complete = True def close(self): """Close this stream normally.""" self.state = StreamState.CLOSED self.response_complete = True self.request_complete = True def update_priority(self, weight=None, depends_on=None, exclusive=None): """Update stream priority from PRIORITY frame. 
Args: weight: Priority weight (1-256), higher = more resources depends_on: Stream ID this stream depends on exclusive: Whether this is an exclusive dependency """ if weight is not None: self.priority_weight = max(1, min(256, weight)) if depends_on is not None: self.priority_depends_on = depends_on if exclusive is not None: self.priority_exclusive = exclusive def _half_close_local(self): """Transition to half-closed (local) state.""" if self.state == StreamState.OPEN: self.state = StreamState.HALF_CLOSED_LOCAL elif self.state == StreamState.HALF_CLOSED_REMOTE: self.state = StreamState.CLOSED else: raise HTTP2StreamError( self.stream_id, f"Cannot half-close local in state {self.state.name}" ) def _half_close_remote(self): """Transition to half-closed (remote) state.""" if self.state == StreamState.OPEN: self.state = StreamState.HALF_CLOSED_REMOTE elif self.state == StreamState.HALF_CLOSED_LOCAL: self.state = StreamState.CLOSED else: raise HTTP2StreamError( self.stream_id, f"Cannot half-close remote in state {self.state.name}" ) def get_request_body(self): """Get the complete request body. Returns: bytes: The request body data """ return self.request_body.getvalue() def get_pseudo_headers(self): """Extract HTTP/2 pseudo-headers from request headers. Returns: dict: Mapping of pseudo-header names to values (e.g., {':method': 'GET', ':path': '/'}) """ pseudo = {} for name, value in self.request_headers: if name.startswith(':'): pseudo[name] = value return pseudo def get_regular_headers(self): """Get regular (non-pseudo) headers from request. 
Returns: list: List of (name, value) tuples for regular headers """ return [ (name, value) for name, value in self.request_headers if not name.startswith(':') ] def __repr__(self): return ( f"" ) __all__ = ['HTTP2Stream', 'StreamState'] benoitc-gunicorn-f5fb19e/gunicorn/instrument/000077500000000000000000000000001514360242400215175ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/gunicorn/instrument/__init__.py000066400000000000000000000001511514360242400236250ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. benoitc-gunicorn-f5fb19e/gunicorn/instrument/statsd.py000066400000000000000000000115511514360242400233760ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. "Bare-bones implementation of statsD's protocol, client-side" import logging import socket from re import sub from gunicorn.glogging import Logger # Instrumentation constants METRIC_VAR = "metric" VALUE_VAR = "value" MTYPE_VAR = "mtype" GAUGE_TYPE = "gauge" COUNTER_TYPE = "counter" HISTOGRAM_TYPE = "histogram" TIMER_TYPE = "timer" class Statsd(Logger): """statsD-based instrumentation, that passes as a logger """ def __init__(self, cfg): Logger.__init__(self, cfg) self.prefix = sub(r"^(.+[^.]+)\.*$", "\\g<1>.", cfg.statsd_prefix) if isinstance(cfg.statsd_host, str): address_family = socket.AF_UNIX else: address_family = socket.AF_INET try: self.sock = socket.socket(address_family, socket.SOCK_DGRAM) self.sock.connect(cfg.statsd_host) except Exception: self.sock = None self.dogstatsd_tags = cfg.dogstatsd_tags # Log errors and warnings def critical(self, msg, *args, **kwargs): Logger.critical(self, msg, *args, **kwargs) self.increment("gunicorn.log.critical", 1) def error(self, msg, *args, **kwargs): Logger.error(self, msg, *args, **kwargs) self.increment("gunicorn.log.error", 1) def warning(self, msg, *args, **kwargs): Logger.warning(self, 
msg, *args, **kwargs) self.increment("gunicorn.log.warning", 1) def exception(self, msg, *args, **kwargs): Logger.exception(self, msg, *args, **kwargs) self.increment("gunicorn.log.exception", 1) # Special treatment for info, the most common log level def info(self, msg, *args, **kwargs): self.log(logging.INFO, msg, *args, **kwargs) # skip the run-of-the-mill logs def debug(self, msg, *args, **kwargs): self.log(logging.DEBUG, msg, *args, **kwargs) def log(self, lvl, msg, *args, **kwargs): """Log a given statistic if metric, value and type are present """ try: extra = kwargs.get("extra", None) if extra is not None: metric = extra.get(METRIC_VAR, None) value = extra.get(VALUE_VAR, None) typ = extra.get(MTYPE_VAR, None) if metric and value and typ: if typ == GAUGE_TYPE: self.gauge(metric, value) elif typ == COUNTER_TYPE: self.increment(metric, value) elif typ == HISTOGRAM_TYPE: self.histogram(metric, value) elif typ == TIMER_TYPE: self.timer(metric, value) else: pass # Log to parent logger only if there is something to say if msg: Logger.log(self, lvl, msg, *args, **kwargs) except Exception: Logger.warning(self, "Failed to log to statsd", exc_info=True) # access logging def access(self, resp, req, environ, request_time): """Measure request duration request_time is a datetime.timedelta """ Logger.access(self, resp, req, environ, request_time) duration_in_ms = request_time.seconds * 1000 + float(request_time.microseconds) / 10 ** 3 status = resp.status if isinstance(status, bytes): status = status.decode('utf-8') if isinstance(status, str): status = int(status.split(None, 1)[0]) self.timer("gunicorn.request.duration", duration_in_ms) self.increment("gunicorn.requests", 1) self.increment("gunicorn.request.status.%d" % status, 1) # statsD methods # you can use those directly if you want def gauge(self, name, value): self._sock_send("{0}{1}:{2}|g".format(self.prefix, name, value)) def increment(self, name, value, sampling_rate=1.0): 
self._sock_send("{0}{1}:{2}|c|@{3}".format(self.prefix, name, value, sampling_rate)) def decrement(self, name, value, sampling_rate=1.0): self._sock_send("{0}{1}:-{2}|c|@{3}".format(self.prefix, name, value, sampling_rate)) def timer(self, name, value): self._sock_send("{0}{1}:{2}|ms".format(self.prefix, name, value)) def histogram(self, name, value): self._sock_send("{0}{1}:{2}|h".format(self.prefix, name, value)) def _sock_send(self, msg): try: if isinstance(msg, str): msg = msg.encode("ascii") # http://docs.datadoghq.com/guides/dogstatsd/#datagram-format if self.dogstatsd_tags: msg = msg + b"|#" + self.dogstatsd_tags.encode('ascii') if self.sock: self.sock.send(msg) except Exception: Logger.warning(self, "Error sending message to statsd", exc_info=True) benoitc-gunicorn-f5fb19e/gunicorn/pidfile.py000066400000000000000000000044271514360242400213040ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import errno import os import tempfile class Pidfile: """\ Manage a PID file. If a specific name is provided it and '"%s.oldpid" % name' will be used. Otherwise we create a temp file using os.mkstemp. """ def __init__(self, fname): self.fname = fname self.pid = None def create(self, pid): oldpid = self.validate() if oldpid: if oldpid == os.getpid(): return msg = "Already running on PID %s (or pid file '%s' is stale)" raise RuntimeError(msg % (oldpid, self.fname)) self.pid = pid # Write pidfile fdir = os.path.dirname(self.fname) if fdir and not os.path.isdir(fdir): raise RuntimeError("%s doesn't exist. Can't create pidfile." 
% fdir) fd, fname = tempfile.mkstemp(dir=fdir) os.write(fd, ("%s\n" % self.pid).encode('utf-8')) if self.fname: os.rename(fname, self.fname) else: self.fname = fname os.close(fd) # set permissions to -rw-r--r-- os.chmod(self.fname, 420) def rename(self, path): self.unlink() self.fname = path self.create(self.pid) def unlink(self): """ delete pidfile""" try: with open(self.fname) as f: pid1 = int(f.read() or 0) if pid1 == self.pid: os.unlink(self.fname) except Exception: pass def validate(self): """ Validate pidfile and make it stale if needed""" if not self.fname: return try: with open(self.fname) as f: try: wpid = int(f.read()) except ValueError: return try: os.kill(wpid, 0) return wpid except OSError as e: if e.args[0] == errno.EPERM: return wpid if e.args[0] == errno.ESRCH: return raise except OSError as e: if e.args[0] == errno.ENOENT: return raise benoitc-gunicorn-f5fb19e/gunicorn/reloader.py000066400000000000000000000072301514360242400214600ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
# pylint: disable=no-else-continue import os import os.path import re import sys import time import threading COMPILED_EXT_RE = re.compile(r'py[co]$') class ReloaderBase(threading.Thread): def __init__(self, extra_files=None, interval=1, callback=None): super().__init__() self.daemon = True self._extra_files = set(extra_files or ()) self._interval = interval self._callback = callback def add_extra_file(self, filename): self._extra_files.add(filename) def get_files(self): fnames = [ COMPILED_EXT_RE.sub('py', module.__file__) for module in tuple(sys.modules.values()) if getattr(module, '__file__', None) ] fnames.extend(self._extra_files) return fnames class Reloader(ReloaderBase): def run(self): mtimes = {} while True: for filename in self.get_files(): try: mtime = os.stat(filename).st_mtime except OSError: continue old_time = mtimes.get(filename) if old_time is None: mtimes[filename] = mtime continue elif mtime > old_time: if self._callback: self._callback(filename) time.sleep(self._interval) has_inotify = False if sys.platform.startswith('linux'): try: from inotify.adapters import Inotify import inotify.constants has_inotify = True except ImportError: pass if has_inotify: class InotifyReloader(ReloaderBase): event_mask = (inotify.constants.IN_CREATE | inotify.constants.IN_DELETE | inotify.constants.IN_DELETE_SELF | inotify.constants.IN_MODIFY | inotify.constants.IN_MOVE_SELF | inotify.constants.IN_MOVED_FROM | inotify.constants.IN_MOVED_TO) def __init__(self, extra_files=None, callback=None): super().__init__(extra_files=extra_files, callback=callback) self._dirs = set() self._watcher = Inotify() def add_extra_file(self, filename): super().add_extra_file(filename) dirname = os.path.dirname(filename) if dirname in self._dirs: return self._watcher.add_watch(dirname, mask=self.event_mask) self._dirs.add(dirname) def get_dirs(self): dirnames = [os.path.dirname(os.path.abspath(fname)) for fname in self.get_files()] return set(dirnames) def refresh_dirs(self): new_dirs = 
self.get_dirs().difference(self._dirs) self._dirs.update(new_dirs) for new_dir in new_dirs: if os.path.isdir(new_dir): self._watcher.add_watch(new_dir, mask=self.event_mask) def run(self): self.refresh_dirs() for event in self._watcher.event_gen(): if event is None: self.refresh_dirs() continue filename = event[3] self._callback(filename) else: class InotifyReloader: def __init__(self, extra_files=None, callback=None): raise ImportError('You must have the inotify module installed to ' 'use the inotify reloader') preferred_reloader = InotifyReloader if has_inotify else Reloader reloader_engines = { 'auto': preferred_reloader, 'poll': Reloader, 'inotify': InotifyReloader, } benoitc-gunicorn-f5fb19e/gunicorn/sock.py000066400000000000000000000221171514360242400206230ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import errno import os import socket import ssl import stat import struct import sys import time from gunicorn import util PLATFORM = sys.platform class BaseSocket: def __init__(self, address, conf, log, fd=None): self.log = log self.conf = conf self.cfg_addr = address if fd is None: sock = socket.socket(self.FAMILY, socket.SOCK_STREAM) bound = False else: sock = socket.fromfd(fd, self.FAMILY, socket.SOCK_STREAM) os.close(fd) bound = True self.sock = self.set_options(sock, bound=bound) def __str__(self): return "" % self.sock.fileno() def __getattr__(self, name): return getattr(self.sock, name) def set_options(self, sock, bound=False): sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if (self.conf.reuse_port and hasattr(socket, 'SO_REUSEPORT')): # pragma: no cover try: sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) except OSError as err: if err.errno not in (errno.ENOPROTOOPT, errno.EINVAL): raise if not bound: self.bind(sock) sock.setblocking(0) # make sure that the socket can be inherited if hasattr(sock, "set_inheritable"): sock.set_inheritable(True) 
sock.listen(self.conf.backlog) return sock def bind(self, sock): sock.bind(self.cfg_addr) def close(self): if self.sock is None: return try: self.sock.close() except OSError as e: self.log.info("Error while closing socket %s", str(e)) self.sock = None def get_backlog(self): return -1 class TCPSocket(BaseSocket): FAMILY = socket.AF_INET def __str__(self): if self.conf.is_ssl: scheme = "https" else: scheme = "http" addr = self.sock.getsockname() return "%s://%s:%d" % (scheme, addr[0], addr[1]) def set_options(self, sock, bound=False): sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) return super().set_options(sock, bound=bound) if PLATFORM == "linux": def get_backlog(self): if self.sock: # tcp_info struct from include/uapi/linux/tcp.h fmt = 'B' * 8 + 'I' * 24 try: tcp_info_struct = self.sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_INFO, 104) # 12 is tcpi_unacked return struct.unpack(fmt, tcp_info_struct)[12] except (AttributeError, OSError): pass return 0 else: def get_backlog(self): return -1 class TCP6Socket(TCPSocket): FAMILY = socket.AF_INET6 def __str__(self): (host, port, _, _) = self.sock.getsockname() return "http://[%s]:%d" % (host, port) class UnixSocket(BaseSocket): FAMILY = socket.AF_UNIX def __init__(self, addr, conf, log, fd=None): if fd is None: try: st = os.stat(addr) except OSError as e: if e.args[0] != errno.ENOENT: raise else: if stat.S_ISSOCK(st.st_mode): os.remove(addr) else: raise ValueError("%r is not a socket" % addr) super().__init__(addr, conf, log, fd=fd) def __str__(self): return "unix:%s" % self.cfg_addr def bind(self, sock): old_umask = os.umask(self.conf.umask) sock.bind(self.cfg_addr) util.chown(self.cfg_addr, self.conf.uid, self.conf.gid) os.umask(old_umask) def _sock_type(addr): if isinstance(addr, tuple): if util.is_ipv6(addr[0]): sock_type = TCP6Socket else: sock_type = TCPSocket elif isinstance(addr, (str, bytes)): sock_type = UnixSocket else: raise TypeError("Unable to create socket from: %r" % addr) return sock_type 
def create_sockets(conf, log, fds=None): """ Create a new socket for the configured addresses or file descriptors. If a configured address is a tuple then a TCP socket is created. If it is a string, a Unix socket is created. Otherwise, a TypeError is raised. """ listeners = [] # get it only once addr = conf.address fdaddr = [bind for bind in addr if isinstance(bind, int)] if fds: fdaddr += list(fds) laddr = [bind for bind in addr if not isinstance(bind, int)] # check ssl config early to raise the error on startup # only the certfile is needed since it can contains the keyfile if conf.certfile and not os.path.exists(conf.certfile): raise ValueError('certfile "%s" does not exist' % conf.certfile) if conf.keyfile and not os.path.exists(conf.keyfile): raise ValueError('keyfile "%s" does not exist' % conf.keyfile) # sockets are already bound if fdaddr: for fd in fdaddr: sock = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) sock_name = sock.getsockname() sock_type = _sock_type(sock_name) listener = sock_type(sock_name, conf, log, fd=fd) listeners.append(listener) return listeners # no sockets is bound, first initialization of gunicorn in this env. 
for addr in laddr: sock_type = _sock_type(addr) sock = None for i in range(5): try: sock = sock_type(addr, conf, log) except OSError as e: if e.args[0] == errno.EADDRINUSE: log.error("Connection in use: %s", str(addr)) if e.args[0] == errno.EADDRNOTAVAIL: log.error("Invalid address: %s", str(addr)) msg = "connection to {addr} failed: {error}" log.error(msg.format(addr=str(addr), error=str(e))) if i < 5: log.debug("Retrying in 1 second.") time.sleep(1) else: break if sock is None: log.error("Can't connect to %s", str(addr)) sys.exit(1) listeners.append(sock) return listeners def close_sockets(listeners, unlink=True): for sock in listeners: sock_name = sock.getsockname() sock.close() if unlink and _sock_type(sock_name) is UnixSocket: os.unlink(sock_name) def _get_alpn_protocols(conf): """Get ALPN protocol list from configuration. Returns list of ALPN protocol identifiers based on http_protocols setting. Returns empty list if HTTP/2 is not configured or available. """ from gunicorn.config import ALPN_PROTOCOL_MAP http_protocols = conf.http_protocols if not http_protocols: return [] # Only configure ALPN if h2 is in the protocol list if "h2" not in http_protocols: return [] # Check if h2 library is available from gunicorn.http2 import is_http2_available if not is_http2_available(): return [] # Map to ALPN identifiers, maintaining preference order alpn_protocols = [] for proto in http_protocols: if proto in ALPN_PROTOCOL_MAP: alpn_protocols.append(ALPN_PROTOCOL_MAP[proto]) return alpn_protocols def ssl_context(conf): def default_ssl_context_factory(): context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH, cafile=conf.ca_certs) context.load_cert_chain(certfile=conf.certfile, keyfile=conf.keyfile) context.verify_mode = conf.cert_reqs if conf.ciphers: context.set_ciphers(conf.ciphers) # Configure ALPN for HTTP/2 if enabled alpn_protocols = _get_alpn_protocols(conf) if alpn_protocols: context.set_alpn_protocols(alpn_protocols) return context return 
conf.ssl_context(conf, default_ssl_context_factory) def ssl_wrap_socket(sock, conf): return ssl_context(conf).wrap_socket(sock, server_side=True, suppress_ragged_eofs=conf.suppress_ragged_eofs, do_handshake_on_connect=conf.do_handshake_on_connect) def get_negotiated_protocol(ssl_socket): """Get the negotiated ALPN protocol from an SSL socket. Returns: str: The negotiated protocol name ('h2', 'http/1.1', etc.) or None if no protocol was negotiated. """ if not isinstance(ssl_socket, ssl.SSLSocket): return None try: return ssl_socket.selected_alpn_protocol() except (AttributeError, ssl.SSLError): return None def is_http2_negotiated(ssl_socket): """Check if HTTP/2 was negotiated on an SSL socket. Returns: bool: True if HTTP/2 was negotiated via ALPN. """ protocol = get_negotiated_protocol(ssl_socket) return protocol == "h2" benoitc-gunicorn-f5fb19e/gunicorn/systemd.py000066400000000000000000000047021514360242400213540ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import os import socket SD_LISTEN_FDS_START = 3 def listen_fds(unset_environment=True): """ Get the number of sockets inherited from systemd socket activation. :param unset_environment: clear systemd environment variables unless False :type unset_environment: bool :return: the number of sockets to inherit from systemd socket activation :rtype: int Returns zero immediately if $LISTEN_PID is not set to the current pid. Otherwise, returns the number of systemd activation sockets specified by $LISTEN_FDS. When $LISTEN_PID matches the current pid, unsets the environment variables unless the ``unset_environment`` flag is ``False``. .. note:: Unlike the sd_listen_fds C function, this implementation does not set the FD_CLOEXEC flag because the gunicorn arbiter never needs to do this. .. 
seealso:: ``_ """ fds = int(os.environ.get('LISTEN_FDS', 0)) listen_pid = int(os.environ.get('LISTEN_PID', 0)) if listen_pid != os.getpid(): return 0 if unset_environment: os.environ.pop('LISTEN_PID', None) os.environ.pop('LISTEN_FDS', None) return fds def sd_notify(state, logger, unset_environment=False): """Send a notification to systemd. state is a string; see the man page of sd_notify (http://www.freedesktop.org/software/systemd/man/sd_notify.html) for a description of the allowable values. If the unset_environment parameter is True, sd_notify() will unset the $NOTIFY_SOCKET environment variable before returning (regardless of whether the function call itself succeeded or not). Further calls to sd_notify() will then fail, but the variable is no longer inherited by child processes. """ addr = os.environ.get('NOTIFY_SOCKET') if addr is None: # not run in a service, just a noop return try: sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM | socket.SOCK_CLOEXEC) if addr[0] == '@': addr = '\0' + addr[1:] sock.connect(addr) sock.sendall(state.encode('utf-8')) except Exception: logger.debug("Exception while invoking sd_notify()", exc_info=True) finally: if unset_environment: os.environ.pop('NOTIFY_SOCKET') sock.close() benoitc-gunicorn-f5fb19e/gunicorn/util.py000066400000000000000000000457241514360242400206520ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
import ast import email.utils import errno import fcntl import html import importlib import inspect import io import logging import os import pwd import random import re import socket import sys import textwrap import time import traceback import warnings try: import importlib.metadata as importlib_metadata except (ModuleNotFoundError, ImportError): import importlib_metadata from gunicorn.errors import AppImportError from gunicorn.workers import SUPPORTED_WORKERS import urllib.parse REDIRECT_TO = getattr(os, 'devnull', '/dev/null') # Server and Date aren't technically hop-by-hop # headers, but they are in the purview of the # origin server which the WSGI spec says we should # act like. So we drop them and add our own. # # In the future, concatenation server header values # might be better, but nothing else does it and # dropping them is easier. hop_headers = set(""" connection keep-alive proxy-authenticate proxy-authorization te trailers transfer-encoding upgrade server date """.split()) # setproctitle causes segfaults on macOS due to fork() safety issues # https://github.com/benoitc/gunicorn/issues/3021 if sys.platform == "darwin": def _setproctitle(title): pass else: try: from setproctitle import setproctitle, getproctitle # Force early initialization before any os.environ modifications # (e.g. 
removing LISTEN_FDS in systemd socket activation) # https://github.com/benoitc/gunicorn/issues/3430 getproctitle() def _setproctitle(title): setproctitle("gunicorn: %s" % title) except ImportError: def _setproctitle(title): pass def load_entry_point(distribution, group, name): dist_obj = importlib_metadata.distribution(distribution) eps = [ep for ep in dist_obj.entry_points if ep.group == group and ep.name == name] if not eps: raise ImportError("Entry point %r not found" % ((group, name),)) return eps[0].load() def load_class(uri, default="gunicorn.workers.sync.SyncWorker", section="gunicorn.workers"): if inspect.isclass(uri): return uri if uri.startswith("egg:"): # uses entry points entry_str = uri.split("egg:")[1] try: dist, name = entry_str.rsplit("#", 1) except ValueError: dist = entry_str name = default try: return load_entry_point(dist, section, name) except Exception: exc = traceback.format_exc() msg = "class uri %r invalid or not found: \n\n[%s]" raise RuntimeError(msg % (uri, exc)) else: components = uri.split('.') if len(components) == 1: while True: if uri.startswith("#"): uri = uri[1:] if uri in SUPPORTED_WORKERS: components = SUPPORTED_WORKERS[uri].split(".") break try: return load_entry_point( "gunicorn", section, uri ) except Exception: exc = traceback.format_exc() msg = "class uri %r invalid or not found: \n\n[%s]" raise RuntimeError(msg % (uri, exc)) klass = components.pop(-1) try: mod = importlib.import_module('.'.join(components)) except Exception: exc = traceback.format_exc() msg = "class uri %r invalid or not found: \n\n[%s]" raise RuntimeError(msg % (uri, exc)) return getattr(mod, klass) positionals = ( inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD, ) def get_arity(f): sig = inspect.signature(f) arity = 0 for param in sig.parameters.values(): if param.kind in positionals: arity += 1 return arity def get_username(uid): """ get the username for a user id""" return pwd.getpwuid(uid).pw_name def set_owner_process(uid, 
gid, initgroups=False): """ set user and group of workers processes """ if gid: if uid: try: username = get_username(uid) except KeyError: initgroups = False if initgroups: os.initgroups(username, gid) elif gid != os.getgid(): os.setgid(gid) if uid and uid != os.getuid(): os.setuid(uid) def chown(path, uid, gid): os.chown(path, uid, gid) if sys.platform.startswith("win"): def _waitfor(func, pathname, waitall=False): # Perform the operation func(pathname) # Now setup the wait loop if waitall: dirname = pathname else: dirname, name = os.path.split(pathname) dirname = dirname or '.' # Check for `pathname` to be removed from the filesystem. # The exponential backoff of the timeout amounts to a total # of ~1 second after which the deletion is probably an error # anyway. # Testing on a i7@4.3GHz shows that usually only 1 iteration is # required when contention occurs. timeout = 0.001 while timeout < 1.0: # Note we are only testing for the existence of the file(s) in # the contents of the directory regardless of any security or # access rights. If we have made it this far, we have sufficient # permissions to do that much using Python's equivalent of the # Windows API FindFirstFile. # Other Windows APIs can fail or give incorrect results when # dealing with files that are pending deletion. L = os.listdir(dirname) if not L if waitall else name in L: return # Increase the timeout and try again time.sleep(timeout) timeout *= 2 warnings.warn('tests may fail, delete still pending for ' + pathname, RuntimeWarning, stacklevel=4) def _unlink(filename): _waitfor(os.unlink, filename) else: _unlink = os.unlink def unlink(filename): try: _unlink(filename) except OSError as error: # The filename need not exist. 
if error.errno not in (errno.ENOENT, errno.ENOTDIR): raise def is_ipv6(addr): try: socket.inet_pton(socket.AF_INET6, addr) except OSError: # not a valid address return False except ValueError: # ipv6 not supported on this platform return False return True def parse_address(netloc, default_port='8000'): if re.match(r'unix:(//)?', netloc): return re.split(r'unix:(//)?', netloc)[-1] if netloc.startswith("fd://"): fd = netloc[5:] try: return int(fd) except ValueError: raise RuntimeError("%r is not a valid file descriptor." % fd) from None if netloc.startswith("tcp://"): netloc = netloc.split("tcp://")[1] host, port = netloc, default_port if '[' in netloc and ']' in netloc: host = netloc.split(']')[0][1:] port = (netloc.split(']:') + [default_port])[1] elif ':' in netloc: host, port = (netloc.split(':') + [default_port])[:2] elif netloc == "": host, port = "0.0.0.0", default_port try: port = int(port) except ValueError: raise RuntimeError("%r is not a valid port number." % port) return host.lower(), port def close_on_exec(fd): flags = fcntl.fcntl(fd, fcntl.F_GETFD) flags |= fcntl.FD_CLOEXEC fcntl.fcntl(fd, fcntl.F_SETFD, flags) def set_non_blocking(fd): flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK fcntl.fcntl(fd, fcntl.F_SETFL, flags) def close(sock): try: sock.close() except OSError: pass try: from os import closerange except ImportError: def closerange(fd_low, fd_high): # Iterate through and close all file descriptors. 
for fd in range(fd_low, fd_high): try: os.close(fd) except OSError: # ERROR, fd wasn't open to begin with (ignored) pass def write_chunk(sock, data): if isinstance(data, str): data = data.encode('utf-8') chunk_size = "%X\r\n" % len(data) chunk = b"".join([chunk_size.encode('utf-8'), data, b"\r\n"]) sock.sendall(chunk) def write(sock, data, chunked=False): if chunked: return write_chunk(sock, data) sock.sendall(data) def write_nonblock(sock, data, chunked=False): timeout = sock.gettimeout() if timeout != 0.0: try: sock.setblocking(0) return write(sock, data, chunked) finally: sock.setblocking(1) else: return write(sock, data, chunked) def write_error(sock, status_int, reason, mesg): html_error = textwrap.dedent("""\ %(reason)s

%(reason)s

%(mesg)s """) % {"reason": reason, "mesg": html.escape(mesg)} http = textwrap.dedent("""\ HTTP/1.1 %s %s\r Connection: close\r Content-Type: text/html\r Content-Length: %d\r \r %s""") % (str(status_int), reason, len(html_error), html_error) write_nonblock(sock, http.encode('latin1')) def _called_with_wrong_args(f): """Check whether calling a function raised a ``TypeError`` because the call failed or because something in the function raised the error. :param f: The function that was called. :return: ``True`` if the call failed. """ tb = sys.exc_info()[2] try: while tb is not None: if tb.tb_frame.f_code is f.__code__: # In the function, it was called successfully. return False tb = tb.tb_next # Didn't reach the function. return True finally: # Delete tb to break a circular reference in Python 2. # https://docs.python.org/2/library/sys.html#sys.exc_info del tb def import_app(module): parts = module.split(":", 1) if len(parts) == 1: obj = "application" else: module, obj = parts[0], parts[1] try: mod = importlib.import_module(module) except ImportError: if module.endswith(".py") and os.path.exists(module): msg = "Failed to find application, did you mean '%s:%s'?" raise ImportError(msg % (module.rsplit(".", 1)[0], obj)) raise # Parse obj as a single expression to determine if it's a valid # attribute name or function call. try: expression = ast.parse(obj, mode="eval").body except SyntaxError: raise AppImportError( "Failed to parse %r as an attribute name or function call." % obj ) if isinstance(expression, ast.Name): name = expression.id args = kwargs = None elif isinstance(expression, ast.Call): # Ensure the function name is an attribute name only. if not isinstance(expression.func, ast.Name): raise AppImportError("Function reference must be a simple name: %r" % obj) name = expression.func.id # Parse the positional and keyword arguments as literals. 
try: args = [ast.literal_eval(arg) for arg in expression.args] kwargs = {kw.arg: ast.literal_eval(kw.value) for kw in expression.keywords} except ValueError: # literal_eval gives cryptic error messages, show a generic # message with the full expression instead. raise AppImportError( "Failed to parse arguments as literal values: %r" % obj ) else: raise AppImportError( "Failed to parse %r as an attribute name or function call." % obj ) is_debug = logging.root.level == logging.DEBUG try: app = getattr(mod, name) except AttributeError: if is_debug: traceback.print_exception(*sys.exc_info()) raise AppImportError("Failed to find attribute %r in %r." % (name, module)) # If the expression was a function call, call the retrieved object # to get the real application. if args is not None: try: app = app(*args, **kwargs) except TypeError as e: # If the TypeError was due to bad arguments to the factory # function, show Python's nice error message without a # traceback. if _called_with_wrong_args(app): raise AppImportError( "".join(traceback.format_exception_only(TypeError, e)).strip() ) # Otherwise it was raised from within the function, show the # full traceback. 
raise if app is None: raise AppImportError("Failed to find application object: %r" % obj) if not callable(app): raise AppImportError("Application object must be callable.") return app def getcwd(): # get current path, try to use PWD env first try: a = os.stat(os.environ['PWD']) b = os.stat(os.getcwd()) if a.st_ino == b.st_ino and a.st_dev == b.st_dev: cwd = os.environ['PWD'] else: cwd = os.getcwd() except Exception: cwd = os.getcwd() return cwd def http_date(timestamp=None): """Return the current date and time formatted for a message header.""" if timestamp is None: timestamp = time.time() s = email.utils.formatdate(timestamp, localtime=False, usegmt=True) return s def is_hoppish(header): return header.lower().strip() in hop_headers def daemonize(enable_stdio_inheritance=False): """\ Standard daemonization of a process. http://www.faqs.org/faqs/unix-faq/programmer/faq/ section 1.7 """ if 'GUNICORN_FD' not in os.environ: if os.fork(): os._exit(0) os.setsid() if os.fork(): os._exit(0) os.umask(0o22) # In both the following any file descriptors above stdin # stdout and stderr are left untouched. The inheritance # option simply allows one to have output go to a file # specified by way of shell redirection when not wanting # to use --error-log option. if not enable_stdio_inheritance: # Remap all of stdin, stdout and stderr on to # /dev/null. The expectation is that users have # specified the --error-log option. closerange(0, 3) fd_null = os.open(REDIRECT_TO, os.O_RDWR) # PEP 446, make fd for /dev/null inheritable os.set_inheritable(fd_null, True) # expect fd_null to be always 0 here, but in-case not ... if fd_null != 0: os.dup2(fd_null, 0) os.dup2(fd_null, 1) os.dup2(fd_null, 2) else: fd_null = os.open(REDIRECT_TO, os.O_RDWR) # Always redirect stdin to /dev/null as we would # never expect to need to read interactive input. 
if fd_null != 0: os.close(0) os.dup2(fd_null, 0) # If stdout and stderr are still connected to # their original file descriptors we check to see # if they are associated with terminal devices. # When they are we map them to /dev/null so that # are still detached from any controlling terminal # properly. If not we preserve them as they are. # # If stdin and stdout were not hooked up to the # original file descriptors, then all bets are # off and all we can really do is leave them as # they were. # # This will allow 'gunicorn ... > output.log 2>&1' # to work with stdout/stderr going to the file # as expected. # # Note that if using --error-log option, the log # file specified through shell redirection will # only be used up until the log file specified # by the option takes over. As it replaces stdout # and stderr at the file descriptor level, then # anything using stdout or stderr, including having # cached a reference to them, will still work. def redirect(stream, fd_expect): try: fd = stream.fileno() if fd == fd_expect and stream.isatty(): os.close(fd) os.dup2(fd_null, fd) except AttributeError: pass redirect(sys.stdout, 1) redirect(sys.stderr, 2) def seed(): try: random.seed(os.urandom(64)) except NotImplementedError: random.seed('%s.%s' % (time.time(), os.getpid())) def check_is_writable(path): try: with open(path, 'a') as f: f.close() except OSError as e: raise RuntimeError("Error: '%s' isn't writable [%r]" % (path, e)) def to_bytestring(value, encoding="utf8"): """Converts a string argument to a byte string""" if isinstance(value, bytes): return value if not isinstance(value, str): raise TypeError('%r is not a string' % value) return value.encode(encoding) def has_fileno(obj): if not hasattr(obj, "fileno"): return False # check BytesIO case and maybe others try: obj.fileno() except (AttributeError, OSError, io.UnsupportedOperation): return False return True def warn(msg): print("!!!", file=sys.stderr) lines = msg.splitlines() for i, line in enumerate(lines): 
if i == 0: line = "WARNING: %s" % line print("!!! %s" % line, file=sys.stderr) print("!!!\n", file=sys.stderr) sys.stderr.flush() def make_fail_app(msg): msg = to_bytestring(msg) def app(environ, start_response): start_response("500 Internal Server Error", [ ("Content-Type", "text/plain"), ("Content-Length", str(len(msg))) ]) return [msg] return app def split_request_uri(uri): if uri.startswith("//"): # When the path starts with //, urlsplit considers it as a # relative uri while the RFC says we should consider it as abs_path # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2 # We use temporary dot prefix to workaround this behaviour parts = urllib.parse.urlsplit("." + uri) return parts._replace(path=parts.path[1:]) return urllib.parse.urlsplit(uri) # From six.reraise def reraise(tp, value, tb=None): try: if value is None: value = tp() if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value finally: value = None tb = None def bytes_to_str(b): if isinstance(b, str): return b return str(b, 'latin1') def unquote_to_wsgi_str(string): return urllib.parse.unquote_to_bytes(string).decode('latin-1') benoitc-gunicorn-f5fb19e/gunicorn/uwsgi/000077500000000000000000000000001514360242400204455ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/gunicorn/uwsgi/__init__.py000066400000000000000000000007661514360242400225670ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.uwsgi.message import UWSGIRequest from gunicorn.uwsgi.parser import UWSGIParser from gunicorn.uwsgi.errors import ( UWSGIParseException, InvalidUWSGIHeader, UnsupportedModifier, ForbiddenUWSGIRequest, ) __all__ = [ 'UWSGIRequest', 'UWSGIParser', 'UWSGIParseException', 'InvalidUWSGIHeader', 'UnsupportedModifier', 'ForbiddenUWSGIRequest', ] benoitc-gunicorn-f5fb19e/gunicorn/uwsgi/errors.py000066400000000000000000000024461514360242400223410ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # We don't need to call super() in __init__ methods of our # BaseException and Exception classes because we also define # our own __str__ methods so there is no need to pass 'message' # to the base class to get a meaningful output from 'str(exc)'. # pylint: disable=super-init-not-called class UWSGIParseException(Exception): """Base exception for uWSGI protocol parsing errors.""" class InvalidUWSGIHeader(UWSGIParseException): """Raised when the uWSGI header is malformed.""" def __init__(self, msg=""): self.msg = msg self.code = 400 def __str__(self): return "Invalid uWSGI header: %s" % self.msg class UnsupportedModifier(UWSGIParseException): """Raised when modifier1 is not 0 (WSGI request).""" def __init__(self, modifier): self.modifier = modifier self.code = 501 def __str__(self): return "Unsupported uWSGI modifier1: %d" % self.modifier class ForbiddenUWSGIRequest(UWSGIParseException): """Raised when source IP is not in the allow list.""" def __init__(self, host): self.host = host self.code = 403 def __str__(self): return "uWSGI request from %r not allowed" % self.host benoitc-gunicorn-f5fb19e/gunicorn/uwsgi/message.py000066400000000000000000000206271514360242400224520ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
import io from gunicorn.http.body import LengthReader, Body from gunicorn.uwsgi.errors import ( InvalidUWSGIHeader, UnsupportedModifier, ForbiddenUWSGIRequest, ) # Maximum number of variables to prevent DoS MAX_UWSGI_VARS = 1000 class UWSGIRequest: """uWSGI protocol request parser. The uWSGI protocol uses a 4-byte binary header: - Byte 0: modifier1 (packet type, 0 = WSGI request) - Bytes 1-2: datasize (16-bit little-endian, size of vars block) - Byte 3: modifier2 (additional flags, typically 0) After the header: 1. Vars block (datasize bytes): Key-value pairs containing WSGI environ - Each pair: 2-byte key_size (LE) + key + 2-byte val_size (LE) + value 2. Request body (determined by CONTENT_LENGTH in vars) """ def __init__(self, cfg, unreader, peer_addr, req_number=1): self.cfg = cfg self.unreader = unreader self.peer_addr = peer_addr self.remote_addr = peer_addr self.req_number = req_number # Request attributes (compatible with HTTP Request interface) self.method = None self.uri = None self.path = None self.query = None self.fragment = "" self.version = (1, 1) # uWSGI is HTTP/1.1 compatible self.headers = [] self.trailers = [] self.body = None self.scheme = "https" if cfg.is_ssl else "http" self.must_close = False # uWSGI specific self.uwsgi_vars = {} self.modifier1 = 0 self.modifier2 = 0 # Proxy protocol compatibility self.proxy_protocol_info = None # 100-continue: not applicable for uWSGI as the frontend handles this self._expected_100_continue = False # Check if the source IP is allowed self._check_allowed_ip() # Parse the request unused = self.parse(self.unreader) self.unreader.unread(unused) self.set_body_reader() def _check_allowed_ip(self): """Verify source IP is in the allowed list.""" allow_ips = getattr(self.cfg, 'uwsgi_allow_ips', ['127.0.0.1', '::1']) # UNIX sockets don't have IP addresses if not isinstance(self.peer_addr, tuple): return # Wildcard allows all if '*' in allow_ips: return if self.peer_addr[0] not in allow_ips: raise 
ForbiddenUWSGIRequest(self.peer_addr[0]) def force_close(self): """Force the connection to close after this request.""" self.must_close = True def parse(self, unreader): """Parse uWSGI packet header and vars block.""" # Read the 4-byte header header = self._read_exact(unreader, 4) if len(header) < 4: raise InvalidUWSGIHeader("incomplete header") self.modifier1 = header[0] datasize = int.from_bytes(header[1:3], 'little') self.modifier2 = header[3] # Only modifier1=0 (WSGI request) is supported if self.modifier1 != 0: raise UnsupportedModifier(self.modifier1) # Read the vars block if datasize > 0: vars_data = self._read_exact(unreader, datasize) if len(vars_data) < datasize: raise InvalidUWSGIHeader("incomplete vars block") self._parse_vars(vars_data) # Extract HTTP request info from vars self._extract_request_info() return b"" def _read_exact(self, unreader, size): """Read exactly size bytes from the unreader.""" buf = io.BytesIO() remaining = size while remaining > 0: data = unreader.read() if not data: break buf.write(data) remaining = size - buf.tell() result = buf.getvalue() # Put back any extra bytes if len(result) > size: unreader.unread(result[size:]) result = result[:size] return result def _parse_vars(self, data): """Parse uWSGI vars block into key-value pairs. 
Format: key_size (2 bytes LE) + key + val_size (2 bytes LE) + value """ pos = 0 var_count = 0 while pos < len(data): if var_count >= MAX_UWSGI_VARS: raise InvalidUWSGIHeader("too many variables") # Key size (2 bytes, little-endian) if pos + 2 > len(data): raise InvalidUWSGIHeader("truncated key size") key_size = int.from_bytes(data[pos:pos + 2], 'little') pos += 2 # Key if pos + key_size > len(data): raise InvalidUWSGIHeader("truncated key") key = data[pos:pos + key_size].decode('latin-1') pos += key_size # Value size (2 bytes, little-endian) if pos + 2 > len(data): raise InvalidUWSGIHeader("truncated value size") val_size = int.from_bytes(data[pos:pos + 2], 'little') pos += 2 # Value if pos + val_size > len(data): raise InvalidUWSGIHeader("truncated value") value = data[pos:pos + val_size].decode('latin-1') pos += val_size self.uwsgi_vars[key] = value var_count += 1 def _extract_request_info(self): """Extract HTTP request info from uWSGI vars. Header Mapping (CGI/WSGI to HTTP): The uWSGI protocol passes HTTP headers using CGI-style environment variable naming. This method converts them back to HTTP header format: - HTTP_* vars: Strip 'HTTP_' prefix, replace '_' with '-' Example: HTTP_X_FORWARDED_FOR -> X-FORWARDED-FOR Example: HTTP_ACCEPT_ENCODING -> ACCEPT-ENCODING - CONTENT_TYPE: Mapped directly to CONTENT-TYPE header (CGI spec excludes HTTP_ prefix for this header) - CONTENT_LENGTH: Mapped directly to CONTENT-LENGTH header (CGI spec excludes HTTP_ prefix for this header) Note: The underscore-to-hyphen conversion is lossy. Headers that originally contained underscores (e.g., X_Custom_Header) cannot be distinguished from hyphenated headers (X-Custom-Header) after passing through nginx/uWSGI. This is a CGI/WSGI specification limitation, not specific to this implementation. 
""" # Method self.method = self.uwsgi_vars.get('REQUEST_METHOD', 'GET') # URI and path self.path = self.uwsgi_vars.get('PATH_INFO', '/') self.query = self.uwsgi_vars.get('QUERY_STRING', '') # Build URI if self.query: self.uri = "%s?%s" % (self.path, self.query) else: self.uri = self.path # Scheme if self.uwsgi_vars.get('HTTPS', '').lower() in ('on', '1', 'true'): self.scheme = 'https' elif 'wsgi.url_scheme' in self.uwsgi_vars: self.scheme = self.uwsgi_vars['wsgi.url_scheme'] # Extract HTTP headers from CGI-style vars # See docstring above for mapping details for key, value in self.uwsgi_vars.items(): if key.startswith('HTTP_'): # Convert HTTP_HEADER_NAME to HEADER-NAME header_name = key[5:].replace('_', '-') self.headers.append((header_name, value)) elif key == 'CONTENT_TYPE': self.headers.append(('CONTENT-TYPE', value)) elif key == 'CONTENT_LENGTH': self.headers.append(('CONTENT-LENGTH', value)) def set_body_reader(self): """Set up the body reader based on CONTENT_LENGTH.""" content_length = 0 # Get content length from vars if 'CONTENT_LENGTH' in self.uwsgi_vars: try: content_length = max(int(self.uwsgi_vars['CONTENT_LENGTH']), 0) except ValueError: content_length = 0 self.body = Body(LengthReader(self.unreader, content_length)) def should_close(self): """Determine if the connection should be closed after this request.""" if self.must_close: return True # Check HTTP_CONNECTION header connection = self.uwsgi_vars.get('HTTP_CONNECTION', '').lower() if connection == 'close': return True elif connection == 'keep-alive': return False # Default to keep-alive for HTTP/1.1 return False benoitc-gunicorn-f5fb19e/gunicorn/uwsgi/parser.py000066400000000000000000000004541514360242400223160ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.http.parser import Parser from gunicorn.uwsgi.message import UWSGIRequest class UWSGIParser(Parser): """Parser for uWSGI protocol requests.""" mesg_class = UWSGIRequest benoitc-gunicorn-f5fb19e/gunicorn/workers/000077500000000000000000000000001514360242400210035ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/gunicorn/workers/__init__.py000066400000000000000000000012241514360242400231130ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # supported gunicorn workers. SUPPORTED_WORKERS = { "sync": "gunicorn.workers.sync.SyncWorker", "eventlet": "gunicorn.workers.geventlet.EventletWorker", # DEPRECATED: will be removed in 26.0 "gevent": "gunicorn.workers.ggevent.GeventWorker", "gevent_wsgi": "gunicorn.workers.ggevent.GeventPyWSGIWorker", "gevent_pywsgi": "gunicorn.workers.ggevent.GeventPyWSGIWorker", "tornado": "gunicorn.workers.gtornado.TornadoWorker", "gthread": "gunicorn.workers.gthread.ThreadWorker", "asgi": "gunicorn.workers.gasgi.ASGIWorker", } benoitc-gunicorn-f5fb19e/gunicorn/workers/base.py000066400000000000000000000230221514360242400222660ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
import io import os import signal import sys import time import traceback from datetime import datetime from random import randint from ssl import SSLError from gunicorn import util from gunicorn.http.errors import ( ForbiddenProxyRequest, InvalidHeader, InvalidHeaderName, InvalidHTTPVersion, InvalidProxyLine, InvalidRequestLine, InvalidRequestMethod, InvalidSchemeHeaders, LimitRequestHeaders, LimitRequestLine, UnsupportedTransferCoding, ExpectationFailed, ConfigurationProblem, ObsoleteFolding, ) from gunicorn.http.wsgi import Response, default_environ from gunicorn.reloader import reloader_engines from gunicorn.workers.workertmp import WorkerTmp class Worker: SIGNALS = [getattr(signal, "SIG%s" % x) for x in ( "ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD".split() )] PIPE = [] def __init__(self, age, ppid, sockets, app, timeout, cfg, log): """\ This is called pre-fork so it shouldn't do anything to the current process. If there's a need to make process wide changes you'll want to do that in ``self.init_process()``. """ self.age = age self.pid = "[booting]" self.ppid = ppid self.sockets = sockets self.app = app self.timeout = timeout self.cfg = cfg self.booted = False self.aborted = False self.reloader = None self.nr = 0 if cfg.max_requests > 0: jitter = randint(0, cfg.max_requests_jitter) self.max_requests = cfg.max_requests + jitter else: self.max_requests = sys.maxsize self.alive = True self.log = log self.tmp = WorkerTmp(cfg) def __str__(self): return "" % self.pid def notify(self): """\ Your worker subclass must arrange to have this method called once every ``self.timeout`` seconds. If you fail in accomplishing this task, the master process will murder your workers. """ self.tmp.notify() def run(self): """\ This is the mainloop of a worker process. You should override this method in a subclass to provide the intended behaviour for your particular evil schemes. 
""" raise NotImplementedError() def init_process(self): """\ If you override this method in a subclass, the last statement in the function should be to call this method with super().init_process() so that the ``run()`` loop is initiated. """ # set environment' variables if self.cfg.env: for k, v in self.cfg.env.items(): os.environ[k] = v util.set_owner_process(self.cfg.uid, self.cfg.gid, initgroups=self.cfg.initgroups) # Reseed the random number generator util.seed() # For waking ourselves up self.PIPE = os.pipe() for p in self.PIPE: util.set_non_blocking(p) util.close_on_exec(p) # Prevent fd inheritance for s in self.sockets: util.close_on_exec(s) util.close_on_exec(self.tmp.fileno()) self.wait_fds = self.sockets + [self.PIPE[0]] self.log.close_on_exec() self.init_signals() # start the reloader if self.cfg.reload: def changed(fname): self.log.info("Worker reloading: %s modified", fname) self.alive = False os.write(self.PIPE[1], b"1") self.cfg.worker_int(self) time.sleep(0.1) sys.exit(0) self.log.warning("Reloader is on. 
Use in development only!") reloader_cls = reloader_engines[self.cfg.reload_engine] self.reloader = reloader_cls(extra_files=self.cfg.reload_extra_files, callback=changed) self.load_wsgi() if self.reloader: self.reloader.start() self.cfg.post_worker_init(self) # Enter main run loop self.booted = True self.run() def load_wsgi(self): try: self.wsgi = self.app.wsgi() except SyntaxError as e: if not self.cfg.reload: raise self.log.exception(e) if self.reloader is not None and e.filename is not None: self.reloader.add_extra_file(e.filename) with io.StringIO() as tb_string: traceback.print_exception(e, file=tb_string) self.wsgi = util.make_fail_app(tb_string.getvalue()) def init_signals(self): # reset signaling for s in self.SIGNALS: signal.signal(s, signal.SIG_DFL) # init new signaling signal.signal(signal.SIGQUIT, self.handle_quit) signal.signal(signal.SIGTERM, self.handle_exit) signal.signal(signal.SIGINT, self.handle_quit) signal.signal(signal.SIGWINCH, self.handle_winch) signal.signal(signal.SIGUSR1, self.handle_usr1) signal.signal(signal.SIGABRT, self.handle_abort) # Don't let SIGTERM and SIGUSR1 disturb active requests # by interrupting system calls signal.siginterrupt(signal.SIGTERM, False) signal.siginterrupt(signal.SIGUSR1, False) if hasattr(signal, 'set_wakeup_fd'): signal.set_wakeup_fd(self.PIPE[1]) def handle_usr1(self, sig, frame): self.log.reopen_files() def handle_exit(self, sig, frame): self.alive = False def handle_quit(self, sig, frame): self.alive = False # worker_int callback self.cfg.worker_int(self) time.sleep(0.1) sys.exit(0) def handle_abort(self, sig, frame): self.alive = False self.cfg.worker_abort(self) sys.exit(1) def handle_error(self, req, client, addr, exc): request_start = datetime.now() addr = addr or ('', -1) # unix socket case if isinstance(exc, ( InvalidRequestLine, InvalidRequestMethod, InvalidHTTPVersion, InvalidHeader, InvalidHeaderName, LimitRequestLine, LimitRequestHeaders, InvalidProxyLine, ForbiddenProxyRequest, 
InvalidSchemeHeaders, UnsupportedTransferCoding, ConfigurationProblem, ObsoleteFolding, ExpectationFailed, SSLError, )): status_int = 400 reason = "Bad Request" if isinstance(exc, InvalidRequestLine): mesg = "Invalid Request Line '%s'" % str(exc) elif isinstance(exc, InvalidRequestMethod): mesg = "Invalid Method '%s'" % str(exc) elif isinstance(exc, InvalidHTTPVersion): mesg = "Invalid HTTP Version '%s'" % str(exc) elif isinstance(exc, UnsupportedTransferCoding): mesg = "%s" % str(exc) status_int = 501 elif isinstance(exc, ConfigurationProblem): mesg = "%s" % str(exc) status_int = 500 elif isinstance(exc, ObsoleteFolding): mesg = "%s" % str(exc) elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)): mesg = "%s" % str(exc) if not req and hasattr(exc, "req"): req = exc.req # for access log elif isinstance(exc, LimitRequestLine): mesg = "%s" % str(exc) elif isinstance(exc, ExpectationFailed): reason = "Expectation Failed" mesg = str(exc) status_int = 417 elif isinstance(exc, LimitRequestHeaders): reason = "Request Header Fields Too Large" mesg = "Error parsing headers: '%s'" % str(exc) status_int = 431 elif isinstance(exc, InvalidProxyLine): mesg = "'%s'" % str(exc) elif isinstance(exc, ForbiddenProxyRequest): reason = "Forbidden" mesg = "Request forbidden" status_int = 403 elif isinstance(exc, InvalidSchemeHeaders): mesg = "%s" % str(exc) elif isinstance(exc, SSLError): reason = "Forbidden" mesg = "'%s'" % str(exc) status_int = 403 msg = "Invalid request from ip={ip}: {error}" self.log.warning(msg.format(ip=addr[0], error=str(exc))) else: if hasattr(req, "uri"): self.log.exception("Error handling request %s", req.uri) else: self.log.exception("Error handling request (no URI read)") status_int = 500 reason = "Internal Server Error" mesg = "" if req is not None: request_time = datetime.now() - request_start environ = default_environ(req, client, self.cfg) environ['REMOTE_ADDR'] = addr[0] environ['REMOTE_PORT'] = str(addr[1]) resp = Response(req, client, self.cfg) 
resp.status = "%s %s" % (status_int, reason) resp.response_length = len(mesg) self.log.access(resp, req, environ, request_time) try: util.write_error(client, status_int, reason, mesg) except Exception: self.log.debug("Failed to send error message.") def handle_winch(self, sig, fname): # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD. self.log.debug("worker: SIGWINCH ignored.") benoitc-gunicorn-f5fb19e/gunicorn/workers/base_async.py000066400000000000000000000230431514360242400234660ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from datetime import datetime import errno import socket import ssl import sys from gunicorn import http from gunicorn.http import wsgi from gunicorn import util from gunicorn import sock as gunicorn_sock from gunicorn.workers import base ALREADY_HANDLED = object() class AsyncWorker(base.Worker): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.worker_connections = self.cfg.worker_connections def timeout_ctx(self): raise NotImplementedError() def is_already_handled(self, respiter): # some workers will need to overload this function to raise a StopIteration return respiter == ALREADY_HANDLED def handle(self, listener, client, addr): req = None try: # Complete the handshake to ensure ALPN negotiation is done # (needed if do_handshake_on_connect is False) if isinstance(client, ssl.SSLSocket) and not self.cfg.do_handshake_on_connect: client.do_handshake() # Check if HTTP/2 was negotiated (for SSL connections) is_http2 = gunicorn_sock.is_http2_negotiated(client) if is_http2: # Handle HTTP/2 connection self.handle_http2(listener, client, addr) return parser = http.get_parser(self.cfg, client, addr) try: listener_name = listener.getsockname() if not self.cfg.keepalive: req = next(parser) self.handle_request(listener_name, req, client, addr) else: # keepalive loop proxy_protocol_info = {} while True: req = None with 
self.timeout_ctx(): req = next(parser) if not req: break if req.proxy_protocol_info: proxy_protocol_info = req.proxy_protocol_info else: req.proxy_protocol_info = proxy_protocol_info self.handle_request(listener_name, req, client, addr) except http.errors.NoMoreData as e: self.log.debug("Ignored premature client disconnection. %s", e) except StopIteration as e: self.log.debug("Closing connection. %s", e) except ssl.SSLError: # pass to next try-except level util.reraise(*sys.exc_info()) except OSError: # pass to next try-except level util.reraise(*sys.exc_info()) except Exception as e: self.handle_error(req, client, addr, e) except ssl.SSLError as e: if e.args[0] == ssl.SSL_ERROR_EOF: self.log.debug("ssl connection closed") client.close() else: self.log.debug("Error processing SSL request.") self.handle_error(req, client, addr, e) except OSError as e: if e.errno not in (errno.EPIPE, errno.ECONNRESET, errno.ENOTCONN): self.log.exception("Socket error processing request.") else: if e.errno == errno.ECONNRESET: self.log.debug("Ignoring connection reset") elif e.errno == errno.ENOTCONN: self.log.debug("Ignoring socket not connected") else: self.log.debug("Ignoring EPIPE") except BaseException as e: self.handle_error(req, client, addr, e) finally: util.close(client) def handle_http2(self, listener, client, addr): """Handle an HTTP/2 connection. Processes multiplexed HTTP/2 streams until the connection closes. 
""" listener_name = listener.getsockname() try: h2_conn = http.get_parser(self.cfg, client, addr, http2_connection=True) h2_conn.initiate_connection() while not h2_conn.is_closed and self.alive: try: requests = h2_conn.receive_data() except http.errors.NoMoreData: self.log.debug("HTTP/2 connection closed by client") break for req in requests: try: self.handle_http2_request(listener_name, req, client, addr, h2_conn) except Exception as e: self.log.exception("Error handling HTTP/2 request") try: h2_conn.send_error(req.stream.stream_id, 500, str(e)) except Exception: pass finally: h2_conn.cleanup_stream(req.stream.stream_id) except ssl.SSLError as e: if e.args[0] == ssl.SSL_ERROR_EOF: self.log.debug("HTTP/2 SSL connection closed") else: self.log.debug("HTTP/2 SSL error: %s", e) except OSError as e: if e.errno not in (errno.EPIPE, errno.ECONNRESET, errno.ENOTCONN): self.log.exception("HTTP/2 socket error") except Exception as e: self.log.exception("HTTP/2 connection error: %s", e) def handle_http2_request(self, listener_name, req, sock, addr, h2_conn): """Handle a single HTTP/2 request.""" stream_id = req.stream.stream_id request_start = datetime.now() environ = {} resp = None try: self.cfg.pre_request(self, req) resp, environ = wsgi.create(req, sock, addr, listener_name, self.cfg) environ["wsgi.multithread"] = True environ["HTTP_VERSION"] = "2" self.nr += 1 if self.nr >= self.max_requests: if self.alive: self.log.info("Autorestarting worker after current request.") self.alive = False # Run WSGI app respiter = self.wsgi(environ, resp.start_response) if self.is_already_handled(respiter): return # Collect response body response_body = b'' try: if hasattr(respiter, '__iter__'): for item in respiter: if item: response_body += item finally: if hasattr(respiter, "close"): respiter.close() # Send response via HTTP/2 h2_conn.send_response( stream_id, resp.status_code, resp.headers, response_body ) request_time = datetime.now() - request_start self.log.access(resp, req, 
environ, request_time) except Exception: self.log.exception("Error handling HTTP/2 request") raise finally: try: self.cfg.post_request(self, req, environ, resp) except Exception: self.log.exception("Exception in post_request hook") def handle_request(self, listener_name, req, sock, addr): request_start = datetime.now() environ = {} resp = None try: self.cfg.pre_request(self, req) resp, environ = wsgi.create(req, sock, addr, listener_name, self.cfg) environ["wsgi.multithread"] = True self.nr += 1 if self.nr >= self.max_requests: if self.alive: self.log.info("Autorestarting worker after current request.") self.alive = False if not self.alive or not self.cfg.keepalive: resp.force_close() respiter = self.wsgi(environ, resp.start_response) if self.is_already_handled(respiter): return False try: if isinstance(respiter, environ['wsgi.file_wrapper']): resp.write_file(respiter) else: for item in respiter: resp.write(item) resp.close() finally: request_time = datetime.now() - request_start self.log.access(resp, req, environ, request_time) if hasattr(respiter, "close"): respiter.close() if resp.should_close(): raise StopIteration() except StopIteration: raise except OSError: # If the original exception was a socket.error we delegate # handling it to the caller (where handle() might ignore it) util.reraise(*sys.exc_info()) except Exception: if resp and resp.headers_sent: # If the requests have already been sent, we should close the # connection to indicate the error. self.log.exception("Error handling request") try: sock.shutdown(socket.SHUT_RDWR) sock.close() except OSError: pass raise StopIteration() raise finally: try: self.cfg.post_request(self, req, environ, resp) except Exception: self.log.exception("Exception in post_request hook") return True benoitc-gunicorn-f5fb19e/gunicorn/workers/gasgi.py000066400000000000000000000247111514360242400224540ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. 
# See the NOTICE for more information. """ ASGI worker for gunicorn. Provides native asyncio-based ASGI support using gunicorn's own HTTP parsing infrastructure. """ import asyncio import os import signal import sys from gunicorn.workers import base from gunicorn.asgi.protocol import ASGIProtocol class ASGIWorker(base.Worker): """ASGI worker using asyncio event loop. Supports: - HTTP/1.1 with keepalive - WebSocket connections - Lifespan protocol (startup/shutdown hooks) - Optional uvloop for improved performance """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.worker_connections = self.cfg.worker_connections self.loop = None self.servers = [] self.nr_conns = 0 self.lifespan = None self.state = {} # Shared state for lifespan self._quick_shutdown = False # True for SIGINT/SIGQUIT (immediate), False for SIGTERM (graceful) @classmethod def check_config(cls, cfg, log): """Validate configuration for ASGI worker.""" if cfg.threads > 1: log.warning("ASGI worker does not use threads configuration. 
" "Use worker_connections instead.") def init_process(self): """Initialize the worker process.""" # Setup event loop before calling super() self._setup_event_loop() super().init_process() def _setup_event_loop(self): """Setup the asyncio event loop.""" loop_type = getattr(self.cfg, 'asgi_loop', 'auto') if loop_type == "auto": try: import uvloop loop_type = "uvloop" except ImportError: loop_type = "asyncio" if loop_type == "uvloop": try: import uvloop self.loop = uvloop.new_event_loop() self.log.debug("Using uvloop event loop") except ImportError: self.log.warning("uvloop not available, falling back to asyncio") self.loop = asyncio.new_event_loop() else: self.loop = asyncio.new_event_loop() self.log.debug("Using asyncio event loop") asyncio.set_event_loop(self.loop) def load_wsgi(self): """Load the ASGI application.""" try: self.asgi = self.app.wsgi() except SyntaxError as e: if not self.cfg.reload: raise self.log.exception(e) self.asgi = self._make_error_app(str(e)) def _make_error_app(self, error_msg): """Create an error ASGI app for syntax errors during reload.""" async def error_app(scope, receive, send): if scope["type"] == "http": await send({ "type": "http.response.start", "status": 500, "headers": [(b"content-type", b"text/plain")], }) await send({ "type": "http.response.body", "body": f"Application error: {error_msg}".encode(), }) elif scope["type"] == "lifespan": message = await receive() if message["type"] == "lifespan.startup": await send({"type": "lifespan.startup.complete"}) message = await receive() if message["type"] == "lifespan.shutdown": await send({"type": "lifespan.shutdown.complete"}) return error_app def init_signals(self): """Initialize signal handlers for asyncio.""" # Reset all signals first for s in self.SIGNALS: signal.signal(s, signal.SIG_DFL) # Set up signal handlers via the event loop self.loop.add_signal_handler(signal.SIGQUIT, self.handle_quit_signal) self.loop.add_signal_handler(signal.SIGTERM, self.handle_exit_signal) 
self.loop.add_signal_handler(signal.SIGINT, self.handle_quit_signal) self.loop.add_signal_handler(signal.SIGUSR1, self.handle_usr1_signal) self.loop.add_signal_handler(signal.SIGWINCH, self.handle_winch_signal) self.loop.add_signal_handler(signal.SIGABRT, self.handle_abort_signal) def handle_quit_signal(self): """Handle SIGQUIT/SIGINT - immediate shutdown.""" self._quick_shutdown = True if not self.alive: # Already shutting down (SIGTERM was sent) - wake up the loop return self.alive = False self.cfg.worker_int(self) def handle_exit_signal(self): """Handle SIGTERM - graceful shutdown.""" self.alive = False def handle_usr1_signal(self): """Handle SIGUSR1 - reopen log files.""" self.log.reopen_files() def handle_winch_signal(self): """Handle SIGWINCH - ignored in worker.""" self.log.debug("worker: SIGWINCH ignored.") def handle_abort_signal(self): """Handle SIGABRT - abort.""" self.alive = False self.cfg.worker_abort(self) sys.exit(1) def run(self): """Main entry point for the worker.""" try: self.loop.run_until_complete(self._serve()) except Exception as e: self.log.exception("Worker exception: %s", e) finally: self._cleanup() async def _serve(self): """Main async serving loop.""" # Run lifespan startup lifespan_mode = getattr(self.cfg, 'asgi_lifespan', 'auto') if lifespan_mode != "off": from gunicorn.asgi.lifespan import LifespanManager self.lifespan = LifespanManager(self.asgi, self.log, self.state) try: await self.lifespan.startup() except Exception as e: if lifespan_mode == "on": self.log.error("ASGI lifespan startup failed: %s", e) return else: # auto mode - app doesn't support lifespan self.log.debug("ASGI lifespan not supported by app: %s", e) self.lifespan = None # Create servers for each listener socket ssl_context = self._get_ssl_context() for sock in self.sockets: try: server = await self.loop.create_server( lambda: ASGIProtocol(self), sock=sock.sock, ssl=ssl_context, reuse_address=True, start_serving=True, ) self.servers.append(server) 
self.log.info("ASGI server listening on %s", sock) except Exception as e: self.log.error("Failed to create server on %s: %s", sock, e) if not self.servers: self.log.error("No servers could be started") return # Main loop with heartbeat try: while self.alive: self.notify() # Check if parent is still alive if self.ppid != os.getppid(): self.log.info("Parent changed, shutting down: %s", self) break # Check connection limit # (Connections are managed by nr_conns in ASGIProtocol) await asyncio.sleep(1.0) except asyncio.CancelledError: pass # Graceful shutdown await self._shutdown() async def _shutdown(self): """Perform graceful shutdown.""" self.log.info("Worker shutting down...") # Stop accepting new connections for server in self.servers: server.close() # Wait for servers to close (skip on quick shutdown) if not self._quick_shutdown: for server in self.servers: if self._quick_shutdown: break try: await asyncio.wait_for(server.wait_closed(), timeout=0.5) except asyncio.TimeoutError: pass # Check _quick_shutdown on next iteration # Wait for in-flight connections (skip on quick shutdown) if self.nr_conns > 0 and not self._quick_shutdown: graceful_timeout = self.cfg.graceful_timeout self.log.info("Waiting for %d connections to finish...", self.nr_conns) deadline = self.loop.time() + graceful_timeout while self.nr_conns > 0 and self.loop.time() < deadline: if self._quick_shutdown: self.log.info("Quick shutdown requested") break await asyncio.sleep(0.1) if self.nr_conns > 0: self.log.warning("Forcing close of %d connections", self.nr_conns) # Run lifespan shutdown (skip on quick shutdown) if self.lifespan and not self._quick_shutdown: try: await self.lifespan.shutdown() except Exception as e: self.log.error("ASGI lifespan shutdown error: %s", e) def _get_ssl_context(self): """Get SSL context if configured.""" if not self.cfg.is_ssl: return None try: from gunicorn import sock return sock.ssl_context(self.cfg) except Exception as e: self.log.error("Failed to create SSL 
context: %s", e) return None def _cleanup(self): """Clean up resources on exit.""" try: # Cancel all pending tasks pending = asyncio.all_tasks(self.loop) for task in pending: task.cancel() # Run loop until all tasks are cancelled (with timeout on quick exit) if pending: gather = asyncio.gather(*pending, return_exceptions=True) if self._quick_shutdown: # Quick exit - don't wait long for tasks to cancel try: self.loop.run_until_complete( asyncio.wait_for(gather, timeout=1.0) ) except asyncio.TimeoutError: self.log.debug("Timeout waiting for tasks to cancel") else: self.loop.run_until_complete(gather) self.loop.close() except Exception as e: self.log.debug("Cleanup error: %s", e) # Close sockets for s in self.sockets: try: s.close() except Exception: pass benoitc-gunicorn-f5fb19e/gunicorn/workers/geventlet.py000066400000000000000000000170741514360242400233630ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # DEPRECATION NOTICE: The eventlet worker is deprecated and will be removed # in Gunicorn 26.0. Eventlet itself is deprecated and no longer maintained. # Please migrate to gevent, gthread, or another supported worker type. # See: https://eventlet.readthedocs.io/en/latest/asyncio/migration.html import warnings warnings.warn( "The eventlet worker is deprecated and will be removed in Gunicorn 26.0. " "Please migrate to gevent, gthread, or another supported worker type. " "See: https://docs.gunicorn.org/en/stable/design.html#choosing-a-worker-type", DeprecationWarning, stacklevel=2 ) # NOTE: eventlet import and monkey_patch() must happen before any other imports # to ensure all standard library modules are properly patched. 
try: import eventlet except ImportError: raise RuntimeError("eventlet worker requires eventlet 0.40.3 or higher") else: from packaging.version import parse as parse_version if parse_version(eventlet.__version__) < parse_version('0.40.3'): raise RuntimeError("eventlet worker requires eventlet 0.40.3 or higher") # Perform monkey patching early, before importing other modules. # This ensures that all subsequent imports get the patched versions. # NOTE: hubs.use_hub() must NOT be called here - it creates OS resources # (like kqueue on macOS) that don't survive fork. It must be called in # each worker process after fork, in the patch() method. eventlet.monkey_patch() from functools import partial # noqa: E402 import sys # noqa: E402 from eventlet import hubs, greenthread # noqa: E402 from eventlet.greenio import GreenSocket # noqa: E402 import eventlet.wsgi # noqa: E402 import greenlet # noqa: E402 from gunicorn.workers.base_async import AsyncWorker # noqa: E402 from gunicorn.sock import ssl_wrap_socket # noqa: E402 # ALREADY_HANDLED is removed in 0.30.3+ now it's `WSGI_LOCAL.already_handled: bool` # https://github.com/eventlet/eventlet/pull/544 EVENTLET_WSGI_LOCAL = getattr(eventlet.wsgi, "WSGI_LOCAL", None) EVENTLET_ALREADY_HANDLED = getattr(eventlet.wsgi, "ALREADY_HANDLED", None) def _eventlet_socket_sendfile(self, file, offset=0, count=None): # Based on the implementation in gevent which in turn is slightly # modified from the standard library implementation. 
if self.gettimeout() == 0: raise ValueError("non-blocking sockets are not supported") if offset: file.seek(offset) blocksize = min(count, 8192) if count else 8192 total_sent = 0 # localize variable access to minimize overhead file_read = file.read sock_send = self.send try: while True: if count: blocksize = min(count - total_sent, blocksize) if blocksize <= 0: break data = memoryview(file_read(blocksize)) if not data: break # EOF while True: try: sent = sock_send(data) except BlockingIOError: continue else: total_sent += sent if sent < len(data): data = data[sent:] else: break return total_sent finally: if total_sent > 0 and hasattr(file, 'seek'): file.seek(offset + total_sent) def _eventlet_serve(sock, handle, concurrency): """ Serve requests forever. This code is nearly identical to ``eventlet.convenience.serve`` except that it attempts to join the pool at the end, which allows for gunicorn graceful shutdowns. """ pool = eventlet.greenpool.GreenPool(concurrency) server_gt = eventlet.greenthread.getcurrent() while True: try: conn, addr = sock.accept() gt = pool.spawn(handle, conn, addr) gt.link(_eventlet_stop, server_gt, conn) conn, addr, gt = None, None, None except eventlet.StopServe: sock.close() pool.waitall() return def _eventlet_stop(client, server, conn): """ Stop a greenlet handling a request and close its connection. This code is lifted from eventlet so as not to depend on undocumented functions in the library. """ try: try: client.wait() finally: conn.close() except greenlet.GreenletExit: pass except Exception: greenthread.kill(server, *sys.exc_info()) def patch_sendfile(): # As of eventlet 0.25.1, GreenSocket.sendfile doesn't exist, # meaning the native implementations of socket.sendfile will be used. # If os.sendfile exists, it will attempt to use that, failing explicitly # if the socket is in non-blocking mode, which the underlying # socket object /is/. 
Even the regular _sendfile_use_send will # fail in that way; plus, it would use the underlying socket.send which isn't # properly cooperative. So we have to monkey-patch a working socket.sendfile() # into GreenSocket; in this method, `self.send` will be the GreenSocket's # send method which is properly cooperative. if not hasattr(GreenSocket, 'sendfile'): GreenSocket.sendfile = _eventlet_socket_sendfile class EventletWorker(AsyncWorker): def patch(self): # NOTE: eventlet.monkey_patch() is called at module import time to # ensure all imports are properly patched. However, hubs.use_hub() # must be called here (after fork) because it creates OS resources # like kqueue that don't survive fork. hubs.use_hub() patch_sendfile() def is_already_handled(self, respiter): # eventlet >= 0.30.3 if getattr(EVENTLET_WSGI_LOCAL, "already_handled", None): raise StopIteration() # eventlet < 0.30.3 if respiter == EVENTLET_ALREADY_HANDLED: raise StopIteration() return super().is_already_handled(respiter) def init_process(self): self.log.warning( "The eventlet worker is DEPRECATED and will be removed in Gunicorn 26.0. " "Please migrate to gevent, gthread, or another supported worker type." 
) self.patch() super().init_process() def handle_quit(self, sig, frame): eventlet.spawn(super().handle_quit, sig, frame) def handle_usr1(self, sig, frame): eventlet.spawn(super().handle_usr1, sig, frame) def timeout_ctx(self): return eventlet.Timeout(self.cfg.keepalive or None, False) def handle(self, listener, client, addr): if self.cfg.is_ssl: client = ssl_wrap_socket(client, self.cfg) super().handle(listener, client, addr) def run(self): acceptors = [] for sock in self.sockets: gsock = GreenSocket(sock) gsock.setblocking(1) hfun = partial(self.handle, gsock) acceptor = eventlet.spawn(_eventlet_serve, gsock, hfun, self.worker_connections) acceptors.append(acceptor) eventlet.sleep(0.0) while self.alive: self.notify() eventlet.sleep(1.0) self.notify() t = None try: with eventlet.Timeout(self.cfg.graceful_timeout) as t: for a in acceptors: a.kill(eventlet.StopServe()) for a in acceptors: a.wait() except eventlet.Timeout as te: if te != t: raise for a in acceptors: a.kill() benoitc-gunicorn-f5fb19e/gunicorn/workers/ggevent.py000066400000000000000000000133751514360242400230250ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
import os import sys from datetime import datetime from functools import partial import time try: import gevent except ImportError: raise RuntimeError("gevent worker requires gevent 24.10.1 or higher") else: from packaging.version import parse as parse_version if parse_version(gevent.__version__) < parse_version('24.10.1'): raise RuntimeError("gevent worker requires gevent 24.10.1 or higher") from gevent.pool import Pool from gevent.server import StreamServer from gevent import hub, monkey, socket, pywsgi import gunicorn from gunicorn.http.wsgi import base_environ from gunicorn.sock import ssl_context from gunicorn.workers.base_async import AsyncWorker VERSION = "gevent/%s gunicorn/%s" % (gevent.__version__, gunicorn.__version__) class GeventWorker(AsyncWorker): server_class = None wsgi_handler = None def patch(self): monkey.patch_all() # patch sockets sockets = [] for s in self.sockets: sockets.append(socket.socket(s.FAMILY, socket.SOCK_STREAM, fileno=s.sock.detach())) self.sockets = sockets def notify(self): super().notify() if self.ppid != os.getppid(): self.log.info("Parent changed, shutting down: %s", self) sys.exit(0) def timeout_ctx(self): return gevent.Timeout(self.cfg.keepalive, False) def run(self): servers = [] ssl_args = {} if self.cfg.is_ssl: ssl_args = {"ssl_context": ssl_context(self.cfg)} for s in self.sockets: s.setblocking(1) pool = Pool(self.worker_connections) if self.server_class is not None: environ = base_environ(self.cfg) environ.update({ "wsgi.multithread": True, "SERVER_SOFTWARE": VERSION, }) server = self.server_class( s, application=self.wsgi, spawn=pool, log=self.log, handler_class=self.wsgi_handler, environ=environ, **ssl_args) else: hfun = partial(self.handle, s) server = StreamServer(s, handle=hfun, spawn=pool, **ssl_args) if self.cfg.workers > 1: server.max_accept = 1 server.start() servers.append(server) while self.alive: self.notify() gevent.sleep(1.0) try: # Stop accepting requests for server in servers: server.close() # Handle 
current requests until graceful_timeout ts = time.time() while time.time() - ts <= self.cfg.graceful_timeout: accepting = 0 for server in servers: if server.pool.free_count() != server.pool.size: accepting += 1 # if no server is accepting a connection, we can exit if not accepting: return self.notify() gevent.sleep(1.0) # Force kill all the active handlers self.log.warning("Worker graceful timeout (pid:%s)", self.pid) for server in servers: server.stop(timeout=1) except Exception: pass def handle(self, listener, client, addr): # Connected socket timeout defaults to socket.getdefaulttimeout(). # This forces to blocking mode. client.setblocking(1) super().handle(listener, client, addr) def handle_request(self, listener_name, req, sock, addr): try: super().handle_request(listener_name, req, sock, addr) except gevent.GreenletExit: pass except SystemExit: pass def handle_quit(self, sig, frame): # Move this out of the signal handler so we can use # blocking calls. See #1126 gevent.spawn(super().handle_quit, sig, frame) def handle_usr1(self, sig, frame): # Make the gevent workers handle the usr1 signal # by deferring to a new greenlet. See #1645 gevent.spawn(super().handle_usr1, sig, frame) def init_process(self): self.patch() hub.reinit() super().init_process() class GeventResponse: status = None headers = None sent = None def __init__(self, status, headers, clength): self.status = status self.headers = headers self.sent = clength class PyWSGIHandler(pywsgi.WSGIHandler): def log_request(self): start = datetime.fromtimestamp(self.time_start) finish = datetime.fromtimestamp(self.time_finish) response_time = finish - start resp_headers = getattr(self, 'response_headers', {}) # Status is expected to be a string but is encoded to bytes in gevent for PY3 # Except when it isn't because gevent uses hardcoded strings for network errors. 
status = self.status.decode() if isinstance(self.status, bytes) else self.status resp = GeventResponse(status, resp_headers, self.response_length) if hasattr(self, 'headers'): req_headers = self.headers.items() else: req_headers = [] self.server.log.access(resp, req_headers, self.environ, response_time) def get_environ(self): env = super().get_environ() env['gunicorn.sock'] = self.socket env['RAW_URI'] = self.path return env class PyWSGIServer(pywsgi.WSGIServer): pass class GeventPyWSGIWorker(GeventWorker): "The Gevent StreamServer based workers." server_class = PyWSGIServer wsgi_handler = PyWSGIHandler benoitc-gunicorn-f5fb19e/gunicorn/workers/gthread.py000066400000000000000000000534701514360242400230040ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # design: # A threaded worker accepts connections in the main loop, accepted # connections are added to the thread pool as a connection job. # Keepalive connections are put back in the loop waiting for an event. # If no event happen after the keep alive timeout, the connection is # closed. # pylint: disable=no-else-break from concurrent import futures import errno import os import queue import selectors import socket import ssl import sys import time from collections import deque from datetime import datetime from functools import partial from . import base from .. import http from .. import util from .. 
import sock from ..http import wsgi class TConn: def __init__(self, cfg, sock, client, server): self.cfg = cfg self.sock = sock self.client = client self.server = server self.timeout = None self.parser = None self.initialized = False self.is_http2 = False # set the socket to non blocking self.sock.setblocking(False) def init(self): # Guard against double initialization if self.initialized: return self.initialized = True self.sock.setblocking(True) if self.parser is None: # wrap the socket if needed if self.cfg.is_ssl: self.sock = sock.ssl_wrap_socket(self.sock, self.cfg) # Complete the handshake to ensure ALPN negotiation is done # (needed if do_handshake_on_connect is False) if not self.cfg.do_handshake_on_connect: self.sock.do_handshake() # Check if HTTP/2 was negotiated via ALPN if sock.is_http2_negotiated(self.sock): self.is_http2 = True self.parser = http.get_parser( self.cfg, self.sock, self.client, http2_connection=True ) self.parser.initiate_connection() return # initialize the HTTP/1.x parser self.parser = http.get_parser(self.cfg, self.sock, self.client) def set_timeout(self): # Use monotonic clock for reliability (time.time() can jump due to NTP) self.timeout = time.monotonic() + self.cfg.keepalive def close(self): util.close(self.sock) class PollableMethodQueue: """Thread-safe queue that can wake up a selector. Uses a pipe to allow worker threads to signal the main thread when work is ready, enabling lock-free coordination. This approach is compatible with all POSIX systems including Linux, macOS, FreeBSD, OpenBSD, and NetBSD. The pipe is set to non-blocking mode to prevent worker threads from blocking if the pipe buffer fills up under extreme load. 
""" def __init__(self): self._read_fd = None self._write_fd = None self._queue = None def init(self): """Initialize the pipe and queue.""" self._read_fd, self._write_fd = os.pipe() # Set both ends to non-blocking: # - Write: prevents worker threads from blocking if buffer is full # - Read: allows run_callbacks to drain without blocking os.set_blocking(self._read_fd, False) os.set_blocking(self._write_fd, False) self._queue = queue.SimpleQueue() def close(self): """Close the pipe file descriptors.""" if self._read_fd is not None: try: os.close(self._read_fd) except OSError: pass if self._write_fd is not None: try: os.close(self._write_fd) except OSError: pass def fileno(self): """Return the readable file descriptor for selector registration.""" return self._read_fd def defer(self, callback, *args): """Queue a callback to be run on the main thread. The callback is added to the queue first, then a wake-up byte is written to the pipe. If the pipe write fails (buffer full), it's safe to ignore because the main thread will eventually drain the queue when it reads other wake-up bytes. """ self._queue.put(partial(callback, *args)) try: os.write(self._write_fd, b'\x00') except OSError: # Pipe buffer full (EAGAIN/EWOULDBLOCK) - safe to ignore # The main thread will still process the queue pass def run_callbacks(self, _fileobj, max_callbacks=50): """Run queued callbacks. Called when the pipe is readable. Drains all available wake-up bytes and runs corresponding callbacks. The max_callbacks limit prevents starvation of other event sources. 
""" # Read all available wake-up bytes (up to limit) try: data = os.read(self._read_fd, max_callbacks) except OSError: return # Run callbacks for each byte read, plus any extras in queue # (extras can accumulate if pipe writes were dropped) callbacks_run = 0 while callbacks_run < len(data) + 10: # +10 to drain dropped writes try: callback = self._queue.get_nowait() callback() callbacks_run += 1 except queue.Empty: break class ThreadWorker(base.Worker): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.worker_connections = self.cfg.worker_connections self.max_keepalived = self.cfg.worker_connections - self.cfg.threads self.tpool = None self.poller = None self.method_queue = PollableMethodQueue() self.keepalived_conns = deque() self.nr_conns = 0 self._accepting = False @classmethod def check_config(cls, cfg, log): max_keepalived = cfg.worker_connections - cfg.threads if max_keepalived <= 0 and cfg.keepalive: log.warning("No keepalived connections can be handled. " + "Check the number of worker connections and threads.") def init_process(self): self.tpool = self.get_thread_pool() self.poller = selectors.DefaultSelector() self.method_queue.init() super().init_process() def get_thread_pool(self): """Override this method to customize how the thread pool is created""" return futures.ThreadPoolExecutor(max_workers=self.cfg.threads) def handle_exit(self, sig, frame): """Handle SIGTERM - begin graceful shutdown.""" if self.alive: self.alive = False # Wake up the poller so it can start shutdown self.method_queue.defer(lambda: None) def handle_quit(self, sig, frame): """Handle SIGQUIT - immediate shutdown.""" self.tpool.shutdown(wait=False) super().handle_quit(sig, frame) def set_accept_enabled(self, enabled): """Enable or disable accepting new connections.""" if enabled == self._accepting: return for listener in self.sockets: if enabled: listener.setblocking(False) self.poller.register(listener, selectors.EVENT_READ, self.accept) else: 
self.poller.unregister(listener) self._accepting = enabled def enqueue_req(self, conn): """Submit connection to thread pool for processing.""" fs = self.tpool.submit(self.handle, conn) fs.add_done_callback( lambda fut: self.method_queue.defer(self.finish_request, conn, fut)) def accept(self, listener): """Accept a new connection from a listener socket.""" try: client_sock, client_addr = listener.accept() self.nr_conns += 1 client_sock.setblocking(True) conn = TConn(self.cfg, client_sock, client_addr, listener.getsockname()) # Submit directly to thread pool for processing self.enqueue_req(conn) except OSError as e: if e.errno not in (errno.EAGAIN, errno.ECONNABORTED, errno.EWOULDBLOCK): raise def on_client_socket_readable(self, conn, client): """Handle a keepalive connection becoming readable.""" self.poller.unregister(client) self.keepalived_conns.remove(conn) # Submit to thread pool for processing self.enqueue_req(conn) def murder_keepalived(self): """Close expired keepalive connections.""" now = time.monotonic() while self.keepalived_conns: conn = self.keepalived_conns[0] delta = conn.timeout - now if delta > 0: break # Connection has timed out self.keepalived_conns.popleft() try: self.poller.unregister(conn.sock) except (OSError, KeyError, ValueError): pass # Already unregistered self.nr_conns -= 1 conn.close() def is_parent_alive(self): # If our parent changed then we shut down. 
if self.ppid != os.getppid(): self.log.info("Parent changed, shutting down: %s", self) return False return True def wait_for_and_dispatch_events(self, timeout): """Wait for events and dispatch callbacks.""" try: events = self.poller.select(timeout) for key, _ in events: callback = key.data callback(key.fileobj) except OSError as e: if e.errno != errno.EINTR: raise def run(self): # Register the method queue with the poller self.poller.register(self.method_queue.fileno(), selectors.EVENT_READ, self.method_queue.run_callbacks) # Start accepting connections self.set_accept_enabled(True) while self.alive: # Notify the arbiter we are alive self.notify() # Check if we can accept more connections can_accept = self.nr_conns < self.worker_connections if can_accept != self._accepting: self.set_accept_enabled(can_accept) # Wait for events (unified event loop - no futures.wait()) self.wait_for_and_dispatch_events(timeout=1.0) if not self.is_parent_alive(): break # Handle keepalive timeouts self.murder_keepalived() # Graceful shutdown: stop accepting but handle existing connections self.set_accept_enabled(False) # Wait for in-flight connections within grace period graceful_timeout = time.monotonic() + self.cfg.graceful_timeout while self.nr_conns > 0: time_remaining = max(graceful_timeout - time.monotonic(), 0) if time_remaining == 0: break self.wait_for_and_dispatch_events(timeout=time_remaining) self.murder_keepalived() # Cleanup self.tpool.shutdown(wait=False) self.poller.close() self.method_queue.close() for s in self.sockets: s.close() def finish_request(self, conn, fs): """Handle completion of a request (called via method_queue on main thread).""" try: keepalive = not fs.cancelled() and fs.result() if keepalive and self.alive: # Put connection back in the poller for keepalive conn.sock.setblocking(False) conn.set_timeout() self.keepalived_conns.append(conn) self.poller.register(conn.sock, selectors.EVENT_READ, partial(self.on_client_socket_readable, conn)) else: 
self.nr_conns -= 1 conn.close() except Exception: self.nr_conns -= 1 conn.close() def handle(self, conn): """Handle a request on a connection. Runs in a worker thread.""" req = None try: # Always ensure blocking mode in worker thread. # Critical for keepalive connections: the socket is set to non-blocking # for the selector in finish_request(), but must be blocking for # request/body reading to avoid SSLWantReadError on SSL connections. conn.sock.setblocking(True) # Initialize connection in worker thread to handle SSL errors gracefully # (ENOTCONN from ssl_wrap_socket would crash main thread otherwise) conn.init() # HTTP/2 connections require special handling if conn.is_http2: return self.handle_http2(conn) req = next(conn.parser) if not req: return False # Handle the request keepalive = self.handle_request(req, conn) if keepalive: # Discard any unread request body before keepalive # to prevent socket appearing readable due to leftover bytes conn.parser.finish_body() return True except http.errors.NoMoreData as e: self.log.debug("Ignored premature client disconnection. %s", e) except StopIteration as e: self.log.debug("Closing connection. %s", e) except ssl.SSLError as e: if e.args[0] == ssl.SSL_ERROR_EOF: self.log.debug("ssl connection closed") conn.sock.close() else: self.log.debug("Error processing SSL request.") self.handle_error(req, conn.sock, conn.client, e) except OSError as e: if e.errno not in (errno.EPIPE, errno.ECONNRESET, errno.ENOTCONN): self.log.exception("Socket error processing request.") else: if e.errno == errno.ECONNRESET: self.log.debug("Ignoring connection reset") elif e.errno == errno.ENOTCONN: self.log.debug("Ignoring socket not connected") else: self.log.debug("Ignoring connection epipe") except Exception as e: self.handle_error(req, conn.sock, conn.client, e) return False def handle_http2(self, conn): """Handle an HTTP/2 connection. Runs in a worker thread. HTTP/2 connections are persistent and multiplex multiple streams. 
We handle all streams until the connection is closed. Returns: False (HTTP/2 connections don't use keepalive polling) """ h2_conn = conn.parser # HTTP2ServerConnection try: while not h2_conn.is_closed and self.alive: # Receive data and get completed requests requests = h2_conn.receive_data() for req in requests: try: self.handle_http2_request(req, conn, h2_conn) except Exception as e: self.log.exception("Error handling HTTP/2 request") try: h2_conn.send_error(req.stream.stream_id, 500, str(e)) except Exception: pass finally: # Cleanup stream after processing h2_conn.cleanup_stream(req.stream.stream_id) # Check if we need to close if not self.alive: h2_conn.close() break except http.errors.NoMoreData: self.log.debug("HTTP/2 connection closed by client") except ssl.SSLError as e: if e.args[0] == ssl.SSL_ERROR_EOF: self.log.debug("HTTP/2 SSL connection closed") else: self.log.debug("HTTP/2 SSL error: %s", e) except OSError as e: if e.errno not in (errno.EPIPE, errno.ECONNRESET, errno.ENOTCONN): self.log.exception("HTTP/2 socket error") except Exception: self.log.exception("HTTP/2 connection error") return False def handle_http2_request(self, req, conn, h2_conn): """Handle a single HTTP/2 request/stream.""" environ = {} resp = None stream_id = req.stream.stream_id try: self.cfg.pre_request(self, req) request_start = datetime.now() # Create WSGI environ resp, environ = wsgi.create(req, conn.sock, conn.client, conn.server, self.cfg) environ["wsgi.multithread"] = True environ["HTTP_VERSION"] = "2" # Indicate HTTP/2 # Replace wsgi.early_hints with HTTP/2-specific version def send_early_hints_h2(headers): """Send 103 Early Hints over HTTP/2.""" h2_conn.send_informational(stream_id, 103, headers) environ["wsgi.early_hints"] = send_early_hints_h2 # Add HTTP/2 trailer support pending_trailers = [] def send_trailers_h2(trailers): """Queue trailers to be sent after response body.""" pending_trailers.extend(trailers) environ["gunicorn.http2.send_trailers"] = send_trailers_h2 
self.nr += 1 if self.nr >= self.max_requests: if self.alive: self.log.info("Autorestarting worker after current request.") self.alive = False # Run WSGI app respiter = self.wsgi(environ, resp.start_response) # Collect response body response_body = b'' try: if hasattr(respiter, '__iter__'): for item in respiter: if item: response_body += item finally: if hasattr(respiter, "close"): respiter.close() # Send response via HTTP/2 if pending_trailers: # Send headers, body, then trailers separately # Build response headers with :status pseudo-header response_headers = [(':status', str(resp.status_code))] for name, value in resp.headers: response_headers.append((name.lower(), str(value))) # Send headers without ending stream h2_conn.h2_conn.send_headers(stream_id, response_headers, end_stream=False) stream = h2_conn.streams[stream_id] stream.send_headers(response_headers, end_stream=False) h2_conn._send_pending_data() # Send body without ending stream if response_body: h2_conn.h2_conn.send_data(stream_id, response_body, end_stream=False) stream.send_data(response_body, end_stream=False) h2_conn._send_pending_data() # Send trailers (ends stream) h2_conn.send_trailers(stream_id, pending_trailers) else: # No trailers, use standard response h2_conn.send_response( stream_id, resp.status_code, resp.headers, response_body ) request_time = datetime.now() - request_start self.log.access(resp, req, environ, request_time) finally: try: self.cfg.post_request(self, req, environ, resp) except Exception: self.log.exception("Exception in post_request hook") def handle_request(self, req, conn): environ = {} resp = None try: self.cfg.pre_request(self, req) request_start = datetime.now() resp, environ = wsgi.create(req, conn.sock, conn.client, conn.server, self.cfg) environ["wsgi.multithread"] = True self.nr += 1 if self.nr >= self.max_requests: if self.alive: self.log.info("Autorestarting worker after current request.") self.alive = False resp.force_close() if not self.alive or not 
self.cfg.keepalive: resp.force_close() elif len(self.keepalived_conns) >= self.max_keepalived: resp.force_close() respiter = self.wsgi(environ, resp.start_response) try: if isinstance(respiter, environ['wsgi.file_wrapper']): resp.write_file(respiter) else: for item in respiter: resp.write(item) resp.close() finally: request_time = datetime.now() - request_start self.log.access(resp, req, environ, request_time) if hasattr(respiter, "close"): respiter.close() if resp.should_close(): self.log.debug("Closing connection.") return False except OSError: # pass to next try-except level util.reraise(*sys.exc_info()) except Exception: if resp and resp.headers_sent: # If the requests have already been sent, we should close the # connection to indicate the error. self.log.exception("Error handling request") try: conn.sock.shutdown(socket.SHUT_RDWR) conn.sock.close() except OSError: pass raise StopIteration() raise finally: try: self.cfg.post_request(self, req, environ, resp) except Exception: self.log.exception("Exception in post_request hook") return True benoitc-gunicorn-f5fb19e/gunicorn/workers/gtornado.py000066400000000000000000000074661514360242400232070ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
import os import sys try: import tornado except ImportError: raise RuntimeError("You need tornado installed to use this worker.") import tornado.web import tornado.httpserver from tornado.ioloop import IOLoop, PeriodicCallback from tornado.wsgi import WSGIContainer from gunicorn.workers.base import Worker from gunicorn import __version__ as gversion from gunicorn.sock import ssl_context class TornadoWorker(Worker): @classmethod def setup(cls): web = sys.modules.pop("tornado.web") old_clear = web.RequestHandler.clear def clear(self): old_clear(self) if "Gunicorn" not in self._headers["Server"]: self._headers["Server"] += " (Gunicorn/%s)" % gversion web.RequestHandler.clear = clear sys.modules["tornado.web"] = web def handle_exit(self, sig, frame): if self.alive: super().handle_exit(sig, frame) def handle_request(self): self.nr += 1 if self.alive and self.nr >= self.max_requests: self.log.info("Autorestarting worker after current request.") self.alive = False def watchdog(self): if self.alive: self.notify() if self.ppid != os.getppid(): self.log.info("Parent changed, shutting down: %s", self) self.alive = False def heartbeat(self): if not self.alive: if self.server_alive: if hasattr(self, 'server'): try: self.server.stop() except Exception: pass self.server_alive = False else: for callback in self.callbacks: callback.stop() self.ioloop.stop() def init_process(self): # IOLoop cannot survive a fork or be shared across processes # in any way. When multiple processes are being used, each process # should create its own IOLoop. We should clear current IOLoop # if exists before os.fork. IOLoop.clear_current() super().init_process() def run(self): self.ioloop = IOLoop.instance() self.alive = True self.server_alive = False # Warn if HTTP/2 is requested - tornado worker doesn't support it if 'h2' in self.cfg.http_protocols: self.log.warning( "HTTP/2 is not supported by the tornado worker. " "Use gthread, gevent, eventlet, or asgi workers for HTTP/2 support. 
" "Falling back to HTTP/1.1 only." ) self.callbacks = [] self.callbacks.append(PeriodicCallback(self.watchdog, 1000)) self.callbacks.append(PeriodicCallback(self.heartbeat, 1000)) for callback in self.callbacks: callback.start() # Assume the app is a WSGI callable if its not an # instance of tornado.web.Application or WSGIContainer app = self.wsgi if not isinstance(app, WSGIContainer) and \ not isinstance(app, tornado.web.Application): app = WSGIContainer(app) class _HTTPServer(tornado.httpserver.HTTPServer): def on_close(instance, server_conn): self.handle_request() super().on_close(server_conn) if self.cfg.is_ssl: server = _HTTPServer(app, ssl_options=ssl_context(self.cfg)) else: server = _HTTPServer(app) self.server = server self.server_alive = True for s in self.sockets: s.setblocking(0) server.add_socket(s) server.no_keep_alive = self.cfg.keepalive <= 0 server.start(num_processes=1) self.ioloop.start() benoitc-gunicorn-f5fb19e/gunicorn/workers/sync.py000066400000000000000000000166071514360242400223430ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
# from datetime import datetime import errno import os import select import socket import ssl import sys from gunicorn import http from gunicorn.http import wsgi from gunicorn import sock from gunicorn import util from gunicorn.workers import base class StopWaiting(Exception): """ exception raised to stop waiting for a connection """ class SyncWorker(base.Worker): def accept(self, listener): client, addr = listener.accept() client.setblocking(1) util.close_on_exec(client) self.handle(listener, client, addr) def wait(self, timeout): try: self.notify() ret = select.select(self.wait_fds, [], [], timeout) if ret[0]: if self.PIPE[0] in ret[0]: os.read(self.PIPE[0], 1) return ret[0] except OSError as e: if e.args[0] == errno.EINTR: return self.sockets if e.args[0] == errno.EBADF: if self.nr < 0: return self.sockets else: raise StopWaiting raise def is_parent_alive(self): # If our parent changed then we shut down. if self.ppid != os.getppid(): self.log.info("Parent changed, shutting down: %s", self) return False return True def run_for_one(self, timeout): listener = self.sockets[0] while self.alive: self.notify() # Accept a connection. If we get an error telling us # that no connection is waiting we fall down to the # select which is where we'll wait for a bit for new # workers to come give us some love. try: self.accept(listener) # Keep processing clients until no one is waiting. This # prevents the need to select() for every client that we # process. 
continue except OSError as e: if e.errno not in (errno.EAGAIN, errno.ECONNABORTED, errno.EWOULDBLOCK): raise if not self.is_parent_alive(): return try: self.wait(timeout) except StopWaiting: return def run_for_multiple(self, timeout): while self.alive: self.notify() try: ready = self.wait(timeout) except StopWaiting: return if ready is not None: for listener in ready: if listener == self.PIPE[0]: continue try: self.accept(listener) except OSError as e: if e.errno not in (errno.EAGAIN, errno.ECONNABORTED, errno.EWOULDBLOCK): raise if not self.is_parent_alive(): return def run(self): # if no timeout is given the worker will never wait and will # use the CPU for nothing. This minimal timeout prevent it. timeout = self.timeout or 0.5 # Warn if HTTP/2 is requested - sync worker doesn't support it if 'h2' in self.cfg.http_protocols: self.log.warning( "HTTP/2 is not supported by the sync worker. " "Use gthread, gevent, eventlet, or asgi workers for HTTP/2 support. " "Falling back to HTTP/1.1 only." ) # self.socket appears to lose its blocking status after # we fork in the arbiter. Reset it here. for s in self.sockets: s.setblocking(0) if len(self.sockets) > 1: self.run_for_multiple(timeout) else: self.run_for_one(timeout) def handle(self, listener, client, addr): req = None try: if self.cfg.is_ssl: client = sock.ssl_wrap_socket(client, self.cfg) parser = http.get_parser(self.cfg, client, addr) req = next(parser) self.handle_request(listener, req, client, addr) except http.errors.NoMoreData as e: self.log.debug("Ignored premature client disconnection. %s", e) except StopIteration as e: self.log.debug("Closing connection. 
%s", e) except ssl.SSLError as e: if e.args[0] == ssl.SSL_ERROR_EOF: self.log.debug("ssl connection closed") client.close() else: self.log.debug("Error processing SSL request.") self.handle_error(req, client, addr, e) except OSError as e: if e.errno not in (errno.EPIPE, errno.ECONNRESET, errno.ENOTCONN): self.log.exception("Socket error processing request.") else: if e.errno == errno.ECONNRESET: self.log.debug("Ignoring connection reset") elif e.errno == errno.ENOTCONN: self.log.debug("Ignoring socket not connected") else: self.log.debug("Ignoring EPIPE") except BaseException as e: self.handle_error(req, client, addr, e) finally: util.close(client) def handle_request(self, listener, req, client, addr): environ = {} resp = None try: self.cfg.pre_request(self, req) request_start = datetime.now() resp, environ = wsgi.create(req, client, addr, listener.getsockname(), self.cfg) # Force the connection closed until someone shows # a buffering proxy that supports Keep-Alive to # the backend. resp.force_close() self.nr += 1 if self.nr >= self.max_requests: self.log.info("Autorestarting worker after current request.") self.alive = False respiter = self.wsgi(environ, resp.start_response) try: if isinstance(respiter, environ['wsgi.file_wrapper']): resp.write_file(respiter) else: for item in respiter: resp.write(item) resp.close() finally: request_time = datetime.now() - request_start self.log.access(resp, req, environ, request_time) if hasattr(respiter, "close"): respiter.close() except OSError: # pass to next try-except level util.reraise(*sys.exc_info()) except Exception: if resp and resp.headers_sent: # If the requests have already been sent, we should close the # connection to indicate the error. 
self.log.exception("Error handling request") try: client.shutdown(socket.SHUT_RDWR) client.close() except OSError: pass raise StopIteration() raise finally: try: self.cfg.post_request(self, req, environ, resp) except Exception: self.log.exception("Exception in post_request hook") benoitc-gunicorn-f5fb19e/gunicorn/workers/workertmp.py000066400000000000000000000031041514360242400234050ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import os import time import platform import tempfile from gunicorn import util PLATFORM = platform.system() IS_CYGWIN = PLATFORM.startswith('CYGWIN') class WorkerTmp: def __init__(self, cfg): old_umask = os.umask(cfg.umask) fdir = cfg.worker_tmp_dir if fdir and not os.path.isdir(fdir): raise RuntimeError("%s doesn't exist. Can't create workertmp." % fdir) fd, name = tempfile.mkstemp(prefix="wgunicorn-", dir=fdir) os.umask(old_umask) # change the owner and group of the file if the worker will run as # a different user or group, so that the worker can modify the file if cfg.uid != os.geteuid() or cfg.gid != os.getegid(): util.chown(name, cfg.uid, cfg.gid) # unlink the file so we don't leak temporary files try: if not IS_CYGWIN: util.unlink(name) # In Python 3.8, open() emits RuntimeWarning if buffering=1 for binary mode. # Because we never write to this file, pass 0 to switch buffering off. 
self._tmp = os.fdopen(fd, 'w+b', 0) except Exception: os.close(fd) raise def notify(self): new_time = time.monotonic() os.utime(self._tmp.fileno(), (new_time, new_time)) def last_update(self): return os.fstat(self._tmp.fileno()).st_mtime def fileno(self): return self._tmp.fileno() def close(self): return self._tmp.close() benoitc-gunicorn-f5fb19e/mkdocs.yml000066400000000000000000000062661514360242400175000ustar00rootroot00000000000000site_name: Gunicorn site_url: https://gunicorn.org repo_url: https://github.com/benoitc/gunicorn repo_name: benoitc/gunicorn docs_dir: docs/content use_directory_urls: true nav: - Home: index.md - Getting Started: - Quickstart: quickstart.md - Install: install.md - Run: run.md - Configure: configure.md - Guides: - Deploy: deploy.md - Docker: guides/docker.md - HTTP/2: guides/http2.md - ASGI Worker: asgi.md - Dirty Arbiters: dirty.md - Control Interface: guides/gunicornc.md - uWSGI Protocol: uwsgi.md - Signals: signals.md - Instrumentation: instrumentation.md - Custom: custom.md - Design: design.md - Community: - Overview: community.md - FAQ: faq.md - Support Us: sponsor.md - Sponsor: sponsor.md - Reference: - Settings: reference/settings.md - News: - Latest: news.md - '2026': 2026-news.md - '2024': 2024-news.md - '2023': 2023-news.md - '2021': 2021-news.md - '2020': 2020-news.md - '2019': 2019-news.md - '2018': 2018-news.md - '2017': 2017-news.md - '2016': 2016-news.md - '2015': 2015-news.md - '2014': 2014-news.md - '2013': 2013-news.md - '2012': 2012-news.md - '2011': 2011-news.md - '2010': 2010-news.md theme: name: material custom_dir: overrides language: en logo: assets/gunicorn.svg favicon: assets/gunicorn.svg palette: - media: "(prefers-color-scheme: light)" scheme: default primary: green accent: teal toggle: icon: material/brightness-7 name: Switch to dark mode - media: "(prefers-color-scheme: dark)" scheme: slate primary: green accent: teal toggle: icon: material/brightness-4 name: Switch to light mode font: text: Inter code: 
JetBrains Mono features: - content.code.copy - content.code.annotate - navigation.instant - navigation.instant.progress - navigation.tracking - navigation.sections - navigation.tabs - navigation.tabs.sticky - navigation.top - navigation.path - search.highlight - search.suggest - search.share - toc.follow - toc.integrate icon: repo: fontawesome/brands/github plugins: - search - macros - gen-files: scripts: - scripts/build_settings_doc.py markdown_extensions: - admonition - attr_list - def_list - footnotes - md_in_html - tables - toc: permalink: true - pymdownx.details - pymdownx.highlight - pymdownx.inlinehilite - pymdownx.magiclink - pymdownx.superfences - pymdownx.snippets: base_path: - . check_paths: true - pymdownx.tabbed: alternate_style: true - pymdownx.tasklist: custom_checkbox: true extra_css: - styles/overrides.css - assets/stylesheets/home.css extra_javascript: - assets/javascripts/toc-collapse.js extra: social: - icon: fontawesome/brands/github link: https://github.com/benoitc/gunicorn - icon: fontawesome/brands/python link: https://pypi.org/project/gunicorn/ - icon: fontawesome/solid/heart link: https://github.com/sponsors/benoitc benoitc-gunicorn-f5fb19e/overrides/000077500000000000000000000000001514360242400174655ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/overrides/home.html000066400000000000000000000013731514360242400213070ustar00rootroot00000000000000{% extends "main.html" %} {% block tabs %} {{ super() }} {% endblock %} {% block htmltitle %} Gunicorn - Python WSGI HTTP Server for UNIX {% endblock %} {% block styles %} {{ super() }} {% endblock %} {% block hero %}{% endblock %} {% block content %}{% endblock %} {% block site_nav %} {{ super() }} {% endblock %} {% block container %}
{{ page.content }}
{% endblock %} {% block footer %} {{ super() }} {% endblock %} benoitc-gunicorn-f5fb19e/pyproject.toml000066400000000000000000000057061514360242400204070ustar00rootroot00000000000000[build-system] requires = ["setuptools>=61.2"] build-backend = "setuptools.build_meta" [project] # see https://packaging.python.org/en/latest/specifications/pyproject-toml/ name = "gunicorn" authors = [{name = "Benoit Chesneau", email = "benoitc@gunicorn.org"}] license = "MIT" license-files = ["LICENSE"] description = "WSGI HTTP Server for UNIX" readme = "README.md" classifiers = [ "Development Status :: 5 - Production/Stable", "Environment :: Other Environment", "Intended Audience :: Developers", "Operating System :: MacOS :: MacOS X", "Operating System :: POSIX", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Internet", "Topic :: Utilities", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: WSGI", "Topic :: Internet :: WWW/HTTP :: WSGI :: Server", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", ] requires-python = ">=3.10" dependencies = [ "packaging", ] dynamic = ["version"] [project.urls] Homepage = "https://gunicorn.org" Documentation = "https://gunicorn.org" "Issue tracker" = "https://github.com/benoitc/gunicorn/issues" "Source code" = "https://github.com/benoitc/gunicorn" Changelog = "https://gunicorn.org/news/" [project.optional-dependencies] gevent = ["gevent>=24.10.1"] eventlet = ["eventlet>=0.40.3"] tornado = ["tornado>=6.5.0"] gthread = [] setproctitle = ["setproctitle"] http2 = ["h2>=4.1.0"] testing = [ "gevent>=24.10.1", 
"eventlet>=0.40.3", "h2>=4.1.0", "coverage", "pytest", "pytest-cov", "pytest-asyncio", "uvloop>=0.19.0", "httpx[http2]", ] [project.scripts] # duplicates "python -m gunicorn" handling in __main__.py gunicorn = "gunicorn.app.wsgiapp:run" gunicornc = "gunicorn.ctl.cli:main" # note the quotes around "paste.server_runner" to escape the dot [project.entry-points."paste.server_runner"] main = "gunicorn.app.pasterapp:serve" [tool.pytest.ini_options] # # can override these: python -m pytest --override-ini="addopts=" norecursedirs = ["examples", "lib", "local", "src", "tests/docker"] testpaths = ["tests/"] addopts = "--assert=plain --cov=gunicorn --cov-report=xml" filterwarnings = [ # Eventlet patches select module, which breaks asyncio event loop cleanup # This is expected behavior when testing eventlet worker "ignore::pytest.PytestUnraisableExceptionWarning", ] [tool.setuptools] zip-safe = false include-package-data = true [tool.setuptools.packages] find = {namespaces = false} [tool.setuptools.dynamic] version = {attr = "gunicorn.__version__"} benoitc-gunicorn-f5fb19e/requirements_dev.txt000066400000000000000000000004471514360242400216120ustar00rootroot00000000000000-r requirements_test.txt # setuptools v68.0 fails hard on invalid pyproject.toml # which a developer would want to know # otherwise, oldest known-working version is 61.2 setuptools>=68.0 mkdocs>=1.6 mkdocs-material>=9.5 mkdocs-gen-files>=0.5 mkdocs-macros-plugin>=1.0 pymdown-extensions>=10.0 benoitc-gunicorn-f5fb19e/requirements_test.txt000066400000000000000000000001011514360242400217760ustar00rootroot00000000000000gevent eventlet coverage pytest>=7.2.0 pytest-cov pytest-asyncio benoitc-gunicorn-f5fb19e/scripts/000077500000000000000000000000001514360242400171525ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/scripts/build_settings_doc.py000066400000000000000000000206101514360242400233670ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. 
# See the NOTICE for more information. """Generate the Markdown settings reference for MkDocs.""" from __future__ import annotations import inspect import textwrap from pathlib import Path from typing import List import re import gunicorn.config as guncfg HEAD = """\ > **Generated file** — update `gunicorn/config.py` instead. # Settings This reference is built directly from `gunicorn.config.KNOWN_SETTINGS` and is regenerated during every documentation build. !!! note Settings can be provided through the `GUNICORN_CMD_ARGS` environment variable. For example: ```console $ GUNICORN_CMD_ARGS="--bind=127.0.0.1 --workers=3" gunicorn app:app ``` _Added in 19.7._ """ def _format_default(setting: guncfg.Setting) -> tuple[str, bool]: if hasattr(setting, "default_doc"): text = textwrap.dedent(setting.default_doc).strip("\n") return text, True default = setting.default if callable(default): source = textwrap.dedent(inspect.getsource(default)).strip("\n") return f"```python\n{source}\n```", True if default == "": return "`''`", False return f"`{default!r}`", False def _format_cli(setting: guncfg.Setting) -> str | None: if not setting.cli: return None if setting.meta: variants = [f"`{opt} {setting.meta}`" for opt in setting.cli] else: variants = [f"`{opt}`" for opt in setting.cli] return ", ".join(variants) REF_MAP = { "forwarded-allow-ips": ("reference/settings.md", "forwarded_allow_ips"), "forwarder-headers": ("reference/settings.md", "forwarder_headers"), "proxy-allow-ips": ("reference/settings.md", "proxy_allow_ips"), "worker-class": ("reference/settings.md", "worker_class"), "reload": ("reference/settings.md", "reload"), "raw-env": ("reference/settings.md", "raw_env"), "check-config": ("reference/settings.md", "check_config"), "errorlog": ("reference/settings.md", "errorlog"), "logconfig": ("reference/settings.md", "logconfig"), "logconfig-json": ("reference/settings.md", "logconfig_json"), "ssl-context": ("reference/settings.md", "ssl_context"), "ssl-version": 
("reference/settings.md", "ssl_version"), "blocking-os-fchmod": ("reference/settings.md", "blocking_os_fchmod"), "configuration_file": ("../configure.md", "configuration-file"), } REF_PATTERN = re.compile(r":ref:`([^`]+)`") def _convert_refs(text: str) -> str: def repl(match: re.Match[str]) -> str: raw = match.group(1) if "<" in raw and raw.endswith(">"): label, target = raw.split("<", 1) target = target[:-1] label = label.replace("\n", " ").strip() else: label, target = None, raw.strip() info = REF_MAP.get(target) if not info: return (label or target).replace("\n", " ").strip() path, anchor = info if path.endswith(".md"): if path == "reference/settings.md" and anchor: href = f"#{anchor}" else: href = path + (f"#{anchor}" if anchor else "") else: href = path + (f"#{anchor}" if anchor else "") text = (label or target).replace("\n", " ").strip() return f"[{text}]({href})" return REF_PATTERN.sub(repl, text) def _consume_indented(lines: List[str], start: int) -> tuple[str, int]: body: List[str] = [] i = start while i < len(lines): line = lines[i] if line.startswith(" ") or not line.strip(): body.append(line) i += 1 else: break text = textwrap.dedent("\n".join(body)).strip("\n") return text, i def _convert_desc(desc: str) -> str: raw_lines = textwrap.dedent(desc).splitlines() output: List[str] = [] i = 0 while i < len(raw_lines): line = raw_lines[i] stripped = line.strip() if stripped.startswith(".. note::"): body, i = _consume_indented(raw_lines, i + 1) output.append("!!! note") if body: for body_line in body.splitlines(): output.append(f" {body_line}" if body_line else "") output.append("") continue if stripped.startswith(".. warning::"): body, i = _consume_indented(raw_lines, i + 1) output.append("!!! warning") if body: for body_line in body.splitlines(): output.append(f" {body_line}" if body_line else "") output.append("") continue if stripped.startswith(".. 
deprecated::"): version = stripped.split("::", 1)[1].strip() body, i = _consume_indented(raw_lines, i + 1) title = f"Deprecated in {version}" if version else "Deprecated" output.append(f"!!! danger \"{title}\"") if body: for body_line in body.splitlines(): output.append(f" {body_line}" if body_line else "") output.append("") continue if stripped.startswith(".. versionadded::"): version = stripped.split("::", 1)[1].strip() body, i = _consume_indented(raw_lines, i + 1) title = f"Added in {version}" if version else "Added" output.append(f"!!! info \"{title}\"") if body: for body_line in body.splitlines(): output.append(f" {body_line}" if body_line else "") output.append("") continue if stripped.startswith(".. versionchanged::"): version = stripped.split("::", 1)[1].strip() body, i = _consume_indented(raw_lines, i + 1) title = f"Changed in {version}" if version else "Changed" output.append(f"!!! info \"{title}\"") if body: for body_line in body.splitlines(): output.append(f" {body_line}" if body_line else "") output.append("") continue if stripped.startswith(".. code::") or stripped.startswith(".. 
code-block::"): language = stripped.split("::", 1)[1].strip() body, i = _consume_indented(raw_lines, i + 1) fence = language or "text" output.append(f"```{fence}") if body: output.append(body) output.append("```") output.append("") continue output.append(line) i += 1 text = "\n".join(output) text = _convert_refs(text) # Collapse excessive blank lines text = re.sub(r"\n{3,}", "\n\n", text) return text.strip("\n") def _format_setting(setting: guncfg.Setting) -> str: lines: list[str] = [f"### `{setting.name}`", ""] cli = _format_cli(setting) if cli: lines.extend((f"**Command line:** {cli}", "")) default_text, is_block = _format_default(setting) if is_block: lines.append("**Default:**") lines.append("") lines.append(default_text) else: lines.append(f"**Default:** {default_text}") lines.append("") desc = _convert_desc(setting.desc) if desc: lines.append(desc) lines.append("") return "\n".join(lines) def render_settings() -> str: sections: list[str] = [HEAD, '', ""] known_settings = sorted(guncfg.KNOWN_SETTINGS, key=lambda s: s.section) current_section: str | None = None for setting in known_settings: if setting.section != current_section: current_section = setting.section sections.append(f"## {current_section}\n") sections.append(_format_setting(setting)) return "\n".join(sections).strip() + "\n" def _write_output(markdown: str) -> None: try: import mkdocs_gen_files # type: ignore except ImportError: mkdocs_gen_files = None if mkdocs_gen_files is not None: try: with mkdocs_gen_files.open("reference/settings.md", "w") as fh: fh.write(markdown) return except Exception: pass output = Path(__file__).resolve().parents[1] / "docs" / "content" / "reference" / "settings.md" output.parent.mkdir(parents=True, exist_ok=True) output.write_text(markdown, encoding="utf-8") def main() -> None: markdown = render_settings() _write_output(markdown) if __name__ == "__main__": main() 
benoitc-gunicorn-f5fb19e/scripts/update_thanks.py000066400000000000000000000022421514360242400223560ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. #!/usr/bin/env python # Usage: git log --format="%an <%ae>" | python update_thanks.py # You will get a result.txt file, you can work with the file (update, remove, ...) # # Install # ======= # pip install validate_email pyDNS # import sys from validate_email import validate_email from email.utils import parseaddr import DNS.Base addresses = set() bad_addresses = set() collection = [] lines = list(reversed(sys.stdin.readlines())) for author in map(str.strip, lines): realname, email_address = parseaddr(author) if email_address not in addresses: if email_address in bad_addresses: continue else: try: value = validate_email(email_address) if value: addresses.add(email_address) collection.append(author) else: bad_addresses.add(email_address) except DNS.Base.TimeoutError: bad_addresses.add(email_address) with open('result.txt', 'w') as output: output.write('\n'.join(collection)) benoitc-gunicorn-f5fb19e/tests/000077500000000000000000000000001514360242400166255ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/tests/config/000077500000000000000000000000001514360242400200725ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/tests/config/__init__.py000066400000000000000000000001521514360242400222010ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. benoitc-gunicorn-f5fb19e/tests/config/test_cfg.py000066400000000000000000000003021514360242400222350ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
bind = "unix:/tmp/bar/baz" workers = 3 proc_name = "fooey" default_proc_name = "blurgh" benoitc-gunicorn-f5fb19e/tests/config/test_cfg_alt.py000066400000000000000000000002021514360242400230740ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. proc_name = "not-fooey" benoitc-gunicorn-f5fb19e/tests/config/test_cfg_with_wsgi_app.py000066400000000000000000000002011514360242400251570ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. wsgi_app = "app1:app1" benoitc-gunicorn-f5fb19e/tests/conftest.py000066400000000000000000000007151514360242400210270ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Pytest configuration for gunicorn tests.""" import os import sys # Add the tests directory to sys.path so test support modules can be imported # as 'tests.module_name' (e.g., 'tests.support_dirty_apps:CounterApp') tests_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) if tests_dir not in sys.path: sys.path.insert(0, tests_dir) benoitc-gunicorn-f5fb19e/tests/ctl/000077500000000000000000000000001514360242400174075ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/tests/ctl/__init__.py000066400000000000000000000001511514360242400215150ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. benoitc-gunicorn-f5fb19e/tests/ctl/test_client.py000066400000000000000000000217311514360242400223020ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""Tests for control socket client.""" import os import socket import tempfile import threading import pytest from gunicorn.ctl.client import ( ControlClient, ControlClientError, parse_command, ) from gunicorn.ctl.protocol import ControlProtocol, make_response class TestControlClientInit: """Tests for ControlClient initialization.""" def test_init_attributes(self): """Test that client is initialized with correct attributes.""" client = ControlClient("/tmp/test.sock", timeout=60.0) assert client.socket_path == "/tmp/test.sock" assert client.timeout == 60.0 assert client._sock is None assert client._request_id == 0 class TestControlClientConnect: """Tests for ControlClient connection.""" def test_connect_nonexistent_socket(self): """Test connecting to non-existent socket.""" client = ControlClient("/nonexistent/socket.sock") with pytest.raises(ControlClientError) as exc_info: client.connect() assert "Failed to connect" in str(exc_info.value) def test_connect_success(self): """Test successful connection.""" with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "test.sock") # Create a listening socket server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) server_sock.bind(socket_path) server_sock.listen(1) try: client = ControlClient(socket_path) client.connect() assert client._sock is not None client.close() finally: server_sock.close() def test_connect_already_connected(self): """Test that connect is idempotent.""" with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "test.sock") server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) server_sock.bind(socket_path) server_sock.listen(1) try: client = ControlClient(socket_path) client.connect() first_sock = client._sock client.connect() # Should not create new connection assert client._sock is first_sock client.close() finally: server_sock.close() class TestControlClientClose: """Tests for ControlClient close.""" def test_close_idempotent(self): 
"""Test that close can be called multiple times.""" client = ControlClient("/tmp/test.sock") client.close() client.close() # Should not raise def test_close_clears_socket(self): """Test that close clears the socket.""" client = ControlClient("/tmp/test.sock") client._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) client.close() assert client._sock is None class TestControlClientContextManager: """Tests for context manager functionality.""" def test_context_manager_connection_error(self): """Test context manager with connection error.""" client = ControlClient("/nonexistent/socket.sock") with pytest.raises(ControlClientError): with client: pass def test_context_manager_success(self): """Test successful context manager usage.""" with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "test.sock") server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) server_sock.bind(socket_path) server_sock.listen(1) try: with ControlClient(socket_path) as client: assert client._sock is not None # After context manager exits, socket should be closed assert client._sock is None finally: server_sock.close() class TestControlClientSendCommand: """Tests for send_command functionality.""" def test_send_command_success(self): """Test successful command send.""" with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "test.sock") server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) server_sock.bind(socket_path) server_sock.listen(1) response_data = {"workers": [], "count": 0} response_sent = threading.Event() def server_handler(): conn, _ = server_sock.accept() try: msg = ControlProtocol.read_message(conn) resp = make_response(msg["id"], response_data) ControlProtocol.write_message(conn, resp) response_sent.set() finally: conn.close() server_thread = threading.Thread(target=server_handler) server_thread.start() try: client = ControlClient(socket_path, timeout=5.0) result = client.send_command("show workers") 
assert result == response_data client.close() finally: response_sent.wait(timeout=2.0) server_thread.join(timeout=2.0) server_sock.close() def test_send_command_error_response(self): """Test handling error response.""" with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "test.sock") server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) server_sock.bind(socket_path) server_sock.listen(1) def server_handler(): conn, _ = server_sock.accept() try: msg = ControlProtocol.read_message(conn) resp = { "id": msg["id"], "status": "error", "error": "Unknown command", } ControlProtocol.write_message(conn, resp) finally: conn.close() server_thread = threading.Thread(target=server_handler) server_thread.start() try: client = ControlClient(socket_path, timeout=5.0) with pytest.raises(ControlClientError) as exc_info: client.send_command("invalid command") assert "Unknown command" in str(exc_info.value) client.close() finally: server_thread.join(timeout=2.0) server_sock.close() def test_send_command_auto_connect(self): """Test that send_command auto-connects if not connected.""" with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "test.sock") server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) server_sock.bind(socket_path) server_sock.listen(1) def server_handler(): conn, _ = server_sock.accept() try: msg = ControlProtocol.read_message(conn) resp = make_response(msg["id"], {}) ControlProtocol.write_message(conn, resp) finally: conn.close() server_thread = threading.Thread(target=server_handler) server_thread.start() try: client = ControlClient(socket_path, timeout=5.0) # Don't call connect() explicitly result = client.send_command("help") assert isinstance(result, dict) client.close() finally: server_thread.join(timeout=2.0) server_sock.close() class TestParseCommand: """Tests for command parsing.""" def test_parse_simple_command(self): """Test parsing simple command.""" cmd, args = parse_command("show 
workers") assert cmd == "show workers" assert args == [] def test_parse_command_with_args(self): """Test parsing command with arguments.""" cmd, args = parse_command("worker add 2") assert cmd == "worker add" assert args == ["2"] def test_parse_command_with_multiple_args(self): """Test parsing command with multiple arguments.""" cmd, args = parse_command("worker kill 12345") assert cmd == "worker kill" assert args == ["12345"] def test_parse_empty_command(self): """Test parsing empty command.""" cmd, args = parse_command("") assert cmd == "" assert args == [] def test_parse_command_quoted(self): """Test parsing command with quoted arguments.""" cmd, args = parse_command('worker kill "12345"') assert cmd == "worker kill" assert args == ["12345"] benoitc-gunicorn-f5fb19e/tests/ctl/test_handlers.py000066400000000000000000000316271514360242400226310ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Tests for control socket command handlers.""" import signal import time from unittest.mock import MagicMock, patch from gunicorn.ctl.handlers import CommandHandlers class MockWorker: """Mock worker for testing.""" def __init__(self, pid, age, booted=True, aborted=False): self.pid = pid self.age = age self.booted = booted self.aborted = aborted self.tmp = MagicMock() self.tmp.last_update.return_value = time.monotonic() class MockListener: """Mock listener for testing.""" def __init__(self, address, fd=3): self._address = address self._fd = fd self.sock = MagicMock() self.sock.family = 2 # AF_INET def __str__(self): return self._address def fileno(self): return self._fd class MockConfig: """Mock config for testing.""" def __init__(self): self.bind = ['127.0.0.1:8000'] self.workers = 4 self.worker_class = 'sync' self.threads = 1 self.timeout = 30 self.graceful_timeout = 30 self.keepalive = 2 self.max_requests = 0 self.max_requests_jitter = 0 self.worker_connections = 1000 self.preload_app = 
False self.daemon = False self.pidfile = None self.proc_name = 'test_app' self.reload = False self.dirty_workers = 0 self.dirty_apps = [] self.dirty_timeout = 30 self.control_socket = 'gunicorn.ctl' self.control_socket_disable = False class MockArbiter: """Mock arbiter for testing.""" def __init__(self): self.cfg = MockConfig() self.pid = 12345 self.WORKERS = {} self.LISTENERS = [] self.dirty_arbiter_pid = 0 self.dirty_arbiter = None self.num_workers = 4 self._stats = { 'start_time': time.time() - 3600, # 1 hour ago 'workers_spawned': 10, 'workers_killed': 5, 'reloads': 2, } def wakeup(self): pass class TestShowWorkers: """Tests for show workers command.""" def test_show_workers_empty(self): """Test showing workers when none exist.""" arbiter = MockArbiter() handlers = CommandHandlers(arbiter) result = handlers.show_workers() assert result["workers"] == [] assert result["count"] == 0 def test_show_workers_with_workers(self): """Test showing workers.""" arbiter = MockArbiter() arbiter.WORKERS = { 1001: MockWorker(1001, 1), 1002: MockWorker(1002, 2), 1003: MockWorker(1003, 3), } handlers = CommandHandlers(arbiter) result = handlers.show_workers() assert result["count"] == 3 assert len(result["workers"]) == 3 # Verify sorted by age ages = [w["age"] for w in result["workers"]] assert ages == sorted(ages) # Verify worker data worker = result["workers"][0] assert "pid" in worker assert "age" in worker assert "booted" in worker assert "last_heartbeat" in worker class TestShowStats: """Tests for show stats command.""" def test_show_stats(self): """Test showing stats.""" arbiter = MockArbiter() arbiter.WORKERS = { 1001: MockWorker(1001, 1), 1002: MockWorker(1002, 2), } handlers = CommandHandlers(arbiter) result = handlers.show_stats() assert result["pid"] == 12345 assert result["workers_current"] == 2 assert result["workers_target"] == 4 assert result["workers_spawned"] == 10 assert result["workers_killed"] == 5 assert result["reloads"] == 2 assert result["uptime"] is not 
None assert result["uptime"] > 0 class TestShowConfig: """Tests for show config command.""" def test_show_config(self): """Test showing config.""" arbiter = MockArbiter() handlers = CommandHandlers(arbiter) result = handlers.show_config() assert result["workers"] == 4 assert result["timeout"] == 30 assert result["bind"] == ['127.0.0.1:8000'] class TestShowListeners: """Tests for show listeners command.""" def test_show_listeners_empty(self): """Test showing listeners when none exist.""" arbiter = MockArbiter() handlers = CommandHandlers(arbiter) result = handlers.show_listeners() assert result["listeners"] == [] assert result["count"] == 0 def test_show_listeners(self): """Test showing listeners.""" arbiter = MockArbiter() arbiter.LISTENERS = [ MockListener("127.0.0.1:8000", fd=3), MockListener("127.0.0.1:8001", fd=4), ] handlers = CommandHandlers(arbiter) result = handlers.show_listeners() assert result["count"] == 2 assert len(result["listeners"]) == 2 assert result["listeners"][0]["address"] == "127.0.0.1:8000" class TestWorkerAdd: """Tests for worker add command.""" def test_worker_add_default(self): """Test adding one worker (default).""" arbiter = MockArbiter() arbiter.wakeup = MagicMock() handlers = CommandHandlers(arbiter) result = handlers.worker_add() assert result["added"] == 1 assert result["previous"] == 4 assert result["total"] == 5 assert arbiter.num_workers == 5 arbiter.wakeup.assert_called_once() def test_worker_add_multiple(self): """Test adding multiple workers.""" arbiter = MockArbiter() arbiter.wakeup = MagicMock() handlers = CommandHandlers(arbiter) result = handlers.worker_add(3) assert result["added"] == 3 assert result["total"] == 7 class TestWorkerRemove: """Tests for worker remove command.""" def test_worker_remove_default(self): """Test removing one worker (default).""" arbiter = MockArbiter() arbiter.wakeup = MagicMock() handlers = CommandHandlers(arbiter) result = handlers.worker_remove() assert result["removed"] == 1 assert 
result["previous"] == 4 assert result["total"] == 3 assert arbiter.num_workers == 3 arbiter.wakeup.assert_called_once() def test_worker_remove_cannot_go_below_one(self): """Test that worker count cannot go below 1.""" arbiter = MockArbiter() arbiter.num_workers = 2 arbiter.wakeup = MagicMock() handlers = CommandHandlers(arbiter) result = handlers.worker_remove(5) assert result["removed"] == 1 assert result["total"] == 1 assert arbiter.num_workers == 1 class TestWorkerKill: """Tests for worker kill command.""" def test_worker_kill_success(self): """Test killing a worker.""" arbiter = MockArbiter() arbiter.WORKERS = {1001: MockWorker(1001, 1)} handlers = CommandHandlers(arbiter) with patch('os.kill') as mock_kill: result = handlers.worker_kill(1001) assert result["success"] is True assert result["killed"] == 1001 mock_kill.assert_called_once_with(1001, signal.SIGTERM) def test_worker_kill_not_found(self): """Test killing a non-existent worker.""" arbiter = MockArbiter() handlers = CommandHandlers(arbiter) result = handlers.worker_kill(9999) assert result["success"] is False assert "not found" in result["error"] class TestShowDirty: """Tests for show dirty command.""" def test_show_dirty_disabled(self): """Test showing dirty when disabled.""" arbiter = MockArbiter() handlers = CommandHandlers(arbiter) result = handlers.show_dirty() assert result["enabled"] is False assert result["pid"] is None class TestDirtyAdd: """Tests for dirty add command.""" def test_dirty_add_not_running(self): """Test dirty add when dirty arbiter not running.""" arbiter = MockArbiter() handlers = CommandHandlers(arbiter) result = handlers.dirty_add() assert result["success"] is False assert "not running" in result["error"] def test_dirty_add_no_socket(self): """Test dirty add when socket path not available.""" arbiter = MockArbiter() arbiter.dirty_arbiter_pid = 2000 handlers = CommandHandlers(arbiter) # No dirty_arbiter attribute and no env var with patch.dict('os.environ', {}, clear=True): 
result = handlers.dirty_add() assert result["success"] is False assert "socket" in result["error"].lower() class TestDirtyRemove: """Tests for dirty remove command.""" def test_dirty_remove_not_running(self): """Test dirty remove when dirty arbiter not running.""" arbiter = MockArbiter() handlers = CommandHandlers(arbiter) result = handlers.dirty_remove() assert result["success"] is False assert "not running" in result["error"] def test_dirty_remove_no_socket(self): """Test dirty remove when socket path not available.""" arbiter = MockArbiter() arbiter.dirty_arbiter_pid = 2000 handlers = CommandHandlers(arbiter) # No dirty_arbiter attribute and no env var with patch.dict('os.environ', {}, clear=True): result = handlers.dirty_remove() assert result["success"] is False assert "socket" in result["error"].lower() class TestReload: """Tests for reload command.""" def test_reload(self): """Test reload command.""" arbiter = MockArbiter() handlers = CommandHandlers(arbiter) with patch('os.kill') as mock_kill: result = handlers.reload() assert result["status"] == "reloading" mock_kill.assert_called_once_with(12345, signal.SIGHUP) class TestReopen: """Tests for reopen command.""" def test_reopen(self): """Test reopen command.""" arbiter = MockArbiter() handlers = CommandHandlers(arbiter) with patch('os.kill') as mock_kill: result = handlers.reopen() assert result["status"] == "reopening" mock_kill.assert_called_once_with(12345, signal.SIGUSR1) class TestShutdown: """Tests for shutdown command.""" def test_shutdown_graceful(self): """Test graceful shutdown.""" arbiter = MockArbiter() handlers = CommandHandlers(arbiter) with patch('os.kill') as mock_kill: result = handlers.shutdown() assert result["status"] == "shutting_down" assert result["mode"] == "graceful" mock_kill.assert_called_once_with(12345, signal.SIGTERM) def test_shutdown_quick(self): """Test quick shutdown.""" arbiter = MockArbiter() handlers = CommandHandlers(arbiter) with patch('os.kill') as mock_kill: result = 
handlers.shutdown("quick") assert result["status"] == "shutting_down" assert result["mode"] == "quick" mock_kill.assert_called_once_with(12345, signal.SIGINT) class TestShowAll: """Tests for show all command.""" def test_show_all_basic(self): """Test show all command.""" arbiter = MockArbiter() arbiter.WORKERS = { 1001: MockWorker(1001, 1), 1002: MockWorker(1002, 2), } handlers = CommandHandlers(arbiter) result = handlers.show_all() assert "arbiter" in result assert result["arbiter"]["pid"] == 12345 assert result["arbiter"]["type"] == "arbiter" assert "web_workers" in result assert result["web_worker_count"] == 2 assert len(result["web_workers"]) == 2 assert "dirty_arbiter" in result assert result["dirty_arbiter"] is None # No dirty workers when no dirty arbiter assert result["dirty_worker_count"] == 0 def test_show_all_with_dirty(self): """Test show all with dirty arbiter running.""" arbiter = MockArbiter() arbiter.dirty_arbiter_pid = 2000 handlers = CommandHandlers(arbiter) result = handlers.show_all() assert result["dirty_arbiter"] is not None assert result["dirty_arbiter"]["pid"] == 2000 assert result["dirty_arbiter"]["type"] == "dirty_arbiter" class TestHelp: """Tests for help command.""" def test_help(self): """Test help command.""" arbiter = MockArbiter() handlers = CommandHandlers(arbiter) result = handlers.help() assert "commands" in result commands = result["commands"] assert "show all" in commands assert "show workers" in commands assert "worker add [N]" in commands assert "reload" in commands assert "shutdown [graceful|quick]" in commands benoitc-gunicorn-f5fb19e/tests/ctl/test_protocol.py000066400000000000000000000166431514360242400226730ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""Tests for control socket protocol.""" import json import struct import pytest from gunicorn.ctl.protocol import ( ControlProtocol, ProtocolError, make_request, make_response, make_error_response, ) class TestControlProtocolEncoding: """Tests for message encoding/decoding.""" def test_encode_message_simple(self): """Test encoding a simple message.""" data = {"command": "test"} result = ControlProtocol.encode_message(data) # First 4 bytes are length length = struct.unpack('>I', result[:4])[0] payload = result[4:] assert length == len(payload) assert json.loads(payload.decode('utf-8')) == data def test_encode_message_unicode(self): """Test encoding message with unicode characters.""" data = {"message": "Hello \u4e16\u754c"} result = ControlProtocol.encode_message(data) length = struct.unpack('>I', result[:4])[0] payload = result[4:] assert length == len(payload) assert json.loads(payload.decode('utf-8')) == data def test_decode_message_simple(self): """Test decoding a simple message.""" data = {"command": "test", "args": [1, 2, 3]} payload = json.dumps(data).encode('utf-8') length = struct.pack('>I', len(payload)) raw = length + payload result = ControlProtocol.decode_message(raw) assert result == data def test_decode_message_too_short(self): """Test decoding message that's too short.""" with pytest.raises(ProtocolError) as exc_info: ControlProtocol.decode_message(b'\x00\x00') assert "too short" in str(exc_info.value) def test_decode_message_incomplete(self): """Test decoding incomplete message.""" # Length says 100 bytes but only 4 bytes provided raw = struct.pack('>I', 100) + b'test' with pytest.raises(ProtocolError) as exc_info: ControlProtocol.decode_message(raw) assert "Incomplete" in str(exc_info.value) def test_roundtrip(self): """Test encode/decode roundtrip.""" original = { "id": 42, "command": "show workers", "args": ["arg1", 123, True, None], "nested": {"a": 1, "b": [1, 2, 3]}, } encoded = ControlProtocol.encode_message(original) decoded = 
ControlProtocol.decode_message(encoded) assert decoded == original class TestMakeRequest: """Tests for request creation.""" def test_make_request_simple(self): """Test creating a simple request.""" result = make_request(1, "show workers") assert result["id"] == 1 assert result["command"] == "show workers" assert result["args"] == [] def test_make_request_with_args(self): """Test creating a request with arguments.""" result = make_request(42, "worker add", [2]) assert result["id"] == 42 assert result["command"] == "worker add" assert result["args"] == [2] class TestMakeResponse: """Tests for response creation.""" def test_make_response_simple(self): """Test creating a simple response.""" result = make_response(1, {"count": 5}) assert result["id"] == 1 assert result["status"] == "ok" assert result["data"] == {"count": 5} def test_make_response_empty_data(self): """Test creating response with no data.""" result = make_response(1) assert result["id"] == 1 assert result["status"] == "ok" assert result["data"] == {} class TestMakeErrorResponse: """Tests for error response creation.""" def test_make_error_response(self): """Test creating an error response.""" result = make_error_response(1, "Unknown command") assert result["id"] == 1 assert result["status"] == "error" assert result["error"] == "Unknown command" class TestControlProtocolSocket: """Tests for socket reading/writing.""" def test_read_write_message(self): """Test read/write through socket pair.""" import socket import threading data = {"id": 1, "command": "test"} received = [] # Create socket pair server, client = socket.socketpair() def reader(): received.append(ControlProtocol.read_message(server)) t = threading.Thread(target=reader) t.start() ControlProtocol.write_message(client, data) t.join(timeout=2.0) client.close() server.close() assert len(received) == 1 assert received[0] == data def test_read_connection_closed(self): """Test reading from closed connection.""" import socket server, client = 
socket.socketpair() client.close() with pytest.raises(ConnectionError): ControlProtocol.read_message(server) server.close() def test_read_message_too_large(self): """Test reading message exceeding max size.""" import socket server, client = socket.socketpair() # Send a length that exceeds MAX_MESSAGE_SIZE huge_length = ControlProtocol.MAX_MESSAGE_SIZE + 1 client.send(struct.pack('>I', huge_length)) with pytest.raises(ProtocolError) as exc_info: ControlProtocol.read_message(server) assert "too large" in str(exc_info.value) client.close() server.close() class TestControlProtocolAsync: """Tests for async protocol methods.""" @pytest.mark.asyncio async def test_async_read_write(self): """Test async read/write using a unix server.""" import asyncio import tempfile import os data = {"id": 1, "command": "async test"} received = [] with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "test.sock") async def handler(reader, writer): msg = await ControlProtocol.read_message_async(reader) received.append(msg) await ControlProtocol.write_message_async(writer, data) writer.close() await writer.wait_closed() server = await asyncio.start_unix_server(handler, path=socket_path) async with server: reader, writer = await asyncio.open_unix_connection(socket_path) await ControlProtocol.write_message_async(writer, data) response = await ControlProtocol.read_message_async(reader) writer.close() await writer.wait_closed() assert len(received) == 1 assert received[0] == data assert response == data class TestProtocolMaxSize: """Tests for protocol size limits.""" def test_max_message_size_constant(self): """Test that MAX_MESSAGE_SIZE is set to a reasonable value.""" # Should be 16 MB assert ControlProtocol.MAX_MESSAGE_SIZE == 16 * 1024 * 1024 def test_encode_large_message(self): """Test encoding a large (but valid) message.""" # Create a message with ~1MB of data data = {"data": "x" * (1024 * 1024)} encoded = ControlProtocol.encode_message(data) # Should succeed 
and be decodable decoded = ControlProtocol.decode_message(encoded) assert decoded == data benoitc-gunicorn-f5fb19e/tests/ctl/test_server.py000066400000000000000000000251751514360242400223400ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Tests for control socket server.""" import os import tempfile import time from unittest.mock import MagicMock import pytest from gunicorn.ctl.server import ControlSocketServer from gunicorn.ctl.client import ControlClient class MockWorker: """Mock worker for testing.""" def __init__(self, pid, age, booted=True, aborted=False): self.pid = pid self.age = age self.booted = booted self.aborted = aborted self.tmp = MagicMock() self.tmp.last_update.return_value = time.monotonic() class MockConfig: """Mock config for testing.""" def __init__(self): self.bind = ['127.0.0.1:8000'] self.workers = 4 self.worker_class = 'sync' self.threads = 1 self.timeout = 30 self.graceful_timeout = 30 self.keepalive = 2 self.max_requests = 0 self.max_requests_jitter = 0 self.worker_connections = 1000 self.preload_app = False self.daemon = False self.pidfile = None self.proc_name = 'test_app' self.reload = False self.dirty_workers = 0 self.dirty_apps = [] self.dirty_timeout = 30 self.control_socket = 'gunicorn.ctl' self.control_socket_disable = False class MockLog: """Mock logger for testing.""" def debug(self, msg, *args): pass def info(self, msg, *args): pass def warning(self, msg, *args): pass def error(self, msg, *args): pass def exception(self, msg, *args): pass class MockArbiter: """Mock arbiter for testing.""" def __init__(self): self.cfg = MockConfig() self.log = MockLog() self.pid = 12345 self.WORKERS = {} self.LISTENERS = [] self.dirty_arbiter_pid = 0 self.dirty_arbiter = None self.num_workers = 4 self._stats = { 'start_time': time.time() - 3600, 'workers_spawned': 10, 'workers_killed': 5, 'reloads': 2, } def wakeup(self): pass class 
TestControlSocketServerInit: """Tests for server initialization.""" def test_init(self): """Test server initialization.""" arbiter = MockArbiter() server = ControlSocketServer(arbiter, "/tmp/test.sock", 0o600) assert server.arbiter is arbiter assert server.socket_path == "/tmp/test.sock" assert server.socket_mode == 0o600 assert server._running is False class TestControlSocketServerLifecycle: """Tests for server start/stop.""" def test_start_stop(self): """Test starting and stopping the server.""" with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "test.sock") arbiter = MockArbiter() server = ControlSocketServer(arbiter, socket_path) server.start() # Wait for server to start for _ in range(50): if os.path.exists(socket_path): break time.sleep(0.1) time.sleep(0.2) # Extra wait for server to be fully ready assert os.path.exists(socket_path) server.stop() # Wait for cleanup time.sleep(0.2) # Socket should be cleaned up assert not os.path.exists(socket_path) def test_start_already_running(self): """Test that start is idempotent.""" with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "test.sock") arbiter = MockArbiter() server = ControlSocketServer(arbiter, socket_path) server.start() first_thread = server._thread server.start() assert server._thread is first_thread server.stop() def test_stop_not_running(self): """Test stopping a non-running server.""" arbiter = MockArbiter() server = ControlSocketServer(arbiter, "/tmp/test.sock") # Should not raise server.stop() class TestControlSocketServerIntegration: """Integration tests for server with client.""" def test_show_workers(self): """Test show workers command.""" with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "test.sock") arbiter = MockArbiter() arbiter.WORKERS = { 1001: MockWorker(1001, 1), 1002: MockWorker(1002, 2), } server = ControlSocketServer(arbiter, socket_path) server.start() # Wait for server to start for _ in 
range(50): if os.path.exists(socket_path): break time.sleep(0.1) time.sleep(0.2) # Extra wait for server to be fully ready try: with ControlClient(socket_path, timeout=5.0) as client: result = client.send_command("show workers") assert result["count"] == 2 assert len(result["workers"]) == 2 finally: server.stop() def test_show_stats(self): """Test show stats command.""" with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "test.sock") arbiter = MockArbiter() server = ControlSocketServer(arbiter, socket_path) server.start() for _ in range(50): if os.path.exists(socket_path): break time.sleep(0.1) time.sleep(0.2) # Extra wait for server to be fully ready try: with ControlClient(socket_path, timeout=5.0) as client: result = client.send_command("show stats") assert result["pid"] == 12345 assert result["workers_spawned"] == 10 finally: server.stop() def test_help_command(self): """Test help command.""" with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "test.sock") arbiter = MockArbiter() server = ControlSocketServer(arbiter, socket_path) server.start() for _ in range(50): if os.path.exists(socket_path): break time.sleep(0.1) time.sleep(0.2) # Extra wait for server to be fully ready try: with ControlClient(socket_path, timeout=5.0) as client: result = client.send_command("help") assert "commands" in result assert "show workers" in result["commands"] finally: server.stop() def test_worker_add(self): """Test worker add command.""" with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "test.sock") arbiter = MockArbiter() arbiter.wakeup = MagicMock() server = ControlSocketServer(arbiter, socket_path) server.start() for _ in range(50): if os.path.exists(socket_path): break time.sleep(0.1) time.sleep(0.2) # Extra wait for server to be fully ready try: with ControlClient(socket_path, timeout=5.0) as client: result = client.send_command("worker add 2") assert result["added"] == 2 assert 
result["total"] == 6 assert arbiter.num_workers == 6 arbiter.wakeup.assert_called() finally: server.stop() def test_invalid_command(self): """Test handling invalid command.""" with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "test.sock") arbiter = MockArbiter() server = ControlSocketServer(arbiter, socket_path) server.start() for _ in range(50): if os.path.exists(socket_path): break time.sleep(0.1) time.sleep(0.2) # Extra wait for server to be fully ready try: with ControlClient(socket_path, timeout=5.0) as client: with pytest.raises(Exception) as exc_info: client.send_command("invalid_command") assert "Unknown command" in str(exc_info.value) finally: server.stop() def test_multiple_commands(self): """Test sending multiple commands on same connection.""" with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "test.sock") arbiter = MockArbiter() arbiter.WORKERS = {1001: MockWorker(1001, 1)} server = ControlSocketServer(arbiter, socket_path) server.start() for _ in range(50): if os.path.exists(socket_path): break time.sleep(0.1) time.sleep(0.2) # Extra wait for server to be fully ready try: with ControlClient(socket_path, timeout=5.0) as client: result1 = client.send_command("show workers") result2 = client.send_command("show stats") result3 = client.send_command("help") assert result1["count"] == 1 assert result2["pid"] == 12345 assert "commands" in result3 finally: server.stop() class TestControlSocketServerPermissions: """Tests for socket permissions.""" @pytest.mark.skipif( os.uname().sysname == "FreeBSD", reason="FreeBSD socket permissions behavior differs" ) def test_socket_permissions(self): """Test that socket is created with correct permissions.""" with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "test.sock") arbiter = MockArbiter() server = ControlSocketServer(arbiter, socket_path, 0o660) server.start() # Wait for socket to exist for _ in range(50): if 
os.path.exists(socket_path): break time.sleep(0.1) # Extra wait for chmod to complete time.sleep(0.2) try: mode = os.stat(socket_path).st_mode & 0o777 assert mode == 0o660 finally: server.stop() benoitc-gunicorn-f5fb19e/tests/dirty/000077500000000000000000000000001514360242400177605ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/tests/dirty/__init__.py000066400000000000000000000002401514360242400220650ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Tests for dirty worker streaming functionality.""" benoitc-gunicorn-f5fb19e/tests/dirty/test_arbiter_signals.py000066400000000000000000000201561514360242400245450ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Tests for dirty arbiter TTIN/TTOU signal handling.""" import signal from unittest.mock import Mock import pytest class TestDirtyArbiterSignals: """Test TTIN/TTOU signal handling in DirtyArbiter.""" @pytest.fixture def arbiter(self, tmp_path): """Create a DirtyArbiter for testing.""" from gunicorn.dirty.arbiter import DirtyArbiter cfg = Mock() cfg.dirty_workers = 2 cfg.dirty_apps = [] cfg.dirty_timeout = 30 cfg.dirty_graceful_timeout = 30 cfg.on_dirty_starting = Mock() log = Mock() arbiter = DirtyArbiter(cfg, log, socket_path=str(tmp_path / "test.sock")) return arbiter def test_initial_num_workers_from_config(self, arbiter): """num_workers should be initialized from config.""" assert arbiter.num_workers == 2 def test_ttin_increases_num_workers(self, arbiter): """SIGTTIN should increase num_workers by 1.""" assert arbiter.num_workers == 2 arbiter._signal_handler(signal.SIGTTIN, None) assert arbiter.num_workers == 3 def test_ttin_logs_info(self, arbiter): """SIGTTIN should log info about the change.""" arbiter._signal_handler(signal.SIGTTIN, None) arbiter.log.info.assert_called() call_args = arbiter.log.info.call_args[0] assert "SIGTTIN" 
in call_args[0] assert "3" in str(call_args) def test_ttou_decreases_num_workers(self, arbiter): """SIGTTOU should decrease num_workers by 1.""" arbiter.num_workers = 3 arbiter._signal_handler(signal.SIGTTOU, None) assert arbiter.num_workers == 2 def test_ttou_logs_info(self, arbiter): """SIGTTOU should log info about the change.""" arbiter.num_workers = 3 arbiter._signal_handler(signal.SIGTTOU, None) arbiter.log.info.assert_called() call_args = arbiter.log.info.call_args[0] assert "SIGTTOU" in call_args[0] assert "2" in str(call_args) def test_ttou_respects_minimum_one_worker(self, arbiter): """SIGTTOU should not go below 1 worker by default.""" arbiter.num_workers = 1 arbiter._signal_handler(signal.SIGTTOU, None) assert arbiter.num_workers == 1 def test_ttou_logs_warning_at_minimum(self, arbiter): """SIGTTOU should log warning when at minimum.""" arbiter.num_workers = 1 arbiter._signal_handler(signal.SIGTTOU, None) arbiter.log.warning.assert_called() call_args = arbiter.log.warning.call_args[0] assert "Cannot decrease below" in call_args[0] def test_ttou_respects_app_minimum(self, arbiter): """SIGTTOU should not go below app-required minimum.""" # App requires 3 workers arbiter.app_specs = { 'myapp:HeavyTask': { 'import_path': 'myapp:HeavyTask', 'worker_count': 3, 'original_spec': 'myapp:HeavyTask:3', } } arbiter.num_workers = 3 # Should not decrease below 3 arbiter._signal_handler(signal.SIGTTOU, None) assert arbiter.num_workers == 3 arbiter.log.warning.assert_called() def test_ttou_with_unlimited_app(self, arbiter): """Apps with worker_count=None should not impose minimum.""" arbiter.app_specs = { 'myapp:UnlimitedTask': { 'import_path': 'myapp:UnlimitedTask', 'worker_count': None, 'original_spec': 'myapp:UnlimitedTask', } } arbiter.num_workers = 2 # Should decrease to 1 (default minimum) arbiter._signal_handler(signal.SIGTTOU, None) assert arbiter.num_workers == 1 def test_multiple_ttin_signals(self, arbiter): """Multiple TTIN signals should keep 
incrementing.""" assert arbiter.num_workers == 2 arbiter._signal_handler(signal.SIGTTIN, None) arbiter._signal_handler(signal.SIGTTIN, None) arbiter._signal_handler(signal.SIGTTIN, None) assert arbiter.num_workers == 5 def test_multiple_ttou_signals(self, arbiter): """Multiple TTOU signals should decrement until minimum.""" arbiter.num_workers = 5 arbiter._signal_handler(signal.SIGTTOU, None) arbiter._signal_handler(signal.SIGTTOU, None) arbiter._signal_handler(signal.SIGTTOU, None) arbiter._signal_handler(signal.SIGTTOU, None) # Should stop at 1 assert arbiter.num_workers == 1 class TestGetMinimumWorkers: """Test _get_minimum_workers calculation.""" @pytest.fixture def arbiter(self, tmp_path): """Create a DirtyArbiter for testing.""" from gunicorn.dirty.arbiter import DirtyArbiter cfg = Mock() cfg.dirty_workers = 2 cfg.dirty_apps = [] cfg.dirty_timeout = 30 cfg.dirty_graceful_timeout = 30 cfg.on_dirty_starting = Mock() log = Mock() arbiter = DirtyArbiter(cfg, log, socket_path=str(tmp_path / "test.sock")) return arbiter def test_minimum_workers_no_apps(self, arbiter): """With no apps, minimum should be 1.""" arbiter.app_specs = {} assert arbiter._get_minimum_workers() == 1 def test_minimum_workers_single_app_with_limit(self, arbiter): """Single app with worker_count should set minimum.""" arbiter.app_specs = { 'app:Task': { 'import_path': 'app:Task', 'worker_count': 3, 'original_spec': 'app:Task:3', } } assert arbiter._get_minimum_workers() == 3 def test_minimum_workers_single_app_unlimited(self, arbiter): """Single app with worker_count=None should use default minimum.""" arbiter.app_specs = { 'app:Task': { 'import_path': 'app:Task', 'worker_count': None, 'original_spec': 'app:Task', } } assert arbiter._get_minimum_workers() == 1 def test_minimum_workers_multiple_apps_with_limits(self, arbiter): """Multiple apps should use the maximum worker_count.""" arbiter.app_specs = { 'app1:Task1': { 'import_path': 'app1:Task1', 'worker_count': 2, 'original_spec': 
'app1:Task1:2', }, 'app2:Task2': { 'import_path': 'app2:Task2', 'worker_count': 4, 'original_spec': 'app2:Task2:4', }, 'app3:Task3': { 'import_path': 'app3:Task3', 'worker_count': 3, 'original_spec': 'app3:Task3:3', }, } # Maximum of (2, 4, 3) = 4 assert arbiter._get_minimum_workers() == 4 def test_minimum_workers_mixed_limited_and_unlimited(self, arbiter): """Mixed apps should use max of limited apps only.""" arbiter.app_specs = { 'app1:Task1': { 'import_path': 'app1:Task1', 'worker_count': 2, 'original_spec': 'app1:Task1:2', }, 'app2:Task2': { 'import_path': 'app2:Task2', 'worker_count': None, 'original_spec': 'app2:Task2', }, 'app3:Task3': { 'import_path': 'app3:Task3', 'worker_count': 4, 'original_spec': 'app3:Task3:4', }, } # Maximum of (2, 4) = 4, None is ignored assert arbiter._get_minimum_workers() == 4 def test_minimum_workers_all_unlimited(self, arbiter): """All unlimited apps should use default minimum.""" arbiter.app_specs = { 'app1:Task1': { 'import_path': 'app1:Task1', 'worker_count': None, 'original_spec': 'app1:Task1', }, 'app2:Task2': { 'import_path': 'app2:Task2', 'worker_count': None, 'original_spec': 'app2:Task2', }, } assert arbiter._get_minimum_workers() == 1 benoitc-gunicorn-f5fb19e/tests/dirty/test_arbiter_streaming.py000066400000000000000000000257121514360242400251010ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""Tests for dirty arbiter streaming functionality.""" import asyncio import struct from unittest import mock import pytest from gunicorn.dirty.protocol import ( DirtyProtocol, BinaryProtocol, make_request, make_response, make_chunk_message, make_end_message, make_error_response, HEADER_SIZE, ) from gunicorn.dirty.arbiter import DirtyArbiter from gunicorn.dirty.errors import DirtyError class MockStreamWriter: """Mock StreamWriter that captures written messages.""" def __init__(self): self.messages = [] self._buffer = b"" self.closed = False def write(self, data): self._buffer += data async def drain(self): # Decode the buffer to extract messages using binary protocol while len(self._buffer) >= HEADER_SIZE: # Decode header to get payload length _, _, length = BinaryProtocol.decode_header( self._buffer[:HEADER_SIZE] ) total_size = HEADER_SIZE + length if len(self._buffer) >= total_size: msg_data = self._buffer[:total_size] self._buffer = self._buffer[total_size:] # decode_message returns (msg_type_str, request_id, payload_dict) msg_type_str, request_id, payload_dict = BinaryProtocol.decode_message(msg_data) # Reconstruct the dict format for backwards compatibility result = {"type": msg_type_str, "id": request_id} result.update(payload_dict) self.messages.append(result) else: break def close(self): self.closed = True async def wait_closed(self): pass def get_extra_info(self, name): return None class MockStreamReader: """Mock StreamReader that yields predefined messages.""" def __init__(self, messages): self._data = b'' for msg in messages: self._data += BinaryProtocol._encode_from_dict(msg) self._pos = 0 async def readexactly(self, n): if self._pos + n > len(self._data): raise asyncio.IncompleteReadError(self._data[self._pos:], n) result = self._data[self._pos:self._pos + n] self._pos += n return result def create_arbiter(): """Create a test arbiter with mocked components.""" cfg = mock.Mock() cfg.dirty_timeout = 30 cfg.dirty_workers = 1 cfg.dirty_apps = [] 
cfg.dirty_graceful_timeout = 30 cfg.on_dirty_starting = mock.Mock() cfg.dirty_post_fork = mock.Mock() cfg.dirty_worker_exit = mock.Mock() log = mock.Mock() with mock.patch('tempfile.mkdtemp', return_value='/tmp/test-dirty'): arbiter = DirtyArbiter(cfg, log) arbiter.alive = True arbiter.workers = {1234: mock.Mock()} # Fake worker arbiter.worker_sockets = {1234: '/tmp/worker.sock'} return arbiter class TestArbiterStreamingForwarding: """Tests for arbiter streaming message forwarding.""" @pytest.mark.asyncio async def test_forwards_chunk_messages(self): """Test that arbiter forwards chunk messages to client.""" arbiter = create_arbiter() client_writer = MockStreamWriter() # Mock worker connection that returns chunks chunk1 = make_chunk_message(123, "Hello") chunk2 = make_chunk_message(123, " World") end = make_end_message(123) mock_reader = MockStreamReader([chunk1, chunk2, end]) async def mock_get_connection(pid): return mock_reader, MockStreamWriter() arbiter._get_worker_connection = mock_get_connection request = make_request(123, "test:App", "generate") await arbiter._execute_on_worker(1234, request, client_writer) # Should have forwarded all messages assert len(client_writer.messages) == 3 assert client_writer.messages[0]["type"] == "chunk" assert client_writer.messages[0]["data"] == "Hello" assert client_writer.messages[1]["type"] == "chunk" assert client_writer.messages[1]["data"] == " World" assert client_writer.messages[2]["type"] == "end" @pytest.mark.asyncio async def test_forwards_regular_response(self): """Test that arbiter forwards regular response to client.""" arbiter = create_arbiter() client_writer = MockStreamWriter() response = make_response(123, {"result": 42}) mock_reader = MockStreamReader([response]) async def mock_get_connection(pid): return mock_reader, MockStreamWriter() arbiter._get_worker_connection = mock_get_connection request = make_request(123, "test:App", "compute") await arbiter._execute_on_worker(1234, request, client_writer) assert 
len(client_writer.messages) == 1 assert client_writer.messages[0]["type"] == "response" assert client_writer.messages[0]["result"] == {"result": 42} @pytest.mark.asyncio async def test_forwards_error_mid_stream(self): """Test that arbiter forwards error during streaming.""" arbiter = create_arbiter() client_writer = MockStreamWriter() chunk = make_chunk_message(123, "First") error = make_error_response(123, DirtyError("Something broke")) mock_reader = MockStreamReader([chunk, error]) async def mock_get_connection(pid): return mock_reader, MockStreamWriter() arbiter._get_worker_connection = mock_get_connection request = make_request(123, "test:App", "generate") await arbiter._execute_on_worker(1234, request, client_writer) assert len(client_writer.messages) == 2 assert client_writer.messages[0]["type"] == "chunk" assert client_writer.messages[1]["type"] == "error" @pytest.mark.asyncio async def test_timeout_during_streaming(self): """Test that timeout during streaming sends error.""" arbiter = create_arbiter() arbiter.cfg.dirty_timeout = 0.01 # Very short timeout client_writer = MockStreamWriter() # Reader that times out class TimeoutReader: async def readexactly(self, n): await asyncio.sleep(1) # Longer than timeout async def mock_get_connection(pid): return TimeoutReader(), MockStreamWriter() arbiter._get_worker_connection = mock_get_connection request = make_request(123, "test:App", "generate") await arbiter._execute_on_worker(1234, request, client_writer) assert len(client_writer.messages) == 1 assert client_writer.messages[0]["type"] == "error" assert "timeout" in client_writer.messages[0]["error"]["message"].lower() class TestArbiterRouteRequestStreaming: """Tests for route_request with streaming support.""" @pytest.mark.asyncio async def test_route_request_no_workers(self): """Test route_request when no workers available.""" arbiter = create_arbiter() arbiter.workers = {} # No workers client_writer = MockStreamWriter() request = make_request(123, "test:App", 
"generate") await arbiter.route_request(request, client_writer) assert len(client_writer.messages) == 1 assert client_writer.messages[0]["type"] == "error" assert "No dirty workers" in client_writer.messages[0]["error"]["message"] @pytest.mark.asyncio async def test_route_request_starts_consumer(self): """Test that route_request starts consumer if needed.""" arbiter = create_arbiter() # Mock _execute_on_worker to complete immediately async def mock_execute(pid, request, client_writer): response = make_response(123, "result") await DirtyProtocol.write_message_async(client_writer, response) arbiter._execute_on_worker = mock_execute client_writer = MockStreamWriter() request = make_request(123, "test:App", "compute") # Worker queue should be created assert 1234 not in arbiter.worker_queues await arbiter.route_request(request, client_writer) # Consumer should have been started assert 1234 in arbiter.worker_queues assert 1234 in arbiter.worker_consumers # Clean up arbiter.worker_consumers[1234].cancel() class TestArbiterStreamingManyChunks: """Tests for streaming with many chunks.""" @pytest.mark.asyncio async def test_forwards_many_chunks(self): """Test that arbiter forwards many chunks correctly.""" arbiter = create_arbiter() client_writer = MockStreamWriter() # Generate 50 chunks + end messages = [] for i in range(50): messages.append(make_chunk_message(123, f"chunk-{i}")) messages.append(make_end_message(123)) mock_reader = MockStreamReader(messages) async def mock_get_connection(pid): return mock_reader, MockStreamWriter() arbiter._get_worker_connection = mock_get_connection request = make_request(123, "test:App", "generate") await arbiter._execute_on_worker(1234, request, client_writer) assert len(client_writer.messages) == 51 assert client_writer.messages[0]["data"] == "chunk-0" assert client_writer.messages[49]["data"] == "chunk-49" assert client_writer.messages[50]["type"] == "end" class TestArbiterBackwardCompatibility: """Tests for backward compatibility with 
non-streaming.""" @pytest.mark.asyncio async def test_handles_regular_response(self): """Test that regular (non-streaming) responses still work.""" arbiter = create_arbiter() client_writer = MockStreamWriter() response = make_response(123, [1, 2, 3, 4, 5]) mock_reader = MockStreamReader([response]) async def mock_get_connection(pid): return mock_reader, MockStreamWriter() arbiter._get_worker_connection = mock_get_connection request = make_request(123, "test:App", "get_list") await arbiter._execute_on_worker(1234, request, client_writer) assert len(client_writer.messages) == 1 assert client_writer.messages[0]["type"] == "response" assert client_writer.messages[0]["result"] == [1, 2, 3, 4, 5] @pytest.mark.asyncio async def test_handles_error_response(self): """Test that error responses still work.""" arbiter = create_arbiter() client_writer = MockStreamWriter() error = make_error_response(123, DirtyError("Something failed")) mock_reader = MockStreamReader([error]) async def mock_get_connection(pid): return mock_reader, MockStreamWriter() arbiter._get_worker_connection = mock_get_connection request = make_request(123, "test:App", "fail") await arbiter._execute_on_worker(1234, request, client_writer) assert len(client_writer.messages) == 1 assert client_writer.messages[0]["type"] == "error" benoitc-gunicorn-f5fb19e/tests/dirty/test_client_streaming.py000066400000000000000000000162551514360242400247310ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""Tests for dirty client sync streaming functionality.""" import socket import struct import pytest from unittest import mock from gunicorn.dirty.protocol import ( DirtyProtocol, BinaryProtocol, make_chunk_message, make_end_message, make_response, make_error_response, HEADER_SIZE, ) from gunicorn.dirty.client import DirtyClient, DirtyStreamIterator from gunicorn.dirty.errors import DirtyError, DirtyConnectionError class MockSocket: """Mock socket that returns predefined messages.""" def __init__(self, messages): self._data = b'' for msg in messages: self._data += BinaryProtocol._encode_from_dict(msg) self._pos = 0 self._sent = [] self.closed = False self._timeout = None def sendall(self, data): self._sent.append(data) def recv(self, n, flags=0): if self._pos >= len(self._data): return b'' end = min(self._pos + n, len(self._data)) result = self._data[self._pos:end] self._pos = end return result def settimeout(self, timeout): self._timeout = timeout def close(self): self.closed = True def create_client_with_mock_socket(messages): """Create a client with a mock socket returning the given messages.""" client = DirtyClient("/tmp/test.sock") client._sock = MockSocket(messages) return client class TestDirtyStreamIterator: """Tests for DirtyStreamIterator.""" def test_stream_returns_iterator(self): """Test that stream() returns an iterator.""" client = DirtyClient("/tmp/test.sock") result = client.stream("test:App", "generate") assert isinstance(result, DirtyStreamIterator) def test_stream_iterator_yields_chunks(self): """Test that stream iterator yields chunks correctly.""" messages = [ make_chunk_message(123, "Hello"), make_chunk_message(123, " "), make_chunk_message(123, "World"), make_end_message(123), ] client = create_client_with_mock_socket(messages) chunks = list(client.stream("test:App", "generate")) assert chunks == ["Hello", " ", "World"] def test_stream_iterator_yields_complex_chunks(self): """Test that stream iterator yields complex data types.""" messages = 
[ make_chunk_message(123, {"token": "Hello", "score": 0.9}), make_chunk_message(123, {"token": "World", "score": 0.8}), make_end_message(123), ] client = create_client_with_mock_socket(messages) chunks = list(client.stream("test:App", "generate")) assert len(chunks) == 2 assert chunks[0]["token"] == "Hello" assert chunks[1]["token"] == "World" def test_stream_iterator_handles_error(self): """Test that stream iterator raises on error message.""" messages = [ make_chunk_message(123, "First"), make_error_response(123, DirtyError("Something broke")), ] client = create_client_with_mock_socket(messages) iterator = client.stream("test:App", "generate") # First chunk should work chunk = next(iterator) assert chunk == "First" # Second should raise error with pytest.raises(DirtyError) as exc_info: next(iterator) assert "Something broke" in str(exc_info.value) def test_stream_iterator_empty_stream(self): """Test that empty stream (just end) works.""" messages = [make_end_message(123)] client = create_client_with_mock_socket(messages) chunks = list(client.stream("test:App", "generate")) assert chunks == [] def test_stream_iterator_stops_after_exhausted(self): """Test that iterator stays exhausted after StopIteration.""" messages = [ make_chunk_message(123, "Only"), make_end_message(123), ] client = create_client_with_mock_socket(messages) iterator = client.stream("test:App", "generate") # Get the chunk chunk = next(iterator) assert chunk == "Only" # Should stop with pytest.raises(StopIteration): next(iterator) # Should stay stopped with pytest.raises(StopIteration): next(iterator) def test_stream_iterator_with_for_loop(self): """Test stream iterator works in for loop.""" messages = [ make_chunk_message(123, "a"), make_chunk_message(123, "b"), make_chunk_message(123, "c"), make_end_message(123), ] client = create_client_with_mock_socket(messages) result = "" for chunk in client.stream("test:App", "generate"): result += chunk assert result == "abc" def 
test_stream_sends_request_on_first_iteration(self): """Test that request is sent on first next() call.""" messages = [ make_chunk_message(123, "data"), make_end_message(123), ] client = create_client_with_mock_socket(messages) iterator = client.stream("test:App", "generate", "prompt_arg") # Before iteration, no request sent assert len(client._sock._sent) == 0 # First iteration sends request next(iterator) assert len(client._sock._sent) == 1 # Decode sent request sent_data = client._sock._sent[0] _, _, length = BinaryProtocol.decode_header(sent_data[:HEADER_SIZE]) msg_type_str, request_id, payload = BinaryProtocol.decode_message( sent_data[:HEADER_SIZE + length] ) assert msg_type_str == "request" assert payload["app_path"] == "test:App" assert payload["action"] == "generate" assert payload["args"] == ["prompt_arg"] class TestDirtyStreamIteratorEdgeCases: """Edge cases for streaming.""" def test_stream_many_chunks(self): """Test streaming with many chunks.""" messages = [] for i in range(100): messages.append(make_chunk_message(123, f"chunk-{i}")) messages.append(make_end_message(123)) client = create_client_with_mock_socket(messages) chunks = list(client.stream("test:App", "generate")) assert len(chunks) == 100 assert chunks[0] == "chunk-0" assert chunks[99] == "chunk-99" def test_stream_with_kwargs(self): """Test streaming with keyword arguments.""" messages = [ make_chunk_message(123, "data"), make_end_message(123), ] client = create_client_with_mock_socket(messages) # Use kwargs list(client.stream("test:App", "generate", "arg1", key="value")) # Check the sent request includes kwargs sent_data = client._sock._sent[0] _, _, length = BinaryProtocol.decode_header(sent_data[:HEADER_SIZE]) msg_type_str, request_id, payload = BinaryProtocol.decode_message( sent_data[:HEADER_SIZE + length] ) assert payload["args"] == ["arg1"] assert payload["kwargs"] == {"key": "value"} 
benoitc-gunicorn-f5fb19e/tests/dirty/test_client_streaming_async.py000066400000000000000000000204061514360242400261170ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Tests for dirty client async streaming functionality.""" import asyncio import struct import pytest from gunicorn.dirty.protocol import ( DirtyProtocol, BinaryProtocol, make_chunk_message, make_end_message, make_error_response, HEADER_SIZE, ) from gunicorn.dirty.client import DirtyClient, DirtyAsyncStreamIterator from gunicorn.dirty.errors import DirtyError, DirtyTimeoutError class MockAsyncReader: """Mock async reader that returns predefined messages.""" def __init__(self, messages): self._data = b'' for msg in messages: self._data += BinaryProtocol._encode_from_dict(msg) self._pos = 0 async def readexactly(self, n): if self._pos + n > len(self._data): raise asyncio.IncompleteReadError(self._data[self._pos:], n) result = self._data[self._pos:self._pos + n] self._pos += n return result class MockAsyncWriter: """Mock async writer that captures sent data.""" def __init__(self): self._sent = [] self.closed = False def write(self, data): self._sent.append(data) async def drain(self): pass def close(self): self.closed = True async def wait_closed(self): pass def create_async_client_with_mocks(messages): """Create a client with mock async reader/writer.""" client = DirtyClient("/tmp/test.sock") client._reader = MockAsyncReader(messages) client._writer = MockAsyncWriter() return client class TestDirtyAsyncStreamIterator: """Tests for DirtyAsyncStreamIterator.""" def test_stream_async_returns_async_iterator(self): """Test that stream_async() returns an async iterator.""" client = DirtyClient("/tmp/test.sock") result = client.stream_async("test:App", "generate") assert isinstance(result, DirtyAsyncStreamIterator) @pytest.mark.asyncio async def test_async_stream_yields_chunks(self): """Test that async stream iterator yields 
chunks correctly.""" messages = [ make_chunk_message(123, "Hello"), make_chunk_message(123, " "), make_chunk_message(123, "World"), make_end_message(123), ] client = create_async_client_with_mocks(messages) chunks = [] async for chunk in client.stream_async("test:App", "generate"): chunks.append(chunk) assert chunks == ["Hello", " ", "World"] @pytest.mark.asyncio async def test_async_stream_yields_complex_chunks(self): """Test that async stream iterator yields complex data types.""" messages = [ make_chunk_message(123, {"token": "Hello", "score": 0.9}), make_chunk_message(123, {"token": "World", "score": 0.8}), make_end_message(123), ] client = create_async_client_with_mocks(messages) chunks = [] async for chunk in client.stream_async("test:App", "generate"): chunks.append(chunk) assert len(chunks) == 2 assert chunks[0]["token"] == "Hello" assert chunks[1]["token"] == "World" @pytest.mark.asyncio async def test_async_stream_handles_error(self): """Test that async stream iterator raises on error message.""" messages = [ make_chunk_message(123, "First"), make_error_response(123, DirtyError("Something broke")), ] client = create_async_client_with_mocks(messages) iterator = client.stream_async("test:App", "generate") # First chunk should work chunk = await iterator.__anext__() assert chunk == "First" # Second should raise error with pytest.raises(DirtyError) as exc_info: await iterator.__anext__() assert "Something broke" in str(exc_info.value) @pytest.mark.asyncio async def test_async_stream_empty_stream(self): """Test that empty stream (just end) works.""" messages = [make_end_message(123)] client = create_async_client_with_mocks(messages) chunks = [] async for chunk in client.stream_async("test:App", "generate"): chunks.append(chunk) assert chunks == [] @pytest.mark.asyncio async def test_async_stream_stops_after_exhausted(self): """Test that async iterator stays exhausted after StopAsyncIteration.""" messages = [ make_chunk_message(123, "Only"), 
make_end_message(123), ] client = create_async_client_with_mocks(messages) iterator = client.stream_async("test:App", "generate") # Get the chunk chunk = await iterator.__anext__() assert chunk == "Only" # Should stop with pytest.raises(StopAsyncIteration): await iterator.__anext__() # Should stay stopped with pytest.raises(StopAsyncIteration): await iterator.__anext__() @pytest.mark.asyncio async def test_async_stream_sends_request_on_first_iteration(self): """Test that request is sent on first async iteration.""" messages = [ make_chunk_message(123, "data"), make_end_message(123), ] client = create_async_client_with_mocks(messages) iterator = client.stream_async("test:App", "generate", "prompt_arg") # Before iteration, no request sent assert len(client._writer._sent) == 0 # First iteration sends request await iterator.__anext__() assert len(client._writer._sent) == 1 # Decode sent request sent_data = client._writer._sent[0] _, _, length = BinaryProtocol.decode_header(sent_data[:HEADER_SIZE]) msg_type_str, request_id, payload = BinaryProtocol.decode_message( sent_data[:HEADER_SIZE + length] ) assert msg_type_str == "request" assert payload["app_path"] == "test:App" assert payload["action"] == "generate" assert payload["args"] == ["prompt_arg"] class TestDirtyAsyncStreamIteratorEdgeCases: """Edge cases for async streaming.""" @pytest.mark.asyncio async def test_async_stream_many_chunks(self): """Test async streaming with many chunks.""" messages = [] for i in range(100): messages.append(make_chunk_message(123, f"chunk-{i}")) messages.append(make_end_message(123)) client = create_async_client_with_mocks(messages) chunks = [] async for chunk in client.stream_async("test:App", "generate"): chunks.append(chunk) assert len(chunks) == 100 assert chunks[0] == "chunk-0" assert chunks[99] == "chunk-99" @pytest.mark.asyncio async def test_async_stream_with_kwargs(self): """Test async streaming with keyword arguments.""" messages = [ make_chunk_message(123, "data"), 
make_end_message(123), ] client = create_async_client_with_mocks(messages) # Use kwargs chunks = [] async for chunk in client.stream_async("test:App", "generate", "arg1", key="value"): chunks.append(chunk) # Check the sent request includes kwargs sent_data = client._writer._sent[0] _, _, length = BinaryProtocol.decode_header(sent_data[:HEADER_SIZE]) msg_type_str, request_id, payload = BinaryProtocol.decode_message( sent_data[:HEADER_SIZE + length] ) assert payload["args"] == ["arg1"] assert payload["kwargs"] == {"key": "value"} class TestDirtyAsyncStreamTimeout: """Tests for async streaming timeout handling.""" @pytest.mark.asyncio async def test_async_stream_timeout(self): """Test that timeout during async streaming raises DirtyTimeoutError.""" client = DirtyClient("/tmp/test.sock", timeout=0.01) # Create a reader that times out class SlowReader: async def readexactly(self, n): await asyncio.sleep(1) # Longer than timeout client._reader = SlowReader() client._writer = MockAsyncWriter() iterator = client.stream_async("test:App", "generate") with pytest.raises(DirtyTimeoutError): await iterator.__anext__() benoitc-gunicorn-f5fb19e/tests/dirty/test_multi_app_routing.py000066400000000000000000000507021514360242400251360ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Tests for routing requests to multiple dirty apps. This module verifies that when multiple dirty apps are configured, messages are correctly routed to the appropriate app based on app_path. 
""" import asyncio import os import struct import tempfile import pytest from concurrent.futures import ThreadPoolExecutor from gunicorn.config import Config from gunicorn.dirty.worker import DirtyWorker from gunicorn.dirty.arbiter import DirtyArbiter from gunicorn.dirty.protocol import ( DirtyProtocol, BinaryProtocol, make_request, HEADER_SIZE, ) from gunicorn.dirty.errors import DirtyAppNotFoundError # App paths for test apps COUNTER_APP_PATH = "tests.support_dirty_apps:CounterApp" ECHO_APP_PATH = "tests.support_dirty_apps:EchoApp" class MockLog: """Mock logger for testing.""" def __init__(self): self.messages = [] def debug(self, msg, *args): self.messages.append(("debug", msg % args if args else msg)) def info(self, msg, *args): self.messages.append(("info", msg % args if args else msg)) def warning(self, msg, *args): self.messages.append(("warning", msg % args if args else msg)) def error(self, msg, *args): self.messages.append(("error", msg % args if args else msg)) def critical(self, msg, *args): self.messages.append(("critical", msg % args if args else msg)) def exception(self, msg, *args): self.messages.append(("exception", msg % args if args else msg)) def close_on_exec(self): pass def reopen_files(self): pass class MockStreamWriter: """Mock StreamWriter that captures written messages.""" def __init__(self): self.messages = [] self._buffer = b"" self.closed = False def write(self, data): self._buffer += data async def drain(self): # Decode the buffer to extract messages using binary protocol while len(self._buffer) >= HEADER_SIZE: # Decode header to get payload length _, _, length = BinaryProtocol.decode_header( self._buffer[:HEADER_SIZE] ) total_size = HEADER_SIZE + length if len(self._buffer) >= total_size: msg_data = self._buffer[:total_size] self._buffer = self._buffer[total_size:] # decode_message returns (msg_type_str, request_id, payload_dict) msg_type_str, request_id, payload_dict = BinaryProtocol.decode_message(msg_data) # Reconstruct the dict 
format for backwards compatibility result = {"type": msg_type_str, "id": request_id} result.update(payload_dict) self.messages.append(result) else: break def close(self): self.closed = True async def wait_closed(self): pass def get_extra_info(self, name): return None class TestWorkerMultiAppLoading: """Tests for loading multiple apps in a worker.""" def test_worker_loads_multiple_apps(self): """Test that worker loads all configured apps.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[COUNTER_APP_PATH, ECHO_APP_PATH], cfg=cfg, log=log, socket_path=socket_path ) worker.load_apps() # Both apps should be loaded assert COUNTER_APP_PATH in worker.apps assert ECHO_APP_PATH in worker.apps # Apps should be initialized counter_app = worker.apps[COUNTER_APP_PATH] echo_app = worker.apps[ECHO_APP_PATH] assert counter_app.initialized is True assert echo_app.initialized is True worker._cleanup() def test_worker_apps_are_distinct_instances(self): """Test that each app is a distinct instance.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[COUNTER_APP_PATH, ECHO_APP_PATH], cfg=cfg, log=log, socket_path=socket_path ) worker.load_apps() counter_app = worker.apps[COUNTER_APP_PATH] echo_app = worker.apps[ECHO_APP_PATH] # They should be different instances assert counter_app is not echo_app # They should be different types assert type(counter_app).__name__ == "CounterApp" assert type(echo_app).__name__ == "EchoApp" worker._cleanup() class TestWorkerMultiAppRouting: """Tests for routing requests to correct app based on app_path.""" @pytest.mark.asyncio async def test_worker_routes_to_counter_app(self): """Test that worker routes request to CounterApp correctly.""" cfg = Config() log = MockLog() with 
tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[COUNTER_APP_PATH, ECHO_APP_PATH], cfg=cfg, log=log, socket_path=socket_path ) worker.load_apps() worker._executor = ThreadPoolExecutor(max_workers=1) try: # Call increment on CounterApp result = await worker.execute( COUNTER_APP_PATH, "increment", [], {"amount": 5} ) assert result == 5 # Call get_value on CounterApp result = await worker.execute( COUNTER_APP_PATH, "get_value", [], {} ) assert result == 5 finally: worker._cleanup() @pytest.mark.asyncio async def test_worker_routes_to_echo_app(self): """Test that worker routes request to EchoApp correctly.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[COUNTER_APP_PATH, ECHO_APP_PATH], cfg=cfg, log=log, socket_path=socket_path ) worker.load_apps() worker._executor = ThreadPoolExecutor(max_workers=1) try: # Call echo on EchoApp result = await worker.execute( ECHO_APP_PATH, "echo", ["hello"], {} ) assert result == "ECHO: hello" # Set new prefix result = await worker.execute( ECHO_APP_PATH, "set_prefix", ["TEST>"], {} ) assert result == "TEST>" # Echo with new prefix result = await worker.execute( ECHO_APP_PATH, "echo", ["world"], {} ) assert result == "TEST> world" finally: worker._cleanup() @pytest.mark.asyncio async def test_worker_routes_mixed_requests(self): """Test routing interleaved requests to different apps.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[COUNTER_APP_PATH, ECHO_APP_PATH], cfg=cfg, log=log, socket_path=socket_path ) worker.load_apps() worker._executor = ThreadPoolExecutor(max_workers=1) try: # Interleave calls to both apps result = await worker.execute( COUNTER_APP_PATH, 
"increment", [1], {} ) assert result == 1 result = await worker.execute( ECHO_APP_PATH, "echo", ["first"], {} ) assert result == "ECHO: first" result = await worker.execute( COUNTER_APP_PATH, "increment", [2], {} ) assert result == 3 result = await worker.execute( ECHO_APP_PATH, "echo", ["second"], {} ) assert result == "ECHO: second" # Verify final state of each app result = await worker.execute( COUNTER_APP_PATH, "get_value", [], {} ) assert result == 3 result = await worker.execute( ECHO_APP_PATH, "get_echo_count", [], {} ) assert result == 2 finally: worker._cleanup() class TestAppStateSeparation: """Tests for verifying apps maintain independent state.""" @pytest.mark.asyncio async def test_apps_maintain_separate_state(self): """Test that multiple apps maintain independent state.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[COUNTER_APP_PATH, ECHO_APP_PATH], cfg=cfg, log=log, socket_path=socket_path ) worker.load_apps() worker._executor = ThreadPoolExecutor(max_workers=1) try: # Modify CounterApp state await worker.execute(COUNTER_APP_PATH, "increment", [10], {}) await worker.execute(COUNTER_APP_PATH, "increment", [5], {}) # Modify EchoApp state await worker.execute(ECHO_APP_PATH, "set_prefix", ["CUSTOM:"], {}) await worker.execute(ECHO_APP_PATH, "echo", ["msg1"], {}) await worker.execute(ECHO_APP_PATH, "echo", ["msg2"], {}) # Verify CounterApp state is independent counter_val = await worker.execute( COUNTER_APP_PATH, "get_value", [], {} ) assert counter_val == 15 # Verify EchoApp state is independent prefix = await worker.execute( ECHO_APP_PATH, "get_prefix", [], {} ) assert prefix == "CUSTOM:" echo_count = await worker.execute( ECHO_APP_PATH, "get_echo_count", [], {} ) assert echo_count == 2 # Reset CounterApp and verify EchoApp unaffected await worker.execute(COUNTER_APP_PATH, "reset", [], {}) counter_val = await 
worker.execute( COUNTER_APP_PATH, "get_value", [], {} ) assert counter_val == 0 # EchoApp should be unaffected echo_count = await worker.execute( ECHO_APP_PATH, "get_echo_count", [], {} ) assert echo_count == 2 finally: worker._cleanup() class TestUnknownAppPath: """Tests for handling unknown app paths.""" @pytest.mark.asyncio async def test_unknown_app_path_raises_error(self): """Test that unknown app_path raises DirtyAppNotFoundError.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[COUNTER_APP_PATH, ECHO_APP_PATH], cfg=cfg, log=log, socket_path=socket_path ) worker.load_apps() worker._executor = ThreadPoolExecutor(max_workers=1) try: with pytest.raises(DirtyAppNotFoundError): await worker.execute( "nonexistent:App", "action", [], {} ) finally: worker._cleanup() @pytest.mark.asyncio async def test_handle_request_unknown_app_returns_error(self): """Test that handle_request returns error for unknown app.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[COUNTER_APP_PATH], cfg=cfg, log=log, socket_path=socket_path ) worker.load_apps() worker._executor = ThreadPoolExecutor(max_workers=1) try: request = make_request( request_id="test-unknown", app_path="unknown:App", action="test" ) writer = MockStreamWriter() await worker.handle_request(request, writer) assert len(writer.messages) == 1 response = writer.messages[0] assert response["type"] == DirtyProtocol.MSG_TYPE_ERROR assert "unknown:App" in response["error"]["message"] finally: worker._cleanup() class TestConcurrentMultiAppRequests: """Tests for concurrent requests to different apps.""" @pytest.mark.asyncio async def test_concurrent_requests_to_different_apps(self): """Test concurrent requests routed to different apps.""" cfg = Config() 
cfg.set("dirty_threads", 4) # Allow concurrent execution log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[COUNTER_APP_PATH, ECHO_APP_PATH], cfg=cfg, log=log, socket_path=socket_path ) worker.load_apps() worker._executor = ThreadPoolExecutor(max_workers=4) try: # Create concurrent tasks for both apps tasks = [ worker.execute(COUNTER_APP_PATH, "increment", [1], {}), worker.execute(ECHO_APP_PATH, "echo", ["msg1"], {}), worker.execute(COUNTER_APP_PATH, "increment", [2], {}), worker.execute(ECHO_APP_PATH, "echo", ["msg2"], {}), worker.execute(COUNTER_APP_PATH, "increment", [3], {}), worker.execute(ECHO_APP_PATH, "echo", ["msg3"], {}), ] results = await asyncio.gather(*tasks) # Verify echo results are correct (regardless of order) echo_results = [r for r in results if isinstance(r, str)] assert len(echo_results) == 3 assert all(r.startswith("ECHO:") for r in echo_results) # Counter results will vary based on execution order # but final state should reflect all increments counter_val = await worker.execute( COUNTER_APP_PATH, "get_value", [], {} ) assert counter_val == 6 # 1 + 2 + 3 finally: worker._cleanup() class TestMultiAppProtocolHandling: """Tests for protocol-level handling of multi-app requests.""" @pytest.mark.asyncio async def test_handle_request_routes_correctly(self): """Test handle_request routes to correct app via protocol.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[COUNTER_APP_PATH, ECHO_APP_PATH], cfg=cfg, log=log, socket_path=socket_path ) worker.load_apps() worker._executor = ThreadPoolExecutor(max_workers=1) try: # Request to CounterApp request1 = make_request( request_id="req-counter", app_path=COUNTER_APP_PATH, action="increment", args=[5] ) writer1 = MockStreamWriter() await 
worker.handle_request(request1, writer1) assert len(writer1.messages) == 1 assert writer1.messages[0]["type"] == DirtyProtocol.MSG_TYPE_RESPONSE assert writer1.messages[0]["result"] == 5 # Request to EchoApp request2 = make_request( request_id="req-echo", app_path=ECHO_APP_PATH, action="echo", args=["test message"] ) writer2 = MockStreamWriter() await worker.handle_request(request2, writer2) assert len(writer2.messages) == 1 assert writer2.messages[0]["type"] == DirtyProtocol.MSG_TYPE_RESPONSE assert writer2.messages[0]["result"] == "ECHO: test message" finally: worker._cleanup() class TestMultiAppCleanup: """Tests for cleanup of multiple apps.""" def test_cleanup_closes_all_apps(self): """Test that cleanup closes all loaded apps.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[COUNTER_APP_PATH, ECHO_APP_PATH], cfg=cfg, log=log, socket_path=socket_path ) worker.load_apps() counter_app = worker.apps[COUNTER_APP_PATH] echo_app = worker.apps[ECHO_APP_PATH] assert counter_app.closed is False assert echo_app.closed is False worker._cleanup() assert counter_app.closed is True assert echo_app.closed is True class TestMultiAppArbiterIntegration: """Tests for arbiter routing with multiple apps configured.""" @pytest.mark.asyncio async def test_arbiter_routes_no_workers_error(self): """Test arbiter returns error when no workers for multi-app config.""" cfg = Config() cfg.set("dirty_workers", 0) cfg.set("dirty_apps", [COUNTER_APP_PATH, ECHO_APP_PATH]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = os.getpid() try: # Request to CounterApp - should fail (no workers) request = make_request( request_id="test-counter", app_path=COUNTER_APP_PATH, action="increment" ) writer = MockStreamWriter() await arbiter.route_request(request, writer) assert len(writer.messages) == 1 response = writer.messages[0] assert 
response["type"] == DirtyProtocol.MSG_TYPE_ERROR assert "No dirty workers available" in response["error"]["message"] finally: arbiter._cleanup_sync() def test_arbiter_config_has_multiple_apps(self): """Test arbiter config correctly stores multiple apps.""" cfg = Config() cfg.set("dirty_apps", [COUNTER_APP_PATH, ECHO_APP_PATH]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) try: app_paths = arbiter.cfg.dirty_apps assert COUNTER_APP_PATH in app_paths assert ECHO_APP_PATH in app_paths assert len(app_paths) == 2 finally: arbiter._cleanup_sync() benoitc-gunicorn-f5fb19e/tests/dirty/test_per_app_worker_allocation.py000066400000000000000000000250051514360242400266170ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Integration tests for per-app worker allocation.""" import pytest from gunicorn.config import Config from gunicorn.dirty.arbiter import DirtyArbiter class MockLog: """Mock logger for testing.""" def __init__(self): self.messages = [] def debug(self, msg, *args): self.messages.append(("debug", msg % args if args else msg)) def info(self, msg, *args): self.messages.append(("info", msg % args if args else msg)) def warning(self, msg, *args): self.messages.append(("warning", msg % args if args else msg)) def error(self, msg, *args): self.messages.append(("error", msg % args if args else msg)) def critical(self, msg, *args): self.messages.append(("critical", msg % args if args else msg)) def exception(self, msg, *args): self.messages.append(("exception", msg % args if args else msg)) def close_on_exec(self): pass def reopen_files(self): pass class TestPerAppWorkerAllocation: """Integration tests for per-app worker allocation.""" def test_heavy_app_loaded_on_limited_workers(self): """App with workers=2 only loaded on 2 of 4 workers.""" cfg = Config() cfg.set("dirty_workers", 4) cfg.set("dirty_apps", [ "tests.support_dirty_app:TestDirtyApp", # unlimited 
"tests.support_dirty_app:SlowDirtyApp:2", # limited to 2 ]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) # Simulate spawning 4 workers for i in range(4): apps = arbiter._get_apps_for_new_worker() arbiter._register_worker_apps(1000 + i, apps) # Check distribution unlimited_app = "tests.support_dirty_app:TestDirtyApp" limited_app = "tests.support_dirty_app:SlowDirtyApp" # Unlimited app should be on all 4 workers assert len(arbiter.app_worker_map[unlimited_app]) == 4 # Limited app should only be on 2 workers assert len(arbiter.app_worker_map[limited_app]) == 2 arbiter._cleanup_sync() def test_light_app_loaded_on_all_workers(self): """App with workers=None loaded on all workers.""" cfg = Config() cfg.set("dirty_workers", 4) cfg.set("dirty_apps", [ "tests.support_dirty_app:TestDirtyApp", ]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) # Simulate spawning 4 workers for i in range(4): apps = arbiter._get_apps_for_new_worker() arbiter._register_worker_apps(1000 + i, apps) # App should be on all 4 workers app_path = "tests.support_dirty_app:TestDirtyApp" assert len(arbiter.app_worker_map[app_path]) == 4 arbiter._cleanup_sync() def test_mixed_apps_correct_distribution(self): """Mix of limited and unlimited apps distributed correctly.""" cfg = Config() cfg.set("dirty_workers", 4) cfg.set("dirty_apps", [ "tests.support_dirty_app:TestDirtyApp", # unlimited "tests.support_dirty_app:SlowDirtyApp:1", # limited to 1 ]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) # Simulate spawning 4 workers for i in range(4): apps = arbiter._get_apps_for_new_worker() arbiter._register_worker_apps(1000 + i, apps) unlimited_app = "tests.support_dirty_app:TestDirtyApp" limited_app = "tests.support_dirty_app:SlowDirtyApp" # Unlimited app on all workers assert len(arbiter.app_worker_map[unlimited_app]) == 4 # Limited app on only 1 worker assert len(arbiter.app_worker_map[limited_app]) == 1 arbiter._cleanup_sync() @pytest.mark.asyncio async def 
test_request_routing_respects_allocation(self): """Requests only routed to workers with the target app.""" cfg = Config() cfg.set("dirty_apps", [ "tests.support_dirty_app:TestDirtyApp", "tests.support_dirty_app:SlowDirtyApp:1", ]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) # Set up workers arbiter.workers[1001] = "worker1" arbiter.workers[1002] = "worker2" # Worker 1001 has both apps, worker 1002 has only TestDirtyApp arbiter._register_worker_apps(1001, [ "tests.support_dirty_app:TestDirtyApp", "tests.support_dirty_app:SlowDirtyApp", ]) arbiter._register_worker_apps(1002, [ "tests.support_dirty_app:TestDirtyApp", ]) # Request for SlowDirtyApp should only go to worker 1001 worker = await arbiter._get_available_worker("tests.support_dirty_app:SlowDirtyApp") assert worker == 1001 # Request for TestDirtyApp should go to either worker = await arbiter._get_available_worker("tests.support_dirty_app:TestDirtyApp") assert worker in [1001, 1002] arbiter._cleanup_sync() def test_worker_crash_app_reassigned_to_new_worker(self): """When worker dies, new worker gets the app it had.""" cfg = Config() cfg.set("dirty_apps", [ "tests.support_dirty_app:TestDirtyApp", "tests.support_dirty_app:SlowDirtyApp:1", ]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = 12345 # Set up initial workers arbiter.workers[1001] = "worker1" arbiter.worker_sockets[1001] = "/tmp/fake1.sock" # Worker 1001 has both apps arbiter._register_worker_apps(1001, [ "tests.support_dirty_app:TestDirtyApp", "tests.support_dirty_app:SlowDirtyApp", ]) # Simulate worker crash arbiter._cleanup_worker(1001) # Apps should be queued for respawn assert len(arbiter._pending_respawns) == 1 pending_apps = arbiter._pending_respawns[0] assert "tests.support_dirty_app:TestDirtyApp" in pending_apps assert "tests.support_dirty_app:SlowDirtyApp" in pending_apps arbiter._cleanup_sync() @pytest.mark.asyncio async def test_worker_crash_other_workers_still_serve_app(self): """When one of two workers 
dies, other still serves requests.""" cfg = Config() cfg.set("dirty_apps", [ "tests.support_dirty_app:TestDirtyApp", ]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = 12345 # Set up two workers for the same app arbiter.workers[1001] = "worker1" arbiter.worker_sockets[1001] = "/tmp/fake1.sock" arbiter.workers[1002] = "worker2" arbiter.worker_sockets[1002] = "/tmp/fake2.sock" app_path = "tests.support_dirty_app:TestDirtyApp" arbiter._register_worker_apps(1001, [app_path]) arbiter._register_worker_apps(1002, [app_path]) # Both workers serve the app assert len(arbiter.app_worker_map[app_path]) == 2 # Worker 1001 crashes arbiter._cleanup_worker(1001) # Worker 1002 still serves requests assert len(arbiter.app_worker_map[app_path]) == 1 assert 1002 in arbiter.app_worker_map[app_path] worker = await arbiter._get_available_worker(app_path) assert worker == 1002 arbiter._cleanup_sync() @pytest.mark.asyncio async def test_worker_crash_sole_worker_app_unavailable_until_respawn(self): """When sole worker for app dies, requests fail until respawn.""" cfg = Config() cfg.set("dirty_apps", [ "tests.support_dirty_app:SlowDirtyApp:1", ]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = 12345 # Only one worker for this app arbiter.workers[1001] = "worker1" arbiter.worker_sockets[1001] = "/tmp/fake1.sock" app_path = "tests.support_dirty_app:SlowDirtyApp" arbiter._register_worker_apps(1001, [app_path]) # Worker crashes arbiter._cleanup_worker(1001) # No workers available for the app worker = await arbiter._get_available_worker(app_path) assert worker is None arbiter._cleanup_sync() def test_config_format_module_class_n(self): """Config 'mod:Class:2' correctly limits to 2 workers.""" cfg = Config() cfg.set("dirty_apps", [ "tests.support_dirty_app:TestDirtyApp:2", ]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) # Check parsed spec app_path = "tests.support_dirty_app:TestDirtyApp" assert arbiter.app_specs[app_path]["worker_count"] 
== 2 arbiter._cleanup_sync() def test_class_attribute_workers_detected(self): """App with workers=2 class attribute is detected by arbiter.""" cfg = Config() cfg.set("dirty_workers", 4) cfg.set("dirty_apps", [ "tests.support_dirty_app:HeavyModelApp", # Has workers=2 class attr ]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) # Check parsed spec - should read workers=2 from class app_path = "tests.support_dirty_app:HeavyModelApp" assert arbiter.app_specs[app_path]["worker_count"] == 2 # Simulate spawning 4 workers for i in range(4): apps = arbiter._get_apps_for_new_worker() arbiter._register_worker_apps(1000 + i, apps) # HeavyModelApp should only be on 2 workers assert len(arbiter.app_worker_map[app_path]) == 2 arbiter._cleanup_sync() def test_config_override_takes_precedence_over_class_attribute(self): """Config :N takes precedence over class workers attribute.""" cfg = Config() cfg.set("dirty_workers", 4) cfg.set("dirty_apps", [ # HeavyModelApp has workers=2, but config says 1 "tests.support_dirty_app:HeavyModelApp:1", ]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) # Config override (1) should take precedence app_path = "tests.support_dirty_app:HeavyModelApp" assert arbiter.app_specs[app_path]["worker_count"] == 1 # Simulate spawning 4 workers for i in range(4): apps = arbiter._get_apps_for_new_worker() arbiter._register_worker_apps(1000 + i, apps) # Should only be on 1 worker (config override) assert len(arbiter.app_worker_map[app_path]) == 1 arbiter._cleanup_sync() benoitc-gunicorn-f5fb19e/tests/dirty/test_streaming_integration.py000066400000000000000000000371101514360242400257670ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Integration tests for dirty streaming functionality. 
These tests verify the full streaming pipeline: client -> arbiter -> worker -> generator -> chunks -> client """ import asyncio import os import struct import tempfile import pytest from unittest import mock from gunicorn.config import Config from gunicorn.dirty.protocol import ( DirtyProtocol, BinaryProtocol, make_request, make_chunk_message, make_end_message, make_response, make_error_response, HEADER_SIZE, ) from gunicorn.dirty.worker import DirtyWorker from gunicorn.dirty.arbiter import DirtyArbiter from gunicorn.dirty.client import DirtyClient from gunicorn.dirty.errors import DirtyError class MockLog: """Mock logger for testing.""" def __init__(self): self.messages = [] def debug(self, msg, *args): self.messages.append(("debug", msg % args if args else msg)) def info(self, msg, *args): self.messages.append(("info", msg % args if args else msg)) def warning(self, msg, *args): self.messages.append(("warning", msg % args if args else msg)) def error(self, msg, *args): self.messages.append(("error", msg % args if args else msg)) def close_on_exec(self): pass def reopen_files(self): pass class MockStreamWriter: """Mock StreamWriter that captures written messages.""" def __init__(self): self.messages = [] self._buffer = b"" self.closed = False def write(self, data): self._buffer += data async def drain(self): # Decode the buffer to extract messages using binary protocol while len(self._buffer) >= HEADER_SIZE: # Decode header to get payload length _, _, length = BinaryProtocol.decode_header( self._buffer[:HEADER_SIZE] ) total_size = HEADER_SIZE + length if len(self._buffer) >= total_size: msg_data = self._buffer[:total_size] self._buffer = self._buffer[total_size:] # decode_message returns (msg_type_str, request_id, payload_dict) msg_type_str, request_id, payload_dict = BinaryProtocol.decode_message(msg_data) # Reconstruct the dict format for backwards compatibility result = {"type": msg_type_str, "id": request_id} result.update(payload_dict) 
self.messages.append(result) else: break def close(self): self.closed = True async def wait_closed(self): pass def get_extra_info(self, name): return None class MockStreamReader: """Mock StreamReader that yields predefined messages.""" def __init__(self, messages): self._data = b'' for msg in messages: self._data += BinaryProtocol._encode_from_dict(msg) self._pos = 0 async def readexactly(self, n): if self._pos + n > len(self._data): raise asyncio.IncompleteReadError(self._data[self._pos:], n) result = self._data[self._pos:self._pos + n] self._pos += n return result class TestStreamingEndToEnd: """End-to-end streaming tests using mocked components.""" @pytest.mark.asyncio async def test_sync_generator_end_to_end(self): """Test complete flow: sync generator -> worker -> arbiter -> client.""" # Simulate what a worker would produce for a sync generator worker_messages = [ make_chunk_message(123, "Hello"), make_chunk_message(123, " "), make_chunk_message(123, "World"), make_end_message(123), ] # Create an arbiter with mocked worker connection cfg = Config() cfg.set("dirty_timeout", 30) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.alive = True arbiter.workers = {1234: mock.Mock()} arbiter.worker_sockets = {1234: '/tmp/worker.sock'} # Mock worker connection mock_reader = MockStreamReader(worker_messages) async def mock_get_connection(pid): return mock_reader, MockStreamWriter() arbiter._get_worker_connection = mock_get_connection # Create client writer to capture messages client_writer = MockStreamWriter() # Execute request through arbiter request = make_request(123, "test:App", "generate") await arbiter._execute_on_worker(1234, request, client_writer) # Verify all messages were forwarded assert len(client_writer.messages) == 4 assert client_writer.messages[0]["type"] == "chunk" assert client_writer.messages[0]["data"] == "Hello" assert client_writer.messages[1]["data"] == " " assert client_writer.messages[2]["data"] == "World" assert 
client_writer.messages[3]["type"] == "end" arbiter._cleanup_sync() @pytest.mark.asyncio async def test_async_generator_end_to_end(self): """Test complete flow: async generator -> worker -> arbiter -> client.""" worker_messages = [ make_chunk_message(456, "Async"), make_chunk_message(456, " "), make_chunk_message(456, "Stream"), make_end_message(456), ] cfg = Config() cfg.set("dirty_timeout", 30) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.alive = True arbiter.workers = {1234: mock.Mock()} arbiter.worker_sockets = {1234: '/tmp/worker.sock'} mock_reader = MockStreamReader(worker_messages) async def mock_get_connection(pid): return mock_reader, MockStreamWriter() arbiter._get_worker_connection = mock_get_connection client_writer = MockStreamWriter() request = make_request(456, "test:App", "async_generate") await arbiter._execute_on_worker(1234, request, client_writer) assert len(client_writer.messages) == 4 assert client_writer.messages[0]["data"] == "Async" assert client_writer.messages[3]["type"] == "end" arbiter._cleanup_sync() class TestStreamingErrorHandling: """Tests for error handling during streaming.""" @pytest.mark.asyncio async def test_error_mid_stream(self): """Test that errors during streaming are properly forwarded.""" worker_messages = [ make_chunk_message(789, "First"), make_chunk_message(789, "Second"), make_error_response(789, DirtyError("Stream failed")), ] cfg = Config() cfg.set("dirty_timeout", 30) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.alive = True arbiter.workers = {1234: mock.Mock()} arbiter.worker_sockets = {1234: '/tmp/worker.sock'} mock_reader = MockStreamReader(worker_messages) async def mock_get_connection(pid): return mock_reader, MockStreamWriter() arbiter._get_worker_connection = mock_get_connection client_writer = MockStreamWriter() request = make_request(789, "test:App", "generate_with_error") await arbiter._execute_on_worker(1234, request, client_writer) # Should have 2 chunks + 1 error 
assert len(client_writer.messages) == 3 assert client_writer.messages[0]["type"] == "chunk" assert client_writer.messages[1]["type"] == "chunk" assert client_writer.messages[2]["type"] == "error" assert "Stream failed" in client_writer.messages[2]["error"]["message"] arbiter._cleanup_sync() class TestStreamingBackwardCompatibility: """Tests for backward compatibility with non-streaming responses.""" @pytest.mark.asyncio async def test_non_streaming_response_still_works(self): """Test that regular (non-streaming) responses still work.""" worker_messages = [ make_response("req-abc", {"result": 42, "data": [1, 2, 3]}), ] cfg = Config() cfg.set("dirty_timeout", 30) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.alive = True arbiter.workers = {1234: mock.Mock()} arbiter.worker_sockets = {1234: '/tmp/worker.sock'} mock_reader = MockStreamReader(worker_messages) async def mock_get_connection(pid): return mock_reader, MockStreamWriter() arbiter._get_worker_connection = mock_get_connection client_writer = MockStreamWriter() request = make_request("req-abc", "test:App", "compute") await arbiter._execute_on_worker(1234, request, client_writer) # Should have 1 response assert len(client_writer.messages) == 1 assert client_writer.messages[0]["type"] == "response" assert client_writer.messages[0]["result"]["result"] == 42 arbiter._cleanup_sync() @pytest.mark.asyncio async def test_error_response_still_works(self): """Test that error responses still work.""" worker_messages = [ make_error_response("req-def", DirtyError("Something failed")), ] cfg = Config() cfg.set("dirty_timeout", 30) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.alive = True arbiter.workers = {1234: mock.Mock()} arbiter.worker_sockets = {1234: '/tmp/worker.sock'} mock_reader = MockStreamReader(worker_messages) async def mock_get_connection(pid): return mock_reader, MockStreamWriter() arbiter._get_worker_connection = mock_get_connection client_writer = MockStreamWriter() 
request = make_request("req-def", "test:App", "fail") await arbiter._execute_on_worker(1234, request, client_writer) assert len(client_writer.messages) == 1 assert client_writer.messages[0]["type"] == "error" arbiter._cleanup_sync() class TestStreamingWorkerIntegration: """Integration tests for worker streaming with execute.""" @pytest.mark.asyncio async def test_worker_handles_sync_generator(self): """Test worker properly handles sync generator from execute.""" cfg = Config() cfg.set("dirty_timeout", 300) log = MockLog() with mock.patch('gunicorn.dirty.worker.WorkerTmp'): worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=["test:App"], cfg=cfg, log=log, socket_path="/tmp/test.sock" ) worker.apps = {} worker._executor = None worker.tmp = mock.Mock() writer = MockStreamWriter() # Mock execute to return a sync generator def sync_gen(): yield "one" yield "two" yield "three" async def mock_execute(app_path, action, args, kwargs): return sync_gen() with mock.patch.object(worker, 'execute', side_effect=mock_execute): request = make_request(123, "test:App", "generate") await worker.handle_request(request, writer) # Should have 3 chunks + 1 end assert len(writer.messages) == 4 assert writer.messages[0]["data"] == "one" assert writer.messages[1]["data"] == "two" assert writer.messages[2]["data"] == "three" assert writer.messages[3]["type"] == "end" @pytest.mark.asyncio async def test_worker_handles_async_generator(self): """Test worker properly handles async generator from execute.""" cfg = Config() cfg.set("dirty_timeout", 300) log = MockLog() with mock.patch('gunicorn.dirty.worker.WorkerTmp'): worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=["test:App"], cfg=cfg, log=log, socket_path="/tmp/test.sock" ) worker.apps = {} worker._executor = None worker.tmp = mock.Mock() writer = MockStreamWriter() # Mock execute to return an async generator async def async_gen(): yield "async_one" yield "async_two" async def mock_execute(app_path, action, args, kwargs): return 
async_gen() with mock.patch.object(worker, 'execute', side_effect=mock_execute): request = make_request(456, "test:App", "async_generate") await worker.handle_request(request, writer) # Should have 2 chunks + 1 end assert len(writer.messages) == 3 assert writer.messages[0]["data"] == "async_one" assert writer.messages[1]["data"] == "async_two" assert writer.messages[2]["type"] == "end" class TestStreamingMixedScenarios: """Tests for mixed streaming scenarios.""" @pytest.mark.asyncio async def test_large_stream(self): """Test streaming with many chunks.""" worker_messages = [] for i in range(500): worker_messages.append(make_chunk_message("req-large", f"chunk-{i}")) worker_messages.append(make_end_message("req-large")) cfg = Config() cfg.set("dirty_timeout", 30) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.alive = True arbiter.workers = {1234: mock.Mock()} arbiter.worker_sockets = {1234: '/tmp/worker.sock'} mock_reader = MockStreamReader(worker_messages) async def mock_get_connection(pid): return mock_reader, MockStreamWriter() arbiter._get_worker_connection = mock_get_connection client_writer = MockStreamWriter() request = make_request("req-large", "test:App", "large_stream") await arbiter._execute_on_worker(1234, request, client_writer) # Should have 500 chunks + 1 end assert len(client_writer.messages) == 501 assert client_writer.messages[0]["data"] == "chunk-0" assert client_writer.messages[499]["data"] == "chunk-499" assert client_writer.messages[500]["type"] == "end" arbiter._cleanup_sync() @pytest.mark.asyncio async def test_stream_with_complex_data(self): """Test streaming with complex JSON-serializable data.""" worker_messages = [ make_chunk_message("req-complex", { "token": "Hello", "scores": [0.1, 0.2, 0.3], "metadata": {"position": 0} }), make_chunk_message("req-complex", { "token": "World", "scores": [0.4, 0.5], "metadata": {"position": 1} }), make_end_message("req-complex"), ] cfg = Config() cfg.set("dirty_timeout", 30) log = 
MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.alive = True arbiter.workers = {1234: mock.Mock()} arbiter.worker_sockets = {1234: '/tmp/worker.sock'} mock_reader = MockStreamReader(worker_messages) async def mock_get_connection(pid): return mock_reader, MockStreamWriter() arbiter._get_worker_connection = mock_get_connection client_writer = MockStreamWriter() request = make_request("req-complex", "test:App", "complex_stream") await arbiter._execute_on_worker(1234, request, client_writer) assert len(client_writer.messages) == 3 assert client_writer.messages[0]["data"]["token"] == "Hello" assert client_writer.messages[0]["data"]["scores"] == [0.1, 0.2, 0.3] assert client_writer.messages[1]["data"]["metadata"]["position"] == 1 arbiter._cleanup_sync() benoitc-gunicorn-f5fb19e/tests/dirty/test_worker_streaming.py000066400000000000000000000341671514360242400247660ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""Tests for dirty worker streaming functionality.""" import asyncio import struct from unittest import mock import pytest from gunicorn.dirty.protocol import ( DirtyProtocol, BinaryProtocol, make_request, make_chunk_message, make_end_message, HEADER_SIZE, ) from gunicorn.dirty.worker import DirtyWorker class FakeStreamWriter: """Mock StreamWriter that captures written messages.""" def __init__(self): self.messages = [] self._buffer = b"" def write(self, data): self._buffer += data async def drain(self): # Decode the buffer to extract messages using binary protocol while len(self._buffer) >= HEADER_SIZE: # Decode header to get payload length _, _, length = BinaryProtocol.decode_header( self._buffer[:HEADER_SIZE] ) total_size = HEADER_SIZE + length if len(self._buffer) >= total_size: msg_data = self._buffer[:total_size] self._buffer = self._buffer[total_size:] # decode_message returns (msg_type_str, request_id, payload_dict) msg_type_str, request_id, payload_dict = BinaryProtocol.decode_message(msg_data) # Reconstruct the dict format for backwards compatibility result = {"type": msg_type_str, "id": request_id} result.update(payload_dict) self.messages.append(result) else: break def close(self): pass async def wait_closed(self): pass def create_worker(): """Create a test worker with mocked components.""" cfg = mock.Mock() cfg.dirty_timeout = 30 cfg.dirty_threads = 1 cfg.env = None cfg.uid = None cfg.gid = None cfg.initgroups = False cfg.dirty_worker_init = mock.Mock() cfg.umask = 0o22 log = mock.Mock() with mock.patch('gunicorn.dirty.worker.WorkerTmp'): worker = DirtyWorker( age=1, ppid=1, app_paths=["test:App"], cfg=cfg, log=log, socket_path="/tmp/test.sock" ) worker.apps = {} worker._executor = None # Use default executor for sync generator tests worker.tmp = mock.Mock() return worker class TestWorkerSyncGeneratorStreaming: """Tests for sync generator streaming.""" @pytest.mark.asyncio async def test_sync_generator_sends_chunks_and_end(self): """Test that sync 
generator sends chunk messages then end message.""" def generate_tokens(): yield "Hello" yield " " yield "World" worker = create_worker() writer = FakeStreamWriter() # Mock execute to return the sync generator directly async def mock_execute(app_path, action, args, kwargs): return generate_tokens() with mock.patch.object(worker, 'execute', side_effect=mock_execute): request = make_request(123, "test:App", "generate") await worker.handle_request(request, writer) # Should have 3 chunks + 1 end message assert len(writer.messages) == 4 # Check chunk messages assert writer.messages[0]["type"] == "chunk" assert writer.messages[0]["id"] == 123 assert writer.messages[0]["data"] == "Hello" assert writer.messages[1]["type"] == "chunk" assert writer.messages[1]["data"] == " " assert writer.messages[2]["type"] == "chunk" assert writer.messages[2]["data"] == "World" # Check end message assert writer.messages[3]["type"] == "end" assert writer.messages[3]["id"] == 123 @pytest.mark.asyncio async def test_sync_generator_error_mid_stream(self): """Test that error during streaming sends error message.""" def generate_with_error(): yield "First" raise ValueError("Something went wrong") worker = create_worker() writer = FakeStreamWriter() async def mock_execute(app_path, action, args, kwargs): return generate_with_error() with mock.patch.object(worker, 'execute', side_effect=mock_execute): request = make_request(123, "test:App", "generate") await worker.handle_request(request, writer) # Should have 1 chunk + 1 error message assert len(writer.messages) == 2 assert writer.messages[0]["type"] == "chunk" assert writer.messages[0]["data"] == "First" assert writer.messages[1]["type"] == "error" assert "Something went wrong" in writer.messages[1]["error"]["message"] class TestWorkerAsyncGeneratorStreaming: """Tests for async generator streaming.""" @pytest.mark.asyncio async def test_async_generator_sends_chunks_and_end(self): """Test that async generator sends chunk messages then end 
message.""" async def async_generate_tokens(): yield "Hello" yield " " yield "World" worker = create_worker() writer = FakeStreamWriter() async def mock_execute(app_path, action, args, kwargs): return async_generate_tokens() with mock.patch.object(worker, 'execute', side_effect=mock_execute): request = make_request(123, "test:App", "generate") await worker.handle_request(request, writer) # Should have 3 chunks + 1 end message assert len(writer.messages) == 4 # Check chunk messages assert writer.messages[0]["type"] == "chunk" assert writer.messages[0]["id"] == 123 assert writer.messages[0]["data"] == "Hello" assert writer.messages[1]["type"] == "chunk" assert writer.messages[1]["data"] == " " assert writer.messages[2]["type"] == "chunk" assert writer.messages[2]["data"] == "World" # Check end message assert writer.messages[3]["type"] == "end" assert writer.messages[3]["id"] == 123 @pytest.mark.asyncio async def test_async_generator_error_mid_stream(self): """Test that error during async streaming sends error message.""" async def async_generate_with_error(): yield "First" raise ValueError("Async error") worker = create_worker() writer = FakeStreamWriter() async def mock_execute(app_path, action, args, kwargs): return async_generate_with_error() with mock.patch.object(worker, 'execute', side_effect=mock_execute): request = make_request(123, "test:App", "generate") await worker.handle_request(request, writer) # Should have 1 chunk + 1 error message assert len(writer.messages) == 2 assert writer.messages[0]["type"] == "chunk" assert writer.messages[0]["data"] == "First" assert writer.messages[1]["type"] == "error" assert "Async error" in writer.messages[1]["error"]["message"] class TestWorkerNonStreamingBackwardCompat: """Tests for backward compatibility with non-streaming responses.""" @pytest.mark.asyncio async def test_non_generator_returns_response(self): """Test that non-generator method returns regular response.""" worker = create_worker() writer = 
FakeStreamWriter() async def mock_execute(app_path, action, args, kwargs): return args[0] + args[1] with mock.patch.object(worker, 'execute', side_effect=mock_execute): request = make_request(123, "test:App", "compute", args=(2, 3)) await worker.handle_request(request, writer) # Should have 1 response message assert len(writer.messages) == 1 assert writer.messages[0]["type"] == "response" assert writer.messages[0]["id"] == 123 assert writer.messages[0]["result"] == 5 @pytest.mark.asyncio async def test_list_result_not_treated_as_streaming(self): """Test that list result is not treated as streaming.""" worker = create_worker() writer = FakeStreamWriter() async def mock_execute(app_path, action, args, kwargs): return [1, 2, 3, 4, 5] with mock.patch.object(worker, 'execute', side_effect=mock_execute): request = make_request(123, "test:App", "get_list") await worker.handle_request(request, writer) # Should have 1 response message (not 5 chunks) assert len(writer.messages) == 1 assert writer.messages[0]["type"] == "response" assert writer.messages[0]["result"] == [1, 2, 3, 4, 5] @pytest.mark.asyncio async def test_error_in_execute_sends_error(self): """Test that error in execute sends error response.""" worker = create_worker() writer = FakeStreamWriter() async def mock_execute(app_path, action, args, kwargs): raise RuntimeError("Failed!") with mock.patch.object(worker, 'execute', side_effect=mock_execute): request = make_request(123, "test:App", "fail") await worker.handle_request(request, writer) # Should have 1 error message assert len(writer.messages) == 1 assert writer.messages[0]["type"] == "error" assert "Failed!" 
in writer.messages[0]["error"]["message"] @pytest.mark.asyncio async def test_none_result(self): """Test that None result works correctly.""" worker = create_worker() writer = FakeStreamWriter() async def mock_execute(app_path, action, args, kwargs): return None with mock.patch.object(worker, 'execute', side_effect=mock_execute): request = make_request(123, "test:App", "void") await worker.handle_request(request, writer) # Should have 1 response message assert len(writer.messages) == 1 assert writer.messages[0]["type"] == "response" assert writer.messages[0]["result"] is None class TestWorkerStreamingComplexData: """Tests for streaming with complex data types.""" @pytest.mark.asyncio async def test_streaming_dict_chunks(self): """Test streaming chunks that are dictionaries.""" async def generate_tokens(): yield {"token": "Hello", "score": 0.9} yield {"token": "World", "score": 0.8} worker = create_worker() writer = FakeStreamWriter() async def mock_execute(app_path, action, args, kwargs): return generate_tokens() with mock.patch.object(worker, 'execute', side_effect=mock_execute): request = make_request(123, "test:App", "generate") await worker.handle_request(request, writer) assert len(writer.messages) == 3 # 2 chunks + 1 end assert writer.messages[0]["data"]["token"] == "Hello" assert writer.messages[0]["data"]["score"] == 0.9 assert writer.messages[1]["data"]["token"] == "World" @pytest.mark.asyncio async def test_streaming_empty_generator(self): """Test streaming with empty generator.""" async def empty_generate(): return yield # Make it a generator worker = create_worker() writer = FakeStreamWriter() async def mock_execute(app_path, action, args, kwargs): return empty_generate() with mock.patch.object(worker, 'execute', side_effect=mock_execute): request = make_request(123, "test:App", "generate") await worker.handle_request(request, writer) # Should have just 1 end message assert len(writer.messages) == 1 assert writer.messages[0]["type"] == "end" 
@pytest.mark.asyncio async def test_streaming_many_chunks(self): """Test streaming with many chunks.""" async def generate_many(): for i in range(100): yield f"chunk-{i}" worker = create_worker() writer = FakeStreamWriter() async def mock_execute(app_path, action, args, kwargs): return generate_many() with mock.patch.object(worker, 'execute', side_effect=mock_execute): request = make_request(123, "test:App", "generate") await worker.handle_request(request, writer) # Should have 100 chunks + 1 end message assert len(writer.messages) == 101 assert writer.messages[0]["data"] == "chunk-0" assert writer.messages[99]["data"] == "chunk-99" assert writer.messages[100]["type"] == "end" class TestWorkerStreamingHeartbeat: """Tests for heartbeat updates during streaming.""" @pytest.mark.asyncio async def test_heartbeat_updated_during_streaming(self): """Test that heartbeat is updated during streaming.""" async def generate_tokens(): yield "Hello" yield "World" worker = create_worker() writer = FakeStreamWriter() # Track notify calls notify_count = [0] original_notify = worker.notify def counting_notify(): notify_count[0] += 1 return original_notify() if callable(original_notify) else None worker.notify = counting_notify async def mock_execute(app_path, action, args, kwargs): return generate_tokens() with mock.patch.object(worker, 'execute', side_effect=mock_execute): request = make_request(123, "test:App", "generate") await worker.handle_request(request, writer) # Should have been notified at least once per chunk + initial assert notify_count[0] >= 2 # At least one per chunk class TestWorkerMessageTypeValidation: """Tests for message type validation.""" @pytest.mark.asyncio async def test_unknown_message_type_sends_error(self): """Test that unknown message type sends error response.""" worker = create_worker() writer = FakeStreamWriter() # Send a message with unknown type message = {"type": "unknown", "id": 123} await worker.handle_request(message, writer) assert 
len(writer.messages) == 1 assert writer.messages[0]["type"] == "error" assert "Unknown message type" in writer.messages[0]["error"]["message"] benoitc-gunicorn-f5fb19e/tests/docker/000077500000000000000000000000001514360242400200745ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/tests/docker/__init__.py000066400000000000000000000002301514360242400222000ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Docker-based integration tests package.""" benoitc-gunicorn-f5fb19e/tests/docker/asgi/000077500000000000000000000000001514360242400210175ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/tests/docker/asgi/Dockerfile000066400000000000000000000005041514360242400230100ustar00rootroot00000000000000FROM python:3.11-slim WORKDIR /build # Copy gunicorn source COPY . /build/ # Install gunicorn from source RUN pip install --no-cache-dir -e . # Copy test app WORKDIR /app COPY tests/docker/asgi/app.py /app/ # Expose HTTP port EXPOSE 8000 CMD ["gunicorn", "--worker-class", "asgi", "--bind", "0.0.0.0:8000", "app:app"] benoitc-gunicorn-f5fb19e/tests/docker/asgi/app.py000066400000000000000000000027111514360242400221520ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""Simple ASGI test application for HTTP protocol testing.""" async def app(scope, receive, send): """Simple ASGI application that echoes request info.""" if scope["type"] == "lifespan": while True: message = await receive() if message["type"] == "lifespan.startup": await send({"type": "lifespan.startup.complete"}) elif message["type"] == "lifespan.shutdown": await send({"type": "lifespan.shutdown.complete"}) return if scope["type"] != "http": return # Read body body = b"" while True: message = await receive() body += message.get("body", b"") if not message.get("more_body", False): break # Build response method = scope["method"] path = scope["path"] query = scope.get("query_string", b"").decode("utf-8") response_body = f"Method: {method}\nPath: {path}\nQuery: {query}\nBody: {body.decode('utf-8')}\n" response_bytes = response_body.encode("utf-8") await send({ "type": "http.response.start", "status": 200, "headers": [ [b"content-type", b"text/plain"], [b"content-length", str(len(response_bytes)).encode()], ], }) await send({ "type": "http.response.body", "body": response_bytes, }) benoitc-gunicorn-f5fb19e/tests/docker/asgi/docker-compose.yml000066400000000000000000000004211514360242400244510ustar00rootroot00000000000000services: gunicorn: build: context: ../../.. dockerfile: tests/docker/asgi/Dockerfile command: > gunicorn --worker-class asgi --bind 0.0.0.0:8000 --workers 1 --log-level debug app:app ports: - "8080:8000" benoitc-gunicorn-f5fb19e/tests/docker/asgi/test_asgi.sh000077500000000000000000000061251514360242400233440ustar00rootroot00000000000000#!/bin/bash # Integration test for ASGI HTTP protocol support # # This script tests that gunicorn's ASGI worker correctly handles # HTTP requests directly (without uWSGI protocol). set -e SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" cd "$SCRIPT_DIR" # Use IPv4 explicitly to avoid Docker IPv6 issues BASE_URL="http://127.0.0.1:8080" cleanup() { echo "Cleaning up..." 
docker compose down -v 2>/dev/null || true } trap cleanup EXIT echo "=== Building and starting containers ===" docker compose up -d --build echo "=== Waiting for services to be ready ===" sleep 5 echo "=== Running tests ===" # Test 1: Simple GET request echo "Test 1: Simple GET request" RESPONSE=$(curl -s "$BASE_URL/") if echo "$RESPONSE" | grep -q "Method: GET"; then echo " PASS: GET request works" else echo " FAIL: GET request failed" echo " Response: $RESPONSE" exit 1 fi # Test 2: GET with query string echo "Test 2: GET with query string" RESPONSE=$(curl -s "$BASE_URL/search?q=test&page=1") if echo "$RESPONSE" | grep -q "Query: q=test&page=1"; then echo " PASS: Query string works" else echo " FAIL: Query string failed" echo " Response: $RESPONSE" exit 1 fi # Test 3: POST with body echo "Test 3: POST with body" RESPONSE=$(curl -s -X POST -d "hello=world" "$BASE_URL/submit") if echo "$RESPONSE" | grep -q "Method: POST" && echo "$RESPONSE" | grep -q "Body: hello=world"; then echo " PASS: POST with body works" else echo " FAIL: POST with body failed" echo " Response: $RESPONSE" exit 1 fi # Test 4: Path handling echo "Test 4: Path handling" RESPONSE=$(curl -s "$BASE_URL/api/v1/users") if echo "$RESPONSE" | grep -q "Path: /api/v1/users"; then echo " PASS: Path handling works" else echo " FAIL: Path handling failed" echo " Response: $RESPONSE" exit 1 fi # Test 5: Multiple requests (keepalive) echo "Test 5: Multiple requests (keepalive)" for i in 1 2 3; do RESPONSE=$(curl -s "$BASE_URL/request/$i") if ! 
echo "$RESPONSE" | grep -q "Path: /request/$i"; then echo " FAIL: Request $i failed" exit 1 fi done echo " PASS: Multiple requests work" # Test 6: Large POST body echo "Test 6: Large POST body" LARGE_BODY=$(python3 -c "print('x' * 10000)") RESPONSE=$(curl -s -X POST -d "$LARGE_BODY" "$BASE_URL/large") if echo "$RESPONSE" | grep -q "Method: POST" && echo "$RESPONSE" | grep -c "x" | grep -q "10000"; then echo " PASS: Large POST body works" else # Verify body length in response BODY_LINE=$(echo "$RESPONSE" | grep "Body:") BODY_LEN=${#BODY_LINE} if [ "$BODY_LEN" -gt 10000 ]; then echo " PASS: Large POST body works" else echo " FAIL: Large POST body failed" echo " Response length: $BODY_LEN" exit 1 fi fi # Test 7: HTTP headers echo "Test 7: Custom headers" RESPONSE=$(curl -s -H "X-Custom-Header: test-value" "$BASE_URL/headers") if echo "$RESPONSE" | grep -q "Method: GET"; then echo " PASS: Custom headers work" else echo " FAIL: Custom headers failed" echo " Response: $RESPONSE" exit 1 fi echo "" echo "=== All tests passed! ===" benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/000077500000000000000000000000001514360242400232115ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/Dockerfile.gunicorn000066400000000000000000000027611514360242400270340ustar00rootroot00000000000000FROM python:3.12-slim # Install build dependencies RUN apt-get update && apt-get install -y --no-install-recommends \ gcc \ curl \ && rm -rf /var/lib/apt/lists/* WORKDIR /app # Copy the gunicorn source code and install it with all extras COPY . 
/gunicorn-src/ RUN pip install --no-cache-dir /gunicorn-src/[http2,testing] # Install additional ASGI frameworks for testing RUN pip install --no-cache-dir \ starlette>=0.35.0 \ fastapi>=0.109.0 \ websockets>=12.0 # Copy the test applications COPY tests/docker/asgi_compliance/apps /app/apps # Create entrypoint script RUN echo '#!/bin/bash\n\ set -e\n\ \n\ if [ "$USE_SSL" = "1" ]; then\n\ exec gunicorn "apps.main_app:app" \\\n\ --bind "[::]:8443" \\\n\ --worker-class "asgi" \\\n\ --workers 2 \\\n\ --worker-connections 1000 \\\n\ --certfile "/certs/server.crt" \\\n\ --keyfile "/certs/server.key" \\\n\ --asgi-disconnect-grace-period 0 \\\n\ --log-level "debug" \\\n\ --access-logfile "-" \\\n\ --error-logfile "-"\n\ else\n\ exec gunicorn "apps.main_app:app" \\\n\ --bind "[::]:8000" \\\n\ --worker-class "asgi" \\\n\ --workers 2 \\\n\ --worker-connections 1000 \\\n\ --asgi-disconnect-grace-period 0 \\\n\ --log-level "debug" \\\n\ --access-logfile "-" \\\n\ --error-logfile "-"\n\ fi\n\ ' > /app/entrypoint.sh && chmod +x /app/entrypoint.sh EXPOSE 8000 8443 CMD ["/app/entrypoint.sh"] benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/Dockerfile.nginx000066400000000000000000000004141514360242400263240ustar00rootroot00000000000000FROM nginx:1.25-alpine # Install curl for health checks RUN apk add --no-cache curl # Remove default config RUN rm /etc/nginx/conf.d/default.conf # Copy custom nginx config COPY nginx.conf /etc/nginx/nginx.conf EXPOSE 8080 8444 CMD ["nginx", "-g", "daemon off;"] benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/__init__.py000066400000000000000000000003471514360242400253260ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ ASGI Compliance Docker Integration Tests. This package contains Docker-based integration tests for ASGI compliance. 
""" benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/apps/000077500000000000000000000000001514360242400241545ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/apps/__init__.py000066400000000000000000000002411514360242400262620ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ ASGI test applications for compliance testing. """ benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/apps/framework_apps.py000066400000000000000000000307051514360242400275530ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Framework integration test applications. Tests integration with popular ASGI frameworks like Starlette and FastAPI. These apps require the frameworks to be installed. """ import json import os # Framework availability flags STARLETTE_AVAILABLE = False FASTAPI_AVAILABLE = False try: from starlette.applications import Starlette from starlette.responses import ( JSONResponse, PlainTextResponse, StreamingResponse, ) from starlette.routing import Route, WebSocketRoute from starlette.websockets import WebSocket STARLETTE_AVAILABLE = True except ImportError: pass try: from fastapi import FastAPI, Request, WebSocket as FastAPIWebSocket from fastapi.responses import ( JSONResponse as FastAPIJSONResponse, StreamingResponse as FastAPIStreamingResponse, ) FASTAPI_AVAILABLE = True except ImportError: pass # ============================================================================ # Pure ASGI Fallback App (when frameworks not available) # ============================================================================ async def fallback_app(scope, receive, send): """Fallback ASGI app when frameworks are not installed.""" if scope["type"] == "lifespan": while True: message = await receive() if message["type"] == "lifespan.startup": await send({"type": 
"lifespan.startup.complete"}) elif message["type"] == "lifespan.shutdown": await send({"type": "lifespan.shutdown.complete"}) return return if scope["type"] != "http": return body = json.dumps({ "error": "Framework not available", "starlette_available": STARLETTE_AVAILABLE, "fastapi_available": FASTAPI_AVAILABLE, "message": "Install starlette and/or fastapi to use this app", }).encode("utf-8") await send({ "type": "http.response.start", "status": 503, "headers": [ (b"content-type", b"application/json"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": "http.response.body", "body": body, "more_body": False, }) # ============================================================================ # Starlette Application # ============================================================================ if STARLETTE_AVAILABLE: import asyncio async def starlette_homepage(request): """Starlette homepage.""" return PlainTextResponse("Hello from Starlette!") async def starlette_json(request): """Return JSON response.""" return JSONResponse({ "framework": "starlette", "method": request.method, "path": request.url.path, "query_params": dict(request.query_params), }) async def starlette_echo(request): """Echo request body.""" body = await request.body() return PlainTextResponse(body.decode("utf-8", errors="replace")) async def starlette_headers(request): """Return request headers.""" return JSONResponse(dict(request.headers)) async def starlette_scope(request): """Return ASGI scope.""" scope = request.scope scope_json = { "type": scope["type"], "asgi": scope["asgi"], "http_version": scope["http_version"], "method": scope["method"], "scheme": scope["scheme"], "path": scope["path"], "query_string": scope["query_string"].decode("latin-1"), "root_path": scope.get("root_path", ""), "headers": [ [n.decode("latin-1"), v.decode("latin-1")] for n, v in scope["headers"] ], "server": list(scope["server"]) if scope.get("server") else None, "client": list(scope["client"]) if 
scope.get("client") else None, } return JSONResponse(scope_json) async def starlette_streaming(request): """Streaming response.""" async def generate(): for i in range(10): yield f"Chunk {i + 1}\n".encode("utf-8") await asyncio.sleep(0.1) return StreamingResponse(generate(), media_type="text/plain") async def starlette_websocket_endpoint(websocket: WebSocket): """WebSocket echo endpoint.""" await websocket.accept() try: while True: data = await websocket.receive_text() await websocket.send_text(f"Starlette echo: {data}") except Exception: pass async def starlette_health(request): """Health check.""" return PlainTextResponse("OK") # Lifespan context manager from contextlib import asynccontextmanager @asynccontextmanager async def starlette_lifespan(app): """Starlette lifespan context manager.""" # Startup app.state.startup_time = asyncio.get_event_loop().time() app.state.started = True yield # Shutdown app.state.started = False starlette_routes = [ Route("/", starlette_homepage), Route("/json", starlette_json), Route("/echo", starlette_echo, methods=["POST"]), Route("/headers", starlette_headers), Route("/scope", starlette_scope), Route("/streaming", starlette_streaming), Route("/health", starlette_health), WebSocketRoute("/ws/echo", starlette_websocket_endpoint), ] starlette_app = Starlette( routes=starlette_routes, lifespan=starlette_lifespan, ) else: starlette_app = fallback_app # ============================================================================ # FastAPI Application # ============================================================================ if FASTAPI_AVAILABLE: import asyncio from contextlib import asynccontextmanager from typing import Any, Dict @asynccontextmanager async def fastapi_lifespan(app: FastAPI): """FastAPI lifespan context manager.""" # Startup app.state.startup_time = asyncio.get_event_loop().time() app.state.started = True yield # Shutdown app.state.started = False fastapi_app = FastAPI( title="ASGI Compliance Test - FastAPI", 
lifespan=fastapi_lifespan, ) @fastapi_app.get("/") async def fastapi_root(): """FastAPI homepage.""" return {"message": "Hello from FastAPI!"} @fastapi_app.get("/json") async def fastapi_json(request: Request) -> Dict[str, Any]: """Return JSON response with request info.""" return { "framework": "fastapi", "method": request.method, "path": str(request.url.path), "query_params": dict(request.query_params), } @fastapi_app.post("/echo") async def fastapi_echo(request: Request): """Echo request body.""" body = await request.body() return FastAPIJSONResponse(content={ "echo": body.decode("utf-8", errors="replace"), "length": len(body), }) @fastapi_app.get("/headers") async def fastapi_headers(request: Request): """Return request headers.""" return dict(request.headers) @fastapi_app.get("/scope") async def fastapi_scope(request: Request): """Return ASGI scope.""" scope = request.scope return { "type": scope["type"], "asgi": scope["asgi"], "http_version": scope["http_version"], "method": scope["method"], "scheme": scope["scheme"], "path": scope["path"], "query_string": scope["query_string"].decode("latin-1"), "root_path": scope.get("root_path", ""), "server": list(scope["server"]) if scope.get("server") else None, "client": list(scope["client"]) if scope.get("client") else None, } @fastapi_app.get("/streaming") async def fastapi_streaming(): """Streaming response.""" async def generate(): for i in range(10): yield f"Chunk {i + 1}\n" await asyncio.sleep(0.1) return FastAPIStreamingResponse(generate(), media_type="text/plain") @fastapi_app.get("/health") async def fastapi_health(): """Health check.""" return {"status": "ok"} @fastapi_app.get("/items/{item_id}") async def fastapi_get_item(item_id: int, q: str = None): """Path parameter example.""" return {"item_id": item_id, "query": q} @fastapi_app.post("/items/") async def fastapi_create_item(request: Request): """Create item example.""" body = await request.json() return {"created": body} 
@fastapi_app.websocket("/ws/echo") async def fastapi_websocket_echo(websocket: FastAPIWebSocket): """WebSocket echo endpoint.""" await websocket.accept() try: while True: data = await websocket.receive_text() await websocket.send_text(f"FastAPI echo: {data}") except Exception: pass else: fastapi_app = fallback_app # ============================================================================ # Combined Application Router # ============================================================================ async def combined_app(scope, receive, send): """Combined app that routes based on path prefix.""" if scope["type"] == "lifespan": # Handle lifespan for both apps while True: message = await receive() if message["type"] == "lifespan.startup": await send({"type": "lifespan.startup.complete"}) elif message["type"] == "lifespan.shutdown": await send({"type": "lifespan.shutdown.complete"}) return return path = scope.get("path", "") if path.startswith("/starlette"): # Strip prefix for Starlette scope = dict(scope) scope["path"] = path[10:] or "/" scope["raw_path"] = scope["path"].encode("latin-1") await starlette_app(scope, receive, send) elif path.startswith("/fastapi"): # Strip prefix for FastAPI scope = dict(scope) scope["path"] = path[8:] or "/" scope["raw_path"] = scope["path"].encode("latin-1") await fastapi_app(scope, receive, send) elif path == "/": # Root - show available apps body = json.dumps({ "apps": { "starlette": { "available": STARLETTE_AVAILABLE, "prefix": "/starlette", }, "fastapi": { "available": FASTAPI_AVAILABLE, "prefix": "/fastapi", }, }, "endpoints": { "starlette": ["/", "/json", "/echo", "/headers", "/scope", "/streaming", "/ws/echo"], "fastapi": ["/", "/json", "/echo", "/headers", "/scope", "/streaming", "/items/{id}", "/ws/echo"], }, }).encode("utf-8") await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"application/json"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": 
"http.response.body", "body": body, "more_body": False, }) elif path == "/health": body = b"OK" await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"text/plain"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": "http.response.body", "body": body, "more_body": False, }) else: body = b"Not Found - use /starlette/* or /fastapi/* prefixes" await send({ "type": "http.response.start", "status": 404, "headers": [ (b"content-type", b"text/plain"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": "http.response.body", "body": body, "more_body": False, }) # Export the apps app = combined_app benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/apps/http_app.py000066400000000000000000000407451514360242400263570ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ HTTP test application for ASGI compliance testing. Provides various endpoints to test HTTP request/response handling, headers, body processing, and ASGI scope inspection. 
""" import json import time async def app(scope, receive, send): """Main ASGI HTTP application with multiple test endpoints.""" if scope["type"] == "lifespan": await handle_lifespan(scope, receive, send) return if scope["type"] != "http": return path = scope["path"] method = scope["method"] # Route to appropriate handler if path == "/": await handle_root(scope, receive, send) elif path == "/echo": await handle_echo(scope, receive, send) elif path == "/headers": await handle_headers(scope, receive, send) elif path == "/scope": await handle_scope(scope, receive, send) elif path.startswith("/status"): await handle_status(scope, receive, send) elif path == "/large": await handle_large(scope, receive, send) elif path == "/method": await handle_method(scope, receive, send) elif path == "/query": await handle_query(scope, receive, send) elif path == "/post-json": await handle_post_json(scope, receive, send) elif path == "/delay": await handle_delay(scope, receive, send) elif path == "/health": await handle_health(scope, receive, send) elif path == "/early-hints": await handle_early_hints(scope, receive, send) elif path == "/cookies": await handle_cookies(scope, receive, send) elif path == "/redirect": await handle_redirect(scope, receive, send) else: await handle_not_found(scope, receive, send) async def handle_lifespan(scope, receive, send): """Handle ASGI lifespan events.""" while True: message = await receive() if message["type"] == "lifespan.startup": # Store startup time in state if available if "state" in scope: scope["state"]["started_at"] = time.time() await send({"type": "lifespan.startup.complete"}) elif message["type"] == "lifespan.shutdown": await send({"type": "lifespan.shutdown.complete"}) return async def handle_root(scope, receive, send): """Handle root path - basic response.""" body = b"Hello, ASGI!" 
await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"text/plain; charset=utf-8"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": "http.response.body", "body": body, "more_body": False, }) async def handle_echo(scope, receive, send): """Echo back the request body.""" # Read the full request body body_parts = [] while True: message = await receive() body = message.get("body", b"") if body: body_parts.append(body) if not message.get("more_body", False): break response_body = b"".join(body_parts) await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"application/octet-stream"), (b"content-length", str(len(response_body)).encode()), ], }) await send({ "type": "http.response.body", "body": response_body, "more_body": False, }) async def handle_headers(scope, receive, send): """Return request headers as JSON.""" # Drain request body await drain_body(receive) # Convert headers to JSON-serializable format headers_dict = {} for name, value in scope["headers"]: name_str = name.decode("latin-1") value_str = value.decode("latin-1") if name_str in headers_dict: # Handle multiple headers with same name if isinstance(headers_dict[name_str], list): headers_dict[name_str].append(value_str) else: headers_dict[name_str] = [headers_dict[name_str], value_str] else: headers_dict[name_str] = value_str response_body = json.dumps(headers_dict, indent=2).encode("utf-8") await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"application/json"), (b"content-length", str(len(response_body)).encode()), ], }) await send({ "type": "http.response.body", "body": response_body, "more_body": False, }) async def handle_scope(scope, receive, send): """Return ASGI scope as JSON for inspection.""" # Drain request body await drain_body(receive) # Create a JSON-serializable version of the scope scope_json = { "type": scope["type"], "asgi": scope["asgi"], "http_version": 
scope["http_version"], "method": scope["method"], "scheme": scope["scheme"], "path": scope["path"], "raw_path": scope["raw_path"].decode("latin-1") if scope.get("raw_path") else None, "query_string": scope["query_string"].decode("latin-1") if scope.get("query_string") else "", "root_path": scope.get("root_path", ""), "headers": [ [name.decode("latin-1"), value.decode("latin-1")] for name, value in scope["headers"] ], "server": list(scope["server"]) if scope.get("server") else None, "client": list(scope["client"]) if scope.get("client") else None, } # Include extensions if present if "extensions" in scope: scope_json["extensions"] = {} for ext_name, ext_value in scope["extensions"].items(): if isinstance(ext_value, dict): scope_json["extensions"][ext_name] = ext_value else: scope_json["extensions"][ext_name] = str(ext_value) # Include state keys if present if "state" in scope: scope_json["state_keys"] = list(scope["state"].keys()) response_body = json.dumps(scope_json, indent=2).encode("utf-8") await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"application/json"), (b"content-length", str(len(response_body)).encode()), ], }) await send({ "type": "http.response.body", "body": response_body, "more_body": False, }) async def handle_status(scope, receive, send): """Return specific HTTP status code from query parameter.""" # Drain request body await drain_body(receive) # Parse query string for status code query = scope["query_string"].decode("latin-1") status_code = 200 for param in query.split("&"): if param.startswith("code="): try: status_code = int(param[5:]) except ValueError: status_code = 400 # Status code validation if status_code < 100 or status_code >= 600: status_code = 400 body = f"Status: {status_code}".encode("utf-8") await send({ "type": "http.response.start", "status": status_code, "headers": [ (b"content-type", b"text/plain"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": 
"http.response.body", "body": body, "more_body": False, }) async def handle_large(scope, receive, send): """Return a large response (1MB by default).""" # Drain request body await drain_body(receive) # Parse query string for size query = scope["query_string"].decode("latin-1") size = 1024 * 1024 # 1MB default for param in query.split("&"): if param.startswith("size="): try: size = int(param[5:]) # Limit to 10MB size = min(size, 10 * 1024 * 1024) except ValueError: pass # Generate response body body = b"x" * size await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"application/octet-stream"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": "http.response.body", "body": body, "more_body": False, }) async def handle_method(scope, receive, send): """Return the HTTP method used.""" # Drain request body await drain_body(receive) method = scope["method"] body = json.dumps({"method": method}).encode("utf-8") await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"application/json"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": "http.response.body", "body": body, "more_body": False, }) async def handle_query(scope, receive, send): """Return parsed query parameters.""" # Drain request body await drain_body(receive) query = scope["query_string"].decode("latin-1") params = {} if query: for param in query.split("&"): if "=" in param: key, value = param.split("=", 1) # Handle multiple values for same key if key in params: if isinstance(params[key], list): params[key].append(value) else: params[key] = [params[key], value] else: params[key] = value else: params[param] = "" body = json.dumps({ "raw": query, "params": params, }).encode("utf-8") await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"application/json"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": "http.response.body", "body": 
body, "more_body": False, }) async def handle_post_json(scope, receive, send): """Parse JSON body and return it.""" if scope["method"] != "POST": await send_error(send, 405, "Method Not Allowed") return # Read request body body_parts = [] while True: message = await receive() body = message.get("body", b"") if body: body_parts.append(body) if not message.get("more_body", False): break request_body = b"".join(body_parts) try: data = json.loads(request_body.decode("utf-8")) except (json.JSONDecodeError, UnicodeDecodeError) as e: await send_error(send, 400, f"Invalid JSON: {e}") return response = { "received": data, "type": type(data).__name__, } response_body = json.dumps(response).encode("utf-8") await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"application/json"), (b"content-length", str(len(response_body)).encode()), ], }) await send({ "type": "http.response.body", "body": response_body, "more_body": False, }) async def handle_delay(scope, receive, send): """Respond after a delay (for timeout testing).""" import asyncio # Drain request body await drain_body(receive) # Parse delay from query string query = scope["query_string"].decode("latin-1") delay = 1.0 # Default 1 second for param in query.split("&"): if param.startswith("seconds="): try: delay = float(param[8:]) # Limit to 30 seconds delay = min(delay, 30.0) except ValueError: pass await asyncio.sleep(delay) body = json.dumps({"delayed": delay}).encode("utf-8") await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"application/json"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": "http.response.body", "body": body, "more_body": False, }) async def handle_health(scope, receive, send): """Health check endpoint.""" await drain_body(receive) body = b"OK" await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"text/plain"), (b"content-length", str(len(body)).encode()), ], }) 
await send({ "type": "http.response.body", "body": body, "more_body": False, }) async def handle_early_hints(scope, receive, send): """Send 103 Early Hints before the response.""" await drain_body(receive) # Send 103 Early Hints await send({ "type": "http.response.informational", "status": 103, "headers": [ (b"link", b"; rel=preload; as=style"), (b"link", b"; rel=preload; as=script"), ], }) # Send actual response body = b"Response with Early Hints" await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"text/plain"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": "http.response.body", "body": body, "more_body": False, }) async def handle_cookies(scope, receive, send): """Set and return cookies.""" await drain_body(receive) # Parse query for cookie values to set query = scope["query_string"].decode("latin-1") cookies_to_set = [] for param in query.split("&"): if param.startswith("set="): cookie_value = param[4:] cookies_to_set.append((b"set-cookie", cookie_value.encode())) # Get existing cookies from request request_cookies = {} for name, value in scope["headers"]: if name == b"cookie": cookie_str = value.decode("latin-1") for cookie in cookie_str.split(";"): cookie = cookie.strip() if "=" in cookie: k, v = cookie.split("=", 1) request_cookies[k] = v response = { "request_cookies": request_cookies, "set_cookies": [c[1].decode() for c in cookies_to_set], } body = json.dumps(response).encode("utf-8") headers = [ (b"content-type", b"application/json"), (b"content-length", str(len(body)).encode()), ] headers.extend(cookies_to_set) await send({ "type": "http.response.start", "status": 200, "headers": headers, }) await send({ "type": "http.response.body", "body": body, "more_body": False, }) async def handle_redirect(scope, receive, send): """Redirect to another URL.""" await drain_body(receive) # Parse query for redirect target query = scope["query_string"].decode("latin-1") location = "/" status = 302 for 
param in query.split("&"): if param.startswith("to="): location = param[3:] elif param.startswith("status="): try: status = int(param[7:]) if status not in (301, 302, 303, 307, 308): status = 302 except ValueError: pass await send({ "type": "http.response.start", "status": status, "headers": [ (b"location", location.encode()), (b"content-length", b"0"), ], }) await send({ "type": "http.response.body", "body": b"", "more_body": False, }) async def handle_not_found(scope, receive, send): """Handle 404 Not Found.""" await drain_body(receive) await send_error(send, 404, "Not Found") async def drain_body(receive): """Drain the request body.""" while True: message = await receive() if not message.get("more_body", False): break async def send_error(send, status, message): """Send an error response.""" body = message.encode("utf-8") await send({ "type": "http.response.start", "status": status, "headers": [ (b"content-type", b"text/plain"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": "http.response.body", "body": body, "more_body": False, }) benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/apps/lifespan_app.py000066400000000000000000000210001514360242400271600ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Lifespan test application for ASGI compliance testing. Tests the ASGI lifespan protocol including startup, shutdown, and state sharing between lifespan and request handlers. 
""" import json import os import time # Module-level state to track lifespan events (fallback if scope state unavailable) _lifespan_state = { "startup_called": False, "startup_complete": False, "shutdown_called": False, "startup_time": None, "startup_count": 0, "request_count": 0, } async def app(scope, receive, send): """Main ASGI application with lifespan support.""" if scope["type"] == "lifespan": await handle_lifespan(scope, receive, send) return if scope["type"] != "http": return path = scope["path"] if path == "/": await handle_root(scope, receive, send) elif path == "/state": await handle_state(scope, receive, send) elif path == "/lifespan-info": await handle_lifespan_info(scope, receive, send) elif path == "/counter": await handle_counter(scope, receive, send) elif path == "/health": await handle_health(scope, receive, send) else: await handle_not_found(scope, receive, send) async def handle_lifespan(scope, receive, send): """Handle ASGI lifespan protocol.""" global _lifespan_state while True: message = await receive() if message["type"] == "lifespan.startup": _lifespan_state["startup_called"] = True _lifespan_state["startup_time"] = time.time() _lifespan_state["startup_count"] += 1 # Check for failure trigger via environment if os.environ.get("LIFESPAN_FAIL_STARTUP") == "1": await send({ "type": "lifespan.startup.failed", "message": "Startup failed (triggered by environment)", }) return # Initialize state if available if "state" in scope: scope["state"]["lifespan_started"] = True scope["state"]["startup_time"] = _lifespan_state["startup_time"] scope["state"]["db_connection"] = "simulated_connection" scope["state"]["cache"] = {} scope["state"]["request_count"] = 0 _lifespan_state["startup_complete"] = True await send({"type": "lifespan.startup.complete"}) elif message["type"] == "lifespan.shutdown": _lifespan_state["shutdown_called"] = True # Cleanup state if available if "state" in scope: scope["state"]["lifespan_stopped"] = True 
scope["state"]["shutdown_time"] = time.time() await send({"type": "lifespan.shutdown.complete"}) return async def handle_root(scope, receive, send): """Root endpoint.""" await drain_body(receive) _lifespan_state["request_count"] += 1 # Increment request count in state if available if "state" in scope and "request_count" in scope["state"]: scope["state"]["request_count"] += 1 body = b"Lifespan Test App" await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"text/plain"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": "http.response.body", "body": body, "more_body": False, }) async def handle_state(scope, receive, send): """Return the current state (from scope or module-level).""" await drain_body(receive) _lifespan_state["request_count"] += 1 # Collect state information state_info = { "module_state": { "startup_called": _lifespan_state["startup_called"], "startup_complete": _lifespan_state["startup_complete"], "shutdown_called": _lifespan_state["shutdown_called"], "startup_time": _lifespan_state["startup_time"], "startup_count": _lifespan_state["startup_count"], "request_count": _lifespan_state["request_count"], }, "scope_state_available": "state" in scope, } if "state" in scope: # Serialize scope state (only simple types) scope_state = {} for key, value in scope["state"].items(): try: json.dumps(value) # Test if serializable scope_state[key] = value except (TypeError, ValueError): scope_state[key] = str(type(value).__name__) state_info["scope_state"] = scope_state body = json.dumps(state_info, indent=2, default=str).encode("utf-8") await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"application/json"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": "http.response.body", "body": body, "more_body": False, }) async def handle_lifespan_info(scope, receive, send): """Return lifespan-specific information.""" await drain_body(receive) info = { 
"lifespan_supported": True, "startup_complete": _lifespan_state["startup_complete"], "scope_state_present": "state" in scope, "uptime_seconds": None, } if _lifespan_state["startup_time"]: info["uptime_seconds"] = time.time() - _lifespan_state["startup_time"] if "state" in scope: info["state_keys"] = list(scope["state"].keys()) if "db_connection" in scope["state"]: info["db_connection_status"] = "active" body = json.dumps(info, indent=2).encode("utf-8") await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"application/json"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": "http.response.body", "body": body, "more_body": False, }) async def handle_counter(scope, receive, send): """Increment and return a counter (tests state persistence).""" await drain_body(receive) _lifespan_state["request_count"] += 1 counter_value = _lifespan_state["request_count"] # Also track in scope state if available if "state" in scope: scope["state"]["request_count"] = scope["state"].get("request_count", 0) + 1 counter_value = scope["state"]["request_count"] body = json.dumps({ "counter": counter_value, "source": "scope_state" if "state" in scope else "module_state", }).encode("utf-8") await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"application/json"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": "http.response.body", "body": body, "more_body": False, }) async def handle_health(scope, receive, send): """Health check that verifies lifespan startup completed.""" await drain_body(receive) if not _lifespan_state["startup_complete"]: body = b"Lifespan not started" status = 503 else: body = b"OK" status = 200 await send({ "type": "http.response.start", "status": status, "headers": [ (b"content-type", b"text/plain"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": "http.response.body", "body": body, "more_body": False, }) async def 
handle_not_found(scope, receive, send): """Handle 404 Not Found.""" await drain_body(receive) await send_error(send, 404, "Not Found") async def drain_body(receive): """Drain the request body.""" while True: message = await receive() if not message.get("more_body", False): break async def send_error(send, status, message): """Send an error response.""" body = message.encode("utf-8") await send({ "type": "http.response.start", "status": status, "headers": [ (b"content-type", b"text/plain"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": "http.response.body", "body": body, "more_body": False, }) # Application factory for explicit lifespan support def create_app(): """Create the ASGI application.""" return app benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/apps/main_app.py000066400000000000000000000174031514360242400263170ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Main ASGI application for compliance testing. Routes requests to appropriate test applications based on path prefix. This is the primary entry point for Docker-based integration tests. """ import json import time from .http_app import app as http_app from .websocket_app import app as websocket_app from .streaming_app import app as streaming_app from .lifespan_app import app as lifespan_app from .framework_apps import combined_app as framework_app # Global state for lifespan _app_state = { "started": False, "startup_time": None, } async def app(scope, receive, send): """Main routing application. 
Routes based on path prefix: - /http/* -> HTTP test endpoints - /ws/* -> WebSocket test endpoints - /stream/* -> Streaming test endpoints - /lifespan/* -> Lifespan test endpoints - /framework/* -> Framework integration tests - / -> Root with info - /health -> Health check """ if scope["type"] == "lifespan": await handle_lifespan(scope, receive, send) return path = scope.get("path", "") # WebSocket handling - check scope type if scope["type"] == "websocket": if path.startswith("/ws/"): await websocket_app(scope, receive, send) elif path.startswith("/framework/"): # Route to framework WebSocket handlers new_scope = dict(scope) new_scope["path"] = path[10:] or "/" new_scope["raw_path"] = new_scope["path"].encode("latin-1") await framework_app(new_scope, receive, send) else: await websocket_app(scope, receive, send) return # HTTP routing if scope["type"] == "http": if path == "/" or path == "": await handle_root(scope, receive, send) elif path == "/health": await handle_health(scope, receive, send) elif path == "/info": await handle_info(scope, receive, send) elif path.startswith("/http/"): # Route to HTTP app, stripping prefix new_scope = dict(scope) new_scope["path"] = path[5:] or "/" new_scope["raw_path"] = new_scope["path"].encode("latin-1") await http_app(new_scope, receive, send) elif path.startswith("/stream/"): # Route to streaming app, stripping prefix new_scope = dict(scope) new_scope["path"] = path[7:] or "/" new_scope["raw_path"] = new_scope["path"].encode("latin-1") await streaming_app(new_scope, receive, send) elif path.startswith("/lifespan/"): # Route to lifespan app, stripping prefix new_scope = dict(scope) new_scope["path"] = path[9:] or "/" new_scope["raw_path"] = new_scope["path"].encode("latin-1") await lifespan_app(new_scope, receive, send) elif path.startswith("/framework/"): # Route to framework app, stripping prefix new_scope = dict(scope) new_scope["path"] = path[10:] or "/" new_scope["raw_path"] = new_scope["path"].encode("latin-1") await 
framework_app(new_scope, receive, send) else: # Try direct routing to http_app for convenience await http_app(scope, receive, send) async def handle_lifespan(scope, receive, send): """Handle ASGI lifespan events.""" global _app_state while True: message = await receive() if message["type"] == "lifespan.startup": _app_state["started"] = True _app_state["startup_time"] = time.time() # Initialize state if available if "state" in scope: scope["state"]["main_app_started"] = True scope["state"]["startup_time"] = _app_state["startup_time"] await send({"type": "lifespan.startup.complete"}) elif message["type"] == "lifespan.shutdown": _app_state["started"] = False if "state" in scope: scope["state"]["main_app_started"] = False await send({"type": "lifespan.shutdown.complete"}) return async def handle_root(scope, receive, send): """Root endpoint with routing information.""" await drain_body(receive) info = { "app": "ASGI Compliance Testbed", "version": "1.0.0", "routes": { "/": "This info page", "/health": "Health check endpoint", "/info": "Detailed server info", "/http/*": "HTTP test endpoints", "/ws/*": "WebSocket test endpoints", "/stream/*": "Streaming test endpoints", "/lifespan/*": "Lifespan protocol tests", "/framework/*": "Framework integration tests", }, "http_endpoints": [ "/http/echo", "/http/headers", "/http/scope", "/http/status?code=XXX", "/http/large", "/http/method", "/http/query", "/http/post-json", "/http/delay", "/http/early-hints", "/http/cookies", "/http/redirect", ], "websocket_endpoints": [ "/ws/echo", "/ws/echo-binary", "/ws/subprotocol", "/ws/close?code=XXX", "/ws/scope", "/ws/reject", "/ws/ping", "/ws/broadcast", "/ws/large", "/ws/delay", ], "streaming_endpoints": [ "/stream/streaming", "/stream/sse", "/stream/chunked", "/stream/slow-stream", "/stream/large-stream", "/stream/ndjson", "/stream/echo-stream", ], "lifespan_endpoints": [ "/lifespan/state", "/lifespan/lifespan-info", "/lifespan/counter", "/lifespan/health", ], "framework_endpoints": [ 
"/framework/starlette/*", "/framework/fastapi/*", ], } body = json.dumps(info, indent=2).encode("utf-8") await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"application/json"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": "http.response.body", "body": body, "more_body": False, }) async def handle_health(scope, receive, send): """Health check endpoint.""" await drain_body(receive) body = b"OK" await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"text/plain"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": "http.response.body", "body": body, "more_body": False, }) async def handle_info(scope, receive, send): """Detailed server information.""" await drain_body(receive) info = { "started": _app_state["started"], "startup_time": _app_state["startup_time"], "uptime": time.time() - _app_state["startup_time"] if _app_state["startup_time"] else None, "scope_state_available": "state" in scope, "asgi": scope.get("asgi", {}), "server": list(scope["server"]) if scope.get("server") else None, } if "state" in scope: info["state_keys"] = list(scope["state"].keys()) body = json.dumps(info, indent=2).encode("utf-8") await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"application/json"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": "http.response.body", "body": body, "more_body": False, }) async def drain_body(receive): """Drain the request body.""" while True: message = await receive() if not message.get("more_body", False): break benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/apps/streaming_app.py000066400000000000000000000314171514360242400273650ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Streaming test application for ASGI compliance testing. 
Provides endpoints for testing chunked transfer encoding, Server-Sent Events (SSE), and streaming responses. """ import asyncio import json import time async def app(scope, receive, send): """Main ASGI streaming application.""" if scope["type"] == "lifespan": await handle_lifespan(scope, receive, send) return if scope["type"] != "http": return path = scope["path"] # Route to appropriate handler if path == "/streaming": await handle_streaming(scope, receive, send) elif path == "/sse": await handle_sse(scope, receive, send) elif path == "/chunked": await handle_chunked(scope, receive, send) elif path == "/slow-stream": await handle_slow_stream(scope, receive, send) elif path == "/large-stream": await handle_large_stream(scope, receive, send) elif path == "/infinite": await handle_infinite(scope, receive, send) elif path == "/echo-stream": await handle_echo_stream(scope, receive, send) elif path == "/ndjson": await handle_ndjson(scope, receive, send) elif path == "/health": await handle_health(scope, receive, send) else: await handle_not_found(scope, receive, send) async def handle_lifespan(scope, receive, send): """Handle ASGI lifespan events.""" while True: message = await receive() if message["type"] == "lifespan.startup": await send({"type": "lifespan.startup.complete"}) elif message["type"] == "lifespan.shutdown": await send({"type": "lifespan.shutdown.complete"}) return async def handle_streaming(scope, receive, send): """Basic streaming response without Content-Length.""" await drain_body(receive) # Parse chunk count from query query = scope["query_string"].decode("latin-1") chunks = 5 for param in query.split("&"): if param.startswith("chunks="): try: chunks = int(param[7:]) chunks = min(chunks, 100) except ValueError: pass # Start response without Content-Length (triggers chunked encoding) await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"text/plain"), # No content-length - server should use chunked encoding ], }) # 
Send chunks for i in range(chunks): chunk = f"Chunk {i + 1} of {chunks}\n".encode("utf-8") await send({ "type": "http.response.body", "body": chunk, "more_body": i < chunks - 1, }) # Final empty body to signal end (if not already done) if chunks == 0: await send({ "type": "http.response.body", "body": b"", "more_body": False, }) async def handle_sse(scope, receive, send): """Server-Sent Events stream.""" await drain_body(receive) # Parse event count from query query = scope["query_string"].decode("latin-1") events = 5 delay = 0.5 for param in query.split("&"): if param.startswith("events="): try: events = int(param[7:]) events = min(events, 100) except ValueError: pass elif param.startswith("delay="): try: delay = float(param[6:]) delay = min(delay, 5.0) except ValueError: pass # SSE response headers await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"text/event-stream"), (b"cache-control", b"no-cache"), (b"connection", b"keep-alive"), (b"x-accel-buffering", b"no"), # Disable nginx buffering ], }) # Send SSE events for i in range(events): event_data = { "id": i + 1, "total": events, "timestamp": time.time(), } # Format as SSE sse_message = f"id: {i + 1}\nevent: message\ndata: {json.dumps(event_data)}\n\n" await send({ "type": "http.response.body", "body": sse_message.encode("utf-8"), "more_body": i < events - 1, }) if i < events - 1: await asyncio.sleep(delay) # Send final empty body if needed if events == 0: await send({ "type": "http.response.body", "body": b"", "more_body": False, }) async def handle_chunked(scope, receive, send): """Explicit chunked response with variable chunk sizes.""" await drain_body(receive) # Parse parameters from query query = scope["query_string"].decode("latin-1") chunk_sizes = [100, 500, 1000, 50, 200] # Default varied sizes for param in query.split("&"): if param.startswith("sizes="): try: sizes_str = param[6:] chunk_sizes = [int(s) for s in sizes_str.split(",")] chunk_sizes = [min(s, 100000) 
for s in chunk_sizes] # 100KB max per chunk except ValueError: pass await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"application/octet-stream"), ], }) # Send chunks of specified sizes for i, size in enumerate(chunk_sizes): chunk = bytes([i % 256] * size) await send({ "type": "http.response.body", "body": chunk, "more_body": i < len(chunk_sizes) - 1, }) async def handle_slow_stream(scope, receive, send): """Slow streaming response with configurable delays.""" await drain_body(receive) query = scope["query_string"].decode("latin-1") chunks = 10 delay = 0.5 for param in query.split("&"): if param.startswith("chunks="): try: chunks = int(param[7:]) chunks = min(chunks, 50) except ValueError: pass elif param.startswith("delay="): try: delay = float(param[6:]) delay = min(delay, 5.0) except ValueError: pass await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"text/plain"), ], }) for i in range(chunks): timestamp = time.time() chunk = f"[{timestamp:.3f}] Slow chunk {i + 1}/{chunks}\n".encode("utf-8") await send({ "type": "http.response.body", "body": chunk, "more_body": i < chunks - 1, }) if i < chunks - 1: await asyncio.sleep(delay) async def handle_large_stream(scope, receive, send): """Stream a large response in chunks.""" await drain_body(receive) query = scope["query_string"].decode("latin-1") total_size = 1024 * 1024 # 1MB default chunk_size = 64 * 1024 # 64KB chunks for param in query.split("&"): if param.startswith("size="): try: total_size = int(param[5:]) total_size = min(total_size, 100 * 1024 * 1024) # 100MB max except ValueError: pass elif param.startswith("chunk="): try: chunk_size = int(param[6:]) chunk_size = min(chunk_size, 1024 * 1024) # 1MB max chunk except ValueError: pass await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"application/octet-stream"), ], }) sent = 0 while sent < total_size: remaining = total_size - sent 
current_chunk_size = min(chunk_size, remaining) chunk = b"x" * current_chunk_size sent += current_chunk_size await send({ "type": "http.response.body", "body": chunk, "more_body": sent < total_size, }) async def handle_infinite(scope, receive, send): """Infinite stream (until client disconnects or limit reached).""" await drain_body(receive) query = scope["query_string"].decode("latin-1") max_chunks = 1000 # Safety limit delay = 0.1 for param in query.split("&"): if param.startswith("max="): try: max_chunks = int(param[4:]) max_chunks = min(max_chunks, 10000) except ValueError: pass elif param.startswith("delay="): try: delay = float(param[6:]) delay = max(delay, 0.01) # Min 10ms except ValueError: pass await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"text/plain"), ], }) try: for i in range(max_chunks): chunk = f"Infinite stream chunk {i + 1}\n".encode("utf-8") await send({ "type": "http.response.body", "body": chunk, "more_body": i < max_chunks - 1, }) if i < max_chunks - 1: await asyncio.sleep(delay) except Exception: # Client disconnected pass async def handle_echo_stream(scope, receive, send): """Echo request body as a stream.""" # Start response immediately await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"application/octet-stream"), ], }) # Stream request body to response chunk_count = 0 while True: message = await receive() body = message.get("body", b"") more_body = message.get("more_body", False) if body: chunk_count += 1 # Add chunk info prefix prefix = f"[chunk {chunk_count}]: ".encode("utf-8") await send({ "type": "http.response.body", "body": prefix + body + b"\n", "more_body": True, }) if not more_body: break # Final chunk with summary summary = f"Total chunks received: {chunk_count}\n".encode("utf-8") await send({ "type": "http.response.body", "body": summary, "more_body": False, }) async def handle_ndjson(scope, receive, send): """Newline-delimited JSON stream.""" 
await drain_body(receive) query = scope["query_string"].decode("latin-1") records = 10 delay = 0.2 for param in query.split("&"): if param.startswith("records="): try: records = int(param[8:]) records = min(records, 1000) except ValueError: pass elif param.startswith("delay="): try: delay = float(param[6:]) delay = min(delay, 5.0) except ValueError: pass await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"application/x-ndjson"), ], }) for i in range(records): record = { "id": i + 1, "timestamp": time.time(), "data": f"Record {i + 1}", } line = json.dumps(record) + "\n" await send({ "type": "http.response.body", "body": line.encode("utf-8"), "more_body": i < records - 1, }) if i < records - 1 and delay > 0: await asyncio.sleep(delay) async def handle_health(scope, receive, send): """Health check endpoint.""" await drain_body(receive) body = b"OK" await send({ "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"text/plain"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": "http.response.body", "body": body, "more_body": False, }) async def handle_not_found(scope, receive, send): """Handle 404 Not Found.""" await drain_body(receive) body = b"Not Found" await send({ "type": "http.response.start", "status": 404, "headers": [ (b"content-type", b"text/plain"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": "http.response.body", "body": body, "more_body": False, }) async def drain_body(receive): """Drain the request body.""" while True: message = await receive() if not message.get("more_body", False): break benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/apps/websocket_app.py000066400000000000000000000344021514360242400273570ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ WebSocket test application for ASGI compliance testing. 
Provides various WebSocket endpoints to test RFC 6455 compliance, message handling, and protocol features. """ import json async def app(scope, receive, send): """Main ASGI WebSocket application with multiple test endpoints.""" if scope["type"] == "lifespan": await handle_lifespan(scope, receive, send) return if scope["type"] != "websocket": # Return 404 for non-WebSocket requests if scope["type"] == "http": await send_http_error(send, 404, "WebSocket endpoints only") return path = scope["path"] # Route to appropriate handler if path == "/ws/echo": await handle_echo(scope, receive, send) elif path == "/ws/echo-binary": await handle_echo_binary(scope, receive, send) elif path == "/ws/subprotocol": await handle_subprotocol(scope, receive, send) elif path.startswith("/ws/close"): await handle_close(scope, receive, send) elif path == "/ws/scope": await handle_scope(scope, receive, send) elif path == "/ws/reject": await handle_reject(scope, receive, send) elif path == "/ws/ping": await handle_ping(scope, receive, send) elif path == "/ws/broadcast": await handle_broadcast(scope, receive, send) elif path == "/ws/large": await handle_large_message(scope, receive, send) elif path == "/ws/fragmented": await handle_fragmented(scope, receive, send) elif path == "/ws/delay": await handle_delay(scope, receive, send) else: # Accept but immediately close for unknown paths await handle_unknown(scope, receive, send) async def handle_lifespan(scope, receive, send): """Handle ASGI lifespan events.""" while True: message = await receive() if message["type"] == "lifespan.startup": await send({"type": "lifespan.startup.complete"}) elif message["type"] == "lifespan.shutdown": await send({"type": "lifespan.shutdown.complete"}) return async def handle_echo(scope, receive, send): """Echo text messages back to the client.""" # Wait for connection message = await receive() if message["type"] != "websocket.connect": return # Accept the connection await send({"type": "websocket.accept"}) # Echo 
messages until disconnect while True: message = await receive() if message["type"] == "websocket.receive": # Echo back text messages if "text" in message: await send({ "type": "websocket.send", "text": message["text"], }) elif "bytes" in message: # Convert binary to text for echo await send({ "type": "websocket.send", "text": message["bytes"].decode("utf-8", errors="replace"), }) elif message["type"] == "websocket.disconnect": break async def handle_echo_binary(scope, receive, send): """Echo binary messages back to the client.""" message = await receive() if message["type"] != "websocket.connect": return await send({"type": "websocket.accept"}) while True: message = await receive() if message["type"] == "websocket.receive": if "bytes" in message: await send({ "type": "websocket.send", "bytes": message["bytes"], }) elif "text" in message: # Convert text to binary for echo await send({ "type": "websocket.send", "bytes": message["text"].encode("utf-8"), }) elif message["type"] == "websocket.disconnect": break async def handle_subprotocol(scope, receive, send): """Negotiate WebSocket subprotocol.""" message = await receive() if message["type"] != "websocket.connect": return # Get requested subprotocols requested = scope.get("subprotocols", []) # Prefer graphql-ws, then json, then first available selected = None preferred = ["graphql-ws", "json", "wamp"] for proto in preferred: if proto in requested: selected = proto break if not selected and requested: selected = requested[0] # Accept with selected subprotocol accept_msg = {"type": "websocket.accept"} if selected: accept_msg["subprotocol"] = selected await send(accept_msg) # Send confirmation message response = { "requested": requested, "selected": selected, } await send({ "type": "websocket.send", "text": json.dumps(response), }) # Wait for disconnect while True: message = await receive() if message["type"] == "websocket.disconnect": break async def handle_close(scope, receive, send): """Close connection with specific 
code from query parameter.""" message = await receive() if message["type"] != "websocket.connect": return await send({"type": "websocket.accept"}) # Parse close code from query string query = scope["query_string"].decode("latin-1") close_code = 1000 # Normal closure close_reason = "" for param in query.split("&"): if param.startswith("code="): try: close_code = int(param[5:]) except ValueError: pass elif param.startswith("reason="): close_reason = param[7:] # Send close with specified code close_msg = { "type": "websocket.close", "code": close_code, } if close_reason: close_msg["reason"] = close_reason await send(close_msg) async def handle_scope(scope, receive, send): """Return WebSocket scope as JSON.""" message = await receive() if message["type"] != "websocket.connect": return await send({"type": "websocket.accept"}) # Create JSON-serializable scope scope_json = { "type": scope["type"], "asgi": scope["asgi"], "http_version": scope["http_version"], "scheme": scope["scheme"], "path": scope["path"], "raw_path": scope["raw_path"].decode("latin-1") if scope.get("raw_path") else None, "query_string": scope["query_string"].decode("latin-1") if scope.get("query_string") else "", "root_path": scope.get("root_path", ""), "headers": [ [name.decode("latin-1"), value.decode("latin-1")] for name, value in scope["headers"] ], "server": list(scope["server"]) if scope.get("server") else None, "client": list(scope["client"]) if scope.get("client") else None, "subprotocols": scope.get("subprotocols", []), } await send({ "type": "websocket.send", "text": json.dumps(scope_json, indent=2), }) # Wait for disconnect while True: message = await receive() if message["type"] == "websocket.disconnect": break async def handle_reject(scope, receive, send): """Reject the WebSocket connection.""" message = await receive() if message["type"] != "websocket.connect": return # Close without accepting - this rejects the connection await send({ "type": "websocket.close", "code": 1008, # Policy 
violation "reason": "Connection rejected", }) async def handle_ping(scope, receive, send): """Echo ping messages (handled at protocol level, but test app behavior).""" message = await receive() if message["type"] != "websocket.connect": return await send({"type": "websocket.accept"}) # Send a message indicating ping/pong is handled at protocol level await send({ "type": "websocket.send", "text": json.dumps({ "info": "Ping/pong is handled at the protocol level", "note": "Send any message to test echo", }), }) while True: message = await receive() if message["type"] == "websocket.receive": # Echo back if "text" in message: await send({"type": "websocket.send", "text": message["text"]}) elif "bytes" in message: await send({"type": "websocket.send", "bytes": message["bytes"]}) elif message["type"] == "websocket.disconnect": break async def handle_broadcast(scope, receive, send): """Simple broadcast simulation - echo message multiple times.""" message = await receive() if message["type"] != "websocket.connect": return await send({"type": "websocket.accept"}) # Parse broadcast count from query query = scope["query_string"].decode("latin-1") count = 3 # Default for param in query.split("&"): if param.startswith("count="): try: count = int(param[6:]) count = min(count, 100) # Limit except ValueError: pass while True: message = await receive() if message["type"] == "websocket.receive": text = message.get("text", "") # "Broadcast" by sending multiple copies for i in range(count): await send({ "type": "websocket.send", "text": json.dumps({ "copy": i + 1, "of": count, "message": text, }), }) elif message["type"] == "websocket.disconnect": break async def handle_large_message(scope, receive, send): """Test large message handling.""" message = await receive() if message["type"] != "websocket.connect": return await send({"type": "websocket.accept"}) # Parse size from query query = scope["query_string"].decode("latin-1") size = 64 * 1024 # 64KB default for param in 
query.split("&"): if param.startswith("size="): try: size = int(param[5:]) size = min(size, 1024 * 1024) # 1MB limit except ValueError: pass # Send large message large_data = "x" * size await send({ "type": "websocket.send", "text": large_data, }) # Echo any received messages while True: message = await receive() if message["type"] == "websocket.receive": if "text" in message: response = { "received_length": len(message["text"]), "sent_length": size, } await send({ "type": "websocket.send", "text": json.dumps(response), }) elif message["type"] == "websocket.disconnect": break async def handle_fragmented(scope, receive, send): """Test fragmented message handling (assembled by protocol).""" message = await receive() if message["type"] != "websocket.connect": return await send({"type": "websocket.accept"}) await send({ "type": "websocket.send", "text": json.dumps({ "info": "Fragmented frames are assembled at protocol level", "note": "This app receives complete messages", }), }) # Echo messages with length info while True: message = await receive() if message["type"] == "websocket.receive": if "text" in message: await send({ "type": "websocket.send", "text": json.dumps({ "received": message["text"], "length": len(message["text"]), "type": "text", }), }) elif "bytes" in message: await send({ "type": "websocket.send", "text": json.dumps({ "length": len(message["bytes"]), "type": "binary", }), }) elif message["type"] == "websocket.disconnect": break async def handle_delay(scope, receive, send): """Test delayed responses.""" import asyncio message = await receive() if message["type"] != "websocket.connect": return await send({"type": "websocket.accept"}) # Parse delay from query query = scope["query_string"].decode("latin-1") delay = 1.0 for param in query.split("&"): if param.startswith("seconds="): try: delay = float(param[8:]) delay = min(delay, 30.0) # 30s limit except ValueError: pass while True: message = await receive() if message["type"] == "websocket.receive": 
await asyncio.sleep(delay) if "text" in message: await send({ "type": "websocket.send", "text": json.dumps({ "delayed_by": delay, "message": message["text"], }), }) elif message["type"] == "websocket.disconnect": break async def handle_unknown(scope, receive, send): """Handle unknown WebSocket paths - accept then close.""" message = await receive() if message["type"] != "websocket.connect": return await send({"type": "websocket.accept"}) await send({ "type": "websocket.send", "text": json.dumps({ "error": "Unknown path", "path": scope["path"], }), }) await send({ "type": "websocket.close", "code": 1000, }) async def send_http_error(send, status, message): """Send HTTP error response (for non-WebSocket requests).""" body = message.encode("utf-8") await send({ "type": "http.response.start", "status": status, "headers": [ (b"content-type", b"text/plain"), (b"content-length", str(len(body)).encode()), ], }) await send({ "type": "http.response.body", "body": body, "more_body": False, }) benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/certs/000077500000000000000000000000001514360242400243315ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/certs/server.crt000066400000000000000000000024261514360242400263550ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDkzCCAnugAwIBAgIUHdlZ9co55+sdCalL7KLszmMTEzgwDQYJKoZIhvcNAQEL BQAwOTESMBAGA1UEAwwJbG9jYWxob3N0MRYwFAYDVQQKDA1HdW5pY29ybiBUZXN0 MQswCQYDVQQGEwJVUzAeFw0yNjAyMDIxMDIwMDRaFw0yNjAyMDMxMDIwMDRaMDkx EjAQBgNVBAMMCWxvY2FsaG9zdDEWMBQGA1UECgwNR3VuaWNvcm4gVGVzdDELMAkG A1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDS07vGRH7C i7LBJp5fn+i/vQoaE7y9MVqTN4SH1iSJgUti6fAYBQkCGsC1X0QDHaffsH17p5zV DY6pNEdpOfM9cbIhtWl078jTsSsuHRnBg2g3zcyaXNN7voruKoAgrN5gTBpY1yAx iW7s431EwEBd4MGQm++FOn83Dw2uAa5Xfdf4HMo4EDwAfVLir89th63L9q3rxGGY t+C1XzQ54t2EnHpOycnDkgAlRogRC8Js+14eVwSZcsWTqEHLp9lal74BTRpY9GiS mktm4p71IBqqB1dnIByii2kBNuCzJDhAFdLqjLv81iZirfZx0pGfcvR6iARCLKLA 
OOcB7jz5rycLAgMBAAGjgZIwgY8wHQYDVR0OBBYEFN8wH1VLJd6rbI53UgHM2xSD e99DMB8GA1UdIwQYMBaAFN8wH1VLJd6rbI53UgHM2xSDe99DMA8GA1UdEwEB/wQF MAMBAf8wPAYDVR0RBDUwM4IJbG9jYWxob3N0gg1ndW5pY29ybi1hc2dpghFndW5p Y29ybi1hc2dpLXNzbIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAMe8So/3/bGe7 n/xoeij6BZrX3O1hTNy3iUeAxuhyLS9o00Z7B9swgwiPnHz3/2JnxXzZH5XXX5XI DbT36LY2CzPERYkmWmo5w2JZ8wneN/J/LuLF5djpjwM+ItLZlDNUnZoETqWmsur1 Y0e+G3lUN9dc3XchOq7ONqmoWGNDzlO/LGytnLBhsw5v4mnKeDSwPeD2CdAQ8Cl0 zcdYOibetAG4nLsrDvFYPxYNtQGNsAKji/Wg1pc9WtbSBFennW0T9pFKuYBAavdQ KHzlYBexBiGNWWu5XlXpA7YMFm2Na8m3C4A/oxIiJL3lc1i+GlxQ2cTKNIPekwbH fjKuvNNfcw== -----END CERTIFICATE----- benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/certs/server.key000066400000000000000000000032501514360242400263510ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDS07vGRH7Ci7LB Jp5fn+i/vQoaE7y9MVqTN4SH1iSJgUti6fAYBQkCGsC1X0QDHaffsH17p5zVDY6p NEdpOfM9cbIhtWl078jTsSsuHRnBg2g3zcyaXNN7voruKoAgrN5gTBpY1yAxiW7s 431EwEBd4MGQm++FOn83Dw2uAa5Xfdf4HMo4EDwAfVLir89th63L9q3rxGGYt+C1 XzQ54t2EnHpOycnDkgAlRogRC8Js+14eVwSZcsWTqEHLp9lal74BTRpY9GiSmktm 4p71IBqqB1dnIByii2kBNuCzJDhAFdLqjLv81iZirfZx0pGfcvR6iARCLKLAOOcB 7jz5rycLAgMBAAECggEABuX6s1NM1BzLrcCpVsOquZHIuzwa5Ud9VgSioR2dEHEn zR+OeM3uLFPa9/q00c1Hz7mJdMeLo16c/mUn7DkczM19k9cvXi4oyhR2YnBKH9tA e7yPwOh5dKEYqdy/vuuPhmMdfmHiCDAd1KgU7AnGXjJoFjOkALeYFgfq3Xi2Naw1 qMqWnCjKoR/0WmCqozrQ4KAh8GD105D7bB69kP4qwNz5HYbfWLFI4naY6EPmc4wV coadcK2GKjGQSWc+EAmimc7nVdogR2RrA3TGwEc+dAUn8oYHWfz8uK9SRZpNQTI4 S5sqNQL5UsrM0NYcSFW6eg0bhBl0YmqyHr7bfdL+EQKBgQDrxNUtgPR5oDEQTO8p rwpzVbdfeJBmtr4Aw0LZn5DAEX8LxrbwESK9jnClEHCoCosgWZuYrJ+c5716Rlid vdKQkqOfpf+4JW88oMTjuzYR0wFJ5rHC3+OctCzjgYrfanhALzhQKqiHhCrcjWPZ xm0lxz1oMKQoDNoT7ab+UhhRHQKBgQDk6wA/CBJ/JqOQ2+wTifqpK65PlTDaA5e4 qEQrQ66kOhVpdTaDOMgtdwvsBSQ1t5CL3b8ytO9gGRBBXVlri8518F5fJrlRRBDX TP5hkJXOw/gpJAiCie7dPpChu7nDkq6JxmzMEYw5wf1AIzYwarauNRWDVPyaU/nD rJY/GTrIRwKBgAuF1DFkIw6qsJsuV2X/IxCd+NdWqiALAGBDKso+DTIF6OKndJtp 
CvyesIywsADWexQ6rOsaTLa7cLxAIeabt2XPdOXBlCzoz3X0GYtTxAG9AUweVUPD 83jeKW95DlN6/aONa0AnxZLR99JNqrqjAwScpzinX+6BKktdCxNU6dFVAoGAPIwD lqhV7BeWL5xbhpd6GwCYrCfzsdY9bPPkg+T07i8GtsvvzSlZmNzh5F0/xI12x+ew yIKexbYbXI6KNi3WP8+Bxn0BiwMLyUZuCfQqC3Q90PPc5FoDObVwn7Z9XcMQMxSu dhM2GZi7mRk3Hfs7sjwMIp556X/Ikf62Bp5vs8UCgYEA1nGfXK6DpMGlnEDno3X2 cWHV1MgDE6ojR0GHMsQQvuQVj/cHNgCJmDEBtTlq7/cM7HPPmNSCBteHfDQQ7UXy ViEQgo6p9NOByr73zmxlhGEirHE/hUmF8qOHYBvjgPU+jVEKF5yRiz0T3sC5Z3bQ AhTGjfXfHsH7SvdrQQNl4DE= -----END PRIVATE KEY----- benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/conftest.py000066400000000000000000000277261514360242400254260ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Pytest fixtures for ASGI compliance Docker integration tests.""" import subprocess import time import socket from pathlib import Path import pytest # Directory containing this conftest.py DOCKER_DIR = Path(__file__).parent CERTS_DIR = DOCKER_DIR / "certs" def generate_self_signed_cert(certs_dir: Path) -> None: """Generate self-signed SSL certificates for testing.""" certs_dir.mkdir(parents=True, exist_ok=True) cert_file = certs_dir / "server.crt" key_file = certs_dir / "server.key" # Skip if certs already exist and are recent (less than 1 day old) if cert_file.exists() and key_file.exists(): age = time.time() - cert_file.stat().st_mtime if age < 86400: # 1 day return # Generate self-signed certificate subprocess.run( [ "openssl", "req", "-x509", "-newkey", "rsa:2048", "-keyout", str(key_file), "-out", str(cert_file), "-days", "1", "-nodes", "-subj", "/CN=localhost/O=Gunicorn Test/C=US", "-addext", "subjectAltName=DNS:localhost,DNS:gunicorn-asgi,DNS:gunicorn-asgi-ssl,IP:127.0.0.1" ], check=True, capture_output=True ) # Set readable permissions cert_file.chmod(0o644) key_file.chmod(0o644) def wait_for_http_service(host: str, port: int, timeout: int = 60) -> bool: """Wait for an HTTP service to become available.""" 
start_time = time.time() while time.time() - start_time < timeout: try: with socket.create_connection((host, port), timeout=5): return True except (socket.error, OSError): time.sleep(1) return False def wait_for_https_service(host: str, port: int, timeout: int = 60) -> bool: """Wait for an HTTPS service to become available.""" import ssl start_time = time.time() while time.time() - start_time < timeout: try: ctx = ssl.create_default_context() ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE with socket.create_connection((host, port), timeout=5) as sock: with ctx.wrap_socket(sock, server_hostname=host): return True except (socket.error, ssl.SSLError, OSError): time.sleep(1) return False @pytest.fixture(scope="session") def docker_compose_file(): """Return the path to docker-compose.yml.""" return DOCKER_DIR / "docker-compose.yml" @pytest.fixture(scope="session") def certs_dir(): """Generate and return the certs directory.""" generate_self_signed_cert(CERTS_DIR) return CERTS_DIR @pytest.fixture(scope="session") def docker_services(docker_compose_file, certs_dir): """Start Docker services for the test session.""" compose_file = str(docker_compose_file) # Check if Docker is available try: subprocess.run( ["docker", "info"], check=True, capture_output=True ) except (subprocess.CalledProcessError, FileNotFoundError): pytest.skip("Docker is not available") # Check if docker compose is available try: subprocess.run( ["docker", "compose", "version"], check=True, capture_output=True ) except subprocess.CalledProcessError: pytest.skip("Docker Compose is not available") # Build and start services try: subprocess.run( ["docker", "compose", "-f", compose_file, "build"], check=True, cwd=DOCKER_DIR ) subprocess.run( ["docker", "compose", "-f", compose_file, "up", "-d"], check=True, cwd=DOCKER_DIR ) # Wait for services to be healthy gunicorn_http_ready = wait_for_http_service("127.0.0.1", 8000, timeout=60) gunicorn_https_ready = wait_for_https_service("127.0.0.1", 8445, 
timeout=60) nginx_http_ready = wait_for_http_service("127.0.0.1", 8080, timeout=60) nginx_https_ready = wait_for_https_service("127.0.0.1", 8444, timeout=60) if not gunicorn_http_ready: result = subprocess.run( ["docker", "compose", "-f", compose_file, "logs", "gunicorn-asgi"], capture_output=True, text=True, cwd=DOCKER_DIR ) pytest.fail(f"Gunicorn HTTP service failed to start. Logs:\n{result.stdout}\n{result.stderr}") if not gunicorn_https_ready: result = subprocess.run( ["docker", "compose", "-f", compose_file, "logs", "gunicorn-asgi-ssl"], capture_output=True, text=True, cwd=DOCKER_DIR ) pytest.fail(f"Gunicorn HTTPS service failed to start. Logs:\n{result.stdout}\n{result.stderr}") if not nginx_http_ready or not nginx_https_ready: result = subprocess.run( ["docker", "compose", "-f", compose_file, "logs", "nginx-proxy"], capture_output=True, text=True, cwd=DOCKER_DIR ) pytest.fail(f"Nginx service failed to start. Logs:\n{result.stdout}\n{result.stderr}") yield { "gunicorn_http": "http://127.0.0.1:8000", "gunicorn_https": "https://127.0.0.1:8445", "nginx_http": "http://127.0.0.1:8080", "nginx_https": "https://127.0.0.1:8444", } finally: # Stop and remove services subprocess.run( ["docker", "compose", "-f", compose_file, "down", "-v", "--remove-orphans"], cwd=DOCKER_DIR, capture_output=True ) # ============================================================================ # URL Fixtures # ============================================================================ @pytest.fixture def gunicorn_url(docker_services): """Return the gunicorn HTTP service URL.""" return docker_services["gunicorn_http"] @pytest.fixture def gunicorn_ssl_url(docker_services): """Return the gunicorn HTTPS service URL.""" return docker_services["gunicorn_https"] @pytest.fixture def nginx_url(docker_services): """Return the nginx HTTP proxy URL.""" return docker_services["nginx_http"] @pytest.fixture def nginx_ssl_url(docker_services): """Return the nginx HTTPS proxy URL.""" return 
docker_services["nginx_https"] # ============================================================================ # HTTP Client Fixtures # ============================================================================ @pytest.fixture def http_client(): """Create a standard HTTP client.""" httpx = pytest.importorskip("httpx") client = httpx.Client(verify=False, timeout=30.0, follow_redirects=False) yield client client.close() @pytest.fixture def http2_client(): """Create an HTTP/2 capable client.""" httpx = pytest.importorskip("httpx") client = httpx.Client(http2=True, verify=False, timeout=30.0) yield client client.close() @pytest.fixture async def async_http_client(): """Create an async HTTP client.""" httpx = pytest.importorskip("httpx") async with httpx.AsyncClient(verify=False, timeout=30.0) as client: yield client @pytest.fixture def async_http_client_factory(): """Factory for creating async HTTP clients.""" httpx = pytest.importorskip("httpx") async def create_client(**kwargs): defaults = {"verify": False, "timeout": 30.0} defaults.update(kwargs) return httpx.AsyncClient(**defaults) return create_client # ============================================================================ # WebSocket Client Fixtures # ============================================================================ @pytest.fixture def websocket_connect(): """Factory for creating WebSocket connections.""" websockets = pytest.importorskip("websockets") async def connect(url, **kwargs): """Connect to a WebSocket endpoint. 
Args: url: WebSocket URL (ws:// or wss://) **kwargs: Additional arguments for websockets.connect() Returns: WebSocket connection """ import ssl # Default SSL context for wss:// if url.startswith("wss://") and "ssl" not in kwargs: ssl_context = ssl.create_default_context() ssl_context.check_hostname = False ssl_context.verify_mode = ssl.CERT_NONE kwargs["ssl"] = ssl_context return await websockets.connect(url, **kwargs) return connect # ============================================================================ # Streaming Client Fixtures # ============================================================================ @pytest.fixture def sse_client(): """Create a client for Server-Sent Events.""" httpx = pytest.importorskip("httpx") class SSEClient: def __init__(self): self.client = httpx.Client(verify=False, timeout=60.0) def stream(self, url): """Stream SSE events from URL.""" with self.client.stream("GET", url, headers={"Accept": "text/event-stream"}) as response: buffer = "" for chunk in response.iter_text(): buffer += chunk while "\n\n" in buffer: event, buffer = buffer.split("\n\n", 1) yield self._parse_event(event) def _parse_event(self, event_text): """Parse an SSE event.""" event = {"data": None, "event": None, "id": None} for line in event_text.strip().split("\n"): if line.startswith("data: "): event["data"] = line[6:] elif line.startswith("event: "): event["event"] = line[7:] elif line.startswith("id: "): event["id"] = line[4:] return event def close(self): self.client.close() client = SSEClient() yield client client.close() @pytest.fixture def streaming_client(): """Create a client for chunked/streaming responses.""" httpx = pytest.importorskip("httpx") class StreamingClient: def __init__(self): self.client = httpx.Client(verify=False, timeout=60.0) def stream_chunks(self, url, method="GET", **kwargs): """Stream response chunks from URL.""" with self.client.stream(method, url, **kwargs) as response: for chunk in response.iter_bytes(): if chunk: yield 
chunk def stream_lines(self, url, method="GET", **kwargs): """Stream response lines from URL.""" with self.client.stream(method, url, **kwargs) as response: for line in response.iter_lines(): yield line def close(self): self.client.close() client = StreamingClient() yield client client.close() # ============================================================================ # Test Markers # ============================================================================ def pytest_configure(config): """Configure custom pytest markers.""" config.addinivalue_line("markers", "docker: tests requiring Docker") config.addinivalue_line("markers", "asgi: ASGI-related tests") config.addinivalue_line("markers", "websocket: WebSocket tests") config.addinivalue_line("markers", "streaming: Streaming response tests") config.addinivalue_line("markers", "lifespan: Lifespan protocol tests") config.addinivalue_line("markers", "framework: Framework integration tests") config.addinivalue_line("markers", "concurrency: Concurrency tests") config.addinivalue_line("markers", "http2: HTTP/2 specific tests") config.addinivalue_line("markers", "ssl: SSL/TLS tests") config.addinivalue_line("markers", "integration: Integration tests") benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/docker-compose.yml000066400000000000000000000034541514360242400266540ustar00rootroot00000000000000services: gunicorn-asgi: build: context: ../../../ dockerfile: tests/docker/asgi_compliance/Dockerfile.gunicorn ports: - "8000:8000" # HTTP - "8443:8443" # HTTPS volumes: - ./certs:/certs:ro - ./apps:/app/apps:ro environment: - GUNICORN_CERTFILE=/certs/server.crt - GUNICORN_KEYFILE=/certs/server.key healthcheck: test: ["CMD", "python", "-c", "import socket; s=socket.socket(); s.settimeout(2); s.connect(('localhost',8000)); s.close()"] interval: 2s timeout: 5s retries: 15 start_period: 5s gunicorn-asgi-ssl: build: context: ../../../ dockerfile: tests/docker/asgi_compliance/Dockerfile.gunicorn ports: - "8445:8443" volumes: 
- ./certs:/certs:ro - ./apps:/app/apps:ro environment: - GUNICORN_CERTFILE=/certs/server.crt - GUNICORN_KEYFILE=/certs/server.key - USE_SSL=1 healthcheck: test: ["CMD", "python", "-c", "import ssl,socket; s=socket.socket(); s.settimeout(2); ctx=ssl.create_default_context(); ctx.check_hostname=False; ctx.verify_mode=ssl.CERT_NONE; ss=ctx.wrap_socket(s,server_hostname='localhost'); ss.connect(('localhost',8443)); ss.close()"] interval: 2s timeout: 5s retries: 15 start_period: 5s nginx-proxy: build: context: . dockerfile: Dockerfile.nginx ports: - "8080:8080" # HTTP proxy - "8444:8444" # HTTPS proxy volumes: - ./certs:/certs:ro - ./nginx.conf:/etc/nginx/nginx.conf:ro depends_on: gunicorn-asgi: condition: service_healthy healthcheck: test: ["CMD", "curl", "-f", "http://localhost:8080/health"] interval: 2s timeout: 5s retries: 15 start_period: 5s networks: default: driver: bridge benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/nginx.conf000066400000000000000000000161371514360242400252130ustar00rootroot00000000000000worker_processes auto; error_log /var/log/nginx/error.log warn; pid /var/run/nginx.pid; events { worker_connections 1024; } http { include /etc/nginx/mime.types; default_type application/octet-stream; # Use Docker DNS resolver, IPv4 only to avoid IPv6 connection issues resolver 127.0.0.11 ipv6=off valid=10s; log_format main '$remote_addr - $remote_user [$time_local] "$request" ' '$status $body_bytes_sent "$http_referer" ' '"$http_user_agent" "$http_x_forwarded_for"'; access_log /var/log/nginx/access.log main; sendfile on; keepalive_timeout 65; # Map for WebSocket upgrade - use empty string for non-WebSocket to enable keepalive map $http_upgrade $connection_upgrade { default upgrade; '' ''; } upstream gunicorn_asgi { server gunicorn-asgi:8000 max_fails=0; keepalive 32; } upstream gunicorn_asgi_ssl { server gunicorn-asgi-ssl:8443 max_fails=0; keepalive 32; } # HTTP server (port 8080) server { listen 8080; server_name localhost; # Increase body size limit 
for large request tests client_max_body_size 100m; # Health check endpoint location /health { return 200 'OK'; add_header Content-Type text/plain; } # WebSocket locations location /ws/ { proxy_pass http://gunicorn_asgi; proxy_http_version 1.1; # WebSocket upgrade headers proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection $connection_upgrade; # Standard proxy headers proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; # WebSocket timeouts proxy_connect_timeout 60s; proxy_send_timeout 300s; proxy_read_timeout 300s; } # Streaming locations - disable buffering location /stream/ { proxy_pass http://gunicorn_asgi; proxy_http_version 1.1; # Disable buffering for streaming proxy_buffering off; proxy_cache off; # SSE specific proxy_set_header Connection ''; chunked_transfer_encoding on; # Headers proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header X-Accel-Buffering no; # Longer timeouts for streaming proxy_connect_timeout 60s; proxy_send_timeout 300s; proxy_read_timeout 300s; } # Default location location / { proxy_pass http://gunicorn_asgi; proxy_http_version 1.1; # Retry on connection errors proxy_next_upstream error timeout http_502; proxy_next_upstream_tries 2; # Headers proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header X-Forwarded-Host $host; proxy_set_header X-Forwarded-Port $server_port; # Support WebSocket upgrade if requested proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection $connection_upgrade; # Buffering settings proxy_buffering on; proxy_buffer_size 4k; proxy_buffers 8 4k; # Timeouts proxy_connect_timeout 60s; proxy_send_timeout 
60s; proxy_read_timeout 60s; } } # HTTPS server (port 8444) server { listen 8444 ssl; http2 on; server_name localhost; ssl_certificate /certs/server.crt; ssl_certificate_key /certs/server.key; ssl_protocols TLSv1.2 TLSv1.3; ssl_ciphers HIGH:!aNULL:!MD5; ssl_prefer_server_ciphers on; # HTTP/2 settings http2_max_concurrent_streams 128; # Increase body size limit client_max_body_size 100m; # Health check endpoint location /health { return 200 'OK'; add_header Content-Type text/plain; } # WebSocket locations (over HTTPS) location /ws/ { proxy_pass http://gunicorn_asgi; proxy_http_version 1.1; # WebSocket upgrade headers proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection $connection_upgrade; # Standard proxy headers proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; # WebSocket timeouts proxy_connect_timeout 60s; proxy_send_timeout 300s; proxy_read_timeout 300s; } # Streaming locations - disable buffering location /stream/ { proxy_pass http://gunicorn_asgi; proxy_http_version 1.1; # Disable buffering for streaming proxy_buffering off; proxy_cache off; # SSE specific proxy_set_header Connection ''; chunked_transfer_encoding on; # Headers proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header X-Accel-Buffering no; # Longer timeouts for streaming proxy_connect_timeout 60s; proxy_send_timeout 300s; proxy_read_timeout 300s; } # Default location location / { proxy_pass http://gunicorn_asgi; proxy_http_version 1.1; # Retry on connection errors proxy_next_upstream error timeout http_502; proxy_next_upstream_tries 2; # Headers proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; 
proxy_set_header X-Forwarded-Host $host; proxy_set_header X-Forwarded-Port $server_port; # Support WebSocket upgrade if requested proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection $connection_upgrade; # Buffering settings proxy_buffering on; proxy_buffer_size 4k; proxy_buffers 8 4k; # Timeouts proxy_connect_timeout 60s; proxy_send_timeout 60s; proxy_read_timeout 60s; } } } benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/test_concurrency.py000066400000000000000000000422671514360242400271670ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Concurrency integration tests for ASGI. Tests concurrent connections, mixed protocols, and load handling. """ import asyncio import json import pytest pytestmark = [ pytest.mark.docker, pytest.mark.asgi, pytest.mark.concurrency, pytest.mark.integration, ] # ============================================================================ # Concurrent HTTP Requests # ============================================================================ @pytest.mark.asyncio class TestConcurrentHTTP: """Test concurrent HTTP request handling.""" async def test_concurrent_simple_requests(self, async_http_client_factory, gunicorn_url): """Test many concurrent simple requests.""" async with await async_http_client_factory() as client: async def make_request(i): response = await client.get(f"{gunicorn_url}/http/") return response.status_code, i tasks = [make_request(i) for i in range(50)] results = await asyncio.gather(*tasks) # All should succeed assert all(status == 200 for status, _ in results) async def test_concurrent_echo_requests(self, async_http_client_factory, gunicorn_url): """Test concurrent echo requests with unique data.""" async with await async_http_client_factory() as client: async def echo_request(i): data = f"request_{i}" response = await client.post( f"{gunicorn_url}/http/echo", content=data.encode() ) return response.text == 
data, i tasks = [echo_request(i) for i in range(30)] results = await asyncio.gather(*tasks) # All should echo correctly assert all(success for success, _ in results) async def test_concurrent_different_endpoints(self, async_http_client_factory, gunicorn_url): """Test concurrent requests to different endpoints.""" async with await async_http_client_factory() as client: async def get_root(): return await client.get(f"{gunicorn_url}/http/") async def get_headers(): return await client.get(f"{gunicorn_url}/http/headers") async def get_scope(): return await client.get(f"{gunicorn_url}/http/scope") async def get_health(): return await client.get(f"{gunicorn_url}/http/health") # Mix of different endpoints tasks = [ get_root(), get_headers(), get_scope(), get_health(), get_root(), get_headers(), get_scope(), get_health(), get_root(), get_headers(), get_scope(), get_health(), ] results = await asyncio.gather(*tasks) assert all(r.status_code == 200 for r in results) async def test_concurrent_with_delays(self, async_http_client_factory, gunicorn_url): """Test concurrent requests with varying delays.""" async with await async_http_client_factory(timeout=30.0) as client: async def delayed_request(delay_ms): response = await client.get( f"{gunicorn_url}/http/delay?ms={delay_ms}" ) return response.status_code == 200 # Various delays delays = [100, 200, 50, 150, 100, 200, 50] tasks = [delayed_request(d) for d in delays] results = await asyncio.gather(*tasks) assert all(results) # ============================================================================ # Concurrent WebSocket Connections # ============================================================================ @pytest.mark.asyncio class TestConcurrentWebSocket: """Test concurrent WebSocket connections.""" async def test_many_concurrent_websockets(self, websocket_connect, gunicorn_url): """Test many concurrent WebSocket connections.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/echo" async def ws_echo(i): async 
with await websocket_connect(ws_url) as ws: message = f"concurrent_{i}" await ws.send(message) response = await ws.recv() return response == message tasks = [ws_echo(i) for i in range(20)] results = await asyncio.gather(*tasks) assert all(results) async def test_concurrent_websocket_many_messages(self, websocket_connect, gunicorn_url): """Test concurrent WebSocket connections with many messages each.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/echo" async def ws_multiple_messages(conn_id): async with await websocket_connect(ws_url) as ws: for i in range(10): message = f"conn_{conn_id}_msg_{i}" await ws.send(message) response = await ws.recv() if response != message: return False return True tasks = [ws_multiple_messages(i) for i in range(10)] results = await asyncio.gather(*tasks) assert all(results) # ============================================================================ # Mixed Protocol Tests # ============================================================================ @pytest.mark.asyncio class TestMixedProtocols: """Test mixed HTTP and WebSocket concurrent access.""" async def test_http_and_websocket_concurrent( self, async_http_client_factory, websocket_connect, gunicorn_url ): """Test concurrent HTTP and WebSocket requests.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/echo" async def http_request(client): response = await client.get(f"{gunicorn_url}/http/") return response.status_code == 200 async def websocket_echo(): async with await websocket_connect(ws_url) as ws: await ws.send("mixed") response = await ws.recv() return response == "mixed" async with await async_http_client_factory() as client: # Interleaved HTTP and WebSocket tasks tasks = [ http_request(client), websocket_echo(), http_request(client), websocket_echo(), http_request(client), websocket_echo(), ] results = await asyncio.gather(*tasks) assert all(results) async def test_streaming_and_http_concurrent( self, async_http_client_factory, gunicorn_url ): 
"""Test concurrent streaming and regular HTTP requests.""" async with await async_http_client_factory(timeout=60.0) as client: async def regular_request(): response = await client.get(f"{gunicorn_url}/http/") return response.status_code == 200 async def streaming_request(): async with client.stream( "GET", f"{gunicorn_url}/stream/streaming?chunks=5" ) as response: chunks = [] async for chunk in response.aiter_bytes(): chunks.append(chunk) return len(chunks) > 0 tasks = [ regular_request(), streaming_request(), regular_request(), streaming_request(), regular_request(), ] results = await asyncio.gather(*tasks) assert all(results) # ============================================================================ # Connection Reuse Tests # ============================================================================ @pytest.mark.asyncio class TestConnectionReuse: """Test connection reuse and keep-alive.""" async def test_many_requests_single_client( self, async_http_client_factory, gunicorn_url ): """Test many sequential requests on single client.""" async with await async_http_client_factory() as client: for i in range(100): response = await client.get(f"{gunicorn_url}/http/?iter={i}") assert response.status_code == 200 async def test_keep_alive_stress(self, async_http_client_factory, gunicorn_url): """Test keep-alive under stress.""" async with await async_http_client_factory() as client: # Rapid sequential requests for _ in range(50): tasks = [ client.get(f"{gunicorn_url}/http/"), client.get(f"{gunicorn_url}/http/headers"), ] results = await asyncio.gather(*tasks) assert all(r.status_code == 200 for r in results) # ============================================================================ # Load Tests # ============================================================================ @pytest.mark.asyncio class TestLoad: """Test load handling.""" async def test_burst_requests(self, async_http_client_factory, gunicorn_url): """Test handling burst of requests.""" async with 
await async_http_client_factory() as client: async def burst(): tasks = [ client.get(f"{gunicorn_url}/http/") for _ in range(100) ] return await asyncio.gather(*tasks, return_exceptions=True) results = await burst() # Count successful responses success = sum( 1 for r in results if not isinstance(r, Exception) and r.status_code == 200 ) # Most should succeed (allow for some failures under load) assert success >= 90, f"Only {success}/100 requests succeeded" async def test_sustained_load(self, async_http_client_factory, gunicorn_url): """Test sustained load over time.""" async with await async_http_client_factory() as client: success_count = 0 total = 0 # 5 iterations of 20 concurrent requests for _ in range(5): tasks = [ client.get(f"{gunicorn_url}/http/") for _ in range(20) ] results = await asyncio.gather(*tasks, return_exceptions=True) for r in results: total += 1 if not isinstance(r, Exception) and r.status_code == 200: success_count += 1 # Small delay between batches await asyncio.sleep(0.1) # High success rate expected assert success_count / total >= 0.95 # ============================================================================ # Resource Exhaustion Tests # ============================================================================ @pytest.mark.asyncio class TestResourceHandling: """Test handling of resource constraints.""" async def test_many_small_requests(self, async_http_client_factory, gunicorn_url): """Test many small requests.""" async with await async_http_client_factory() as client: tasks = [ client.get(f"{gunicorn_url}/http/health") for _ in range(200) ] results = await asyncio.gather(*tasks, return_exceptions=True) success = sum( 1 for r in results if not isinstance(r, Exception) and r.status_code == 200 ) assert success >= 180 # Allow some failures async def test_concurrent_large_responses( self, async_http_client_factory, gunicorn_url ): """Test concurrent large response handling.""" async with await async_http_client_factory(timeout=60.0) as 
client: async def large_request(): response = await client.get( f"{gunicorn_url}/stream/large-stream?size=102400" # 100KB ) return len(response.content) == 102400 tasks = [large_request() for _ in range(10)] results = await asyncio.gather(*tasks) assert all(results) # ============================================================================ # Proxy Concurrency Tests # ============================================================================ @pytest.mark.asyncio class TestProxyConcurrency: """Test concurrent access through proxy.""" async def test_proxy_concurrent_http(self, async_http_client_factory, nginx_url): """Test concurrent HTTP through proxy.""" async with await async_http_client_factory() as client: tasks = [ client.get(f"{nginx_url}/http/") for _ in range(30) ] results = await asyncio.gather(*tasks, return_exceptions=True) # Allow for some failures in concurrent proxy requests successes = [r for r in results if not isinstance(r, Exception) and r.status_code == 200] assert len(successes) >= 25 # At least 25/30 should succeed async def test_proxy_concurrent_websocket(self, websocket_connect, nginx_url): """Test concurrent WebSocket through proxy.""" ws_url = nginx_url.replace("http://", "ws://") + "/ws/echo" async def ws_echo(i): async with await websocket_connect(ws_url) as ws: await ws.send(f"proxy_{i}") response = await ws.recv() return response == f"proxy_{i}" tasks = [ws_echo(i) for i in range(10)] results = await asyncio.gather(*tasks) assert all(results) # ============================================================================ # HTTPS Concurrency Tests # ============================================================================ @pytest.mark.ssl @pytest.mark.asyncio class TestHTTPSConcurrency: """Test concurrent HTTPS access.""" async def test_https_concurrent_http( self, async_http_client_factory, gunicorn_ssl_url ): """Test concurrent HTTPS requests.""" async with await async_http_client_factory() as client: tasks = [ 
client.get(f"{gunicorn_ssl_url}/http/") for _ in range(20) ] results = await asyncio.gather(*tasks) assert all(r.status_code == 200 for r in results) async def test_https_concurrent_websocket( self, websocket_connect, gunicorn_ssl_url ): """Test concurrent WebSocket over HTTPS.""" ws_url = gunicorn_ssl_url.replace("https://", "wss://") + "/ws/echo" async def ws_echo(i): async with await websocket_connect(ws_url) as ws: await ws.send(f"secure_{i}") response = await ws.recv() return response == f"secure_{i}" tasks = [ws_echo(i) for i in range(10)] results = await asyncio.gather(*tasks) assert all(results) # ============================================================================ # Stress Tests # ============================================================================ @pytest.mark.asyncio class TestStress: """Stress tests for edge cases.""" async def test_rapid_connect_disconnect( self, async_http_client_factory, gunicorn_url ): """Test rapid connection and disconnection.""" for _ in range(20): async with await async_http_client_factory() as client: response = await client.get(f"{gunicorn_url}/http/") assert response.status_code == 200 async def test_rapid_websocket_connect_disconnect( self, websocket_connect, gunicorn_url ): """Test rapid WebSocket connect/disconnect.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/echo" for i in range(20): async with await websocket_connect(ws_url) as ws: await ws.send(f"rapid_{i}") response = await ws.recv() assert response == f"rapid_{i}" async def test_mixed_success_and_error_paths( self, async_http_client_factory, gunicorn_url ): """Test mixed success and error responses concurrently.""" async with await async_http_client_factory() as client: async def success_request(): return await client.get(f"{gunicorn_url}/http/") async def error_request(): return await client.get(f"{gunicorn_url}/http/status?code=500") async def not_found_request(): return await client.get(f"{gunicorn_url}/http/nonexistent") tasks = [ 
success_request(), error_request(), not_found_request(), success_request(), error_request(), not_found_request(), ] results = await asyncio.gather(*tasks) # Check expected status codes expected = [200, 500, 404, 200, 500, 404] for result, expected_status in zip(results, expected): assert result.status_code == expected_status benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/test_framework_integration.py000066400000000000000000000473141514360242400312330ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Framework integration tests for ASGI. Tests integration with popular ASGI frameworks like Starlette and FastAPI. """ import json import pytest pytestmark = [ pytest.mark.docker, pytest.mark.asgi, pytest.mark.framework, pytest.mark.integration, ] # ============================================================================ # Framework Availability Tests # ============================================================================ class TestFrameworkAvailability: """Test framework availability.""" def test_framework_root_endpoint(self, http_client, gunicorn_url): """Test framework root returns available frameworks.""" response = http_client.get(f"{gunicorn_url}/framework/") assert response.status_code == 200 data = response.json() assert "apps" in data assert "starlette" in data["apps"] assert "fastapi" in data["apps"] def test_framework_health(self, http_client, gunicorn_url): """Test framework health endpoint.""" response = http_client.get(f"{gunicorn_url}/framework/health") assert response.status_code == 200 # ============================================================================ # Starlette Integration Tests # ============================================================================ class TestStarletteBasic: """Test basic Starlette integration.""" def test_starlette_homepage(self, http_client, gunicorn_url): """Test Starlette homepage.""" response = 
http_client.get(f"{gunicorn_url}/framework/starlette/") if response.status_code == 503: pytest.skip("Starlette not available in container") assert response.status_code == 200 assert "Starlette" in response.text def test_starlette_json(self, http_client, gunicorn_url): """Test Starlette JSON response.""" response = http_client.get(f"{gunicorn_url}/framework/starlette/json") if response.status_code == 503: pytest.skip("Starlette not available") assert response.status_code == 200 data = response.json() assert data["framework"] == "starlette" def test_starlette_json_query_params(self, http_client, gunicorn_url): """Test Starlette query parameters.""" response = http_client.get(f"{gunicorn_url}/framework/starlette/json?foo=bar&baz=123") if response.status_code == 503: pytest.skip("Starlette not available") assert response.status_code == 200 data = response.json() assert data["query_params"]["foo"] == "bar" assert data["query_params"]["baz"] == "123" def test_starlette_echo(self, http_client, gunicorn_url): """Test Starlette echo endpoint.""" body = "Hello Starlette!" 
response = http_client.post( f"{gunicorn_url}/framework/starlette/echo", content=body.encode() ) if response.status_code == 503: pytest.skip("Starlette not available") assert response.status_code == 200 assert body in response.text def test_starlette_headers(self, http_client, gunicorn_url): """Test Starlette headers endpoint.""" response = http_client.get( f"{gunicorn_url}/framework/starlette/headers", headers={"X-Custom-Header": "custom-value"} ) if response.status_code == 503: pytest.skip("Starlette not available") assert response.status_code == 200 data = response.json() assert "x-custom-header" in data assert data["x-custom-header"] == "custom-value" def test_starlette_scope(self, http_client, gunicorn_url): """Test Starlette scope endpoint.""" response = http_client.get(f"{gunicorn_url}/framework/starlette/scope") if response.status_code == 503: pytest.skip("Starlette not available") assert response.status_code == 200 data = response.json() assert data["type"] == "http" assert "asgi" in data def test_starlette_health(self, http_client, gunicorn_url): """Test Starlette health endpoint.""" response = http_client.get(f"{gunicorn_url}/framework/starlette/health") if response.status_code == 503: pytest.skip("Starlette not available") assert response.status_code == 200 class TestStarletteStreaming: """Test Starlette streaming functionality.""" def test_starlette_streaming(self, http_client, gunicorn_url): """Test Starlette streaming response.""" response = http_client.get(f"{gunicorn_url}/framework/starlette/streaming") if response.status_code == 503: pytest.skip("Starlette not available") assert response.status_code == 200 assert "Chunk" in response.text def test_starlette_streaming_chunks(self, streaming_client, gunicorn_url): """Test Starlette streaming returns multiple chunks.""" try: chunks = list(streaming_client.stream_chunks( f"{gunicorn_url}/framework/starlette/streaming" )) except Exception: pytest.skip("Starlette not available") full_content = 
b"".join(chunks).decode("utf-8") if "Framework not available" in full_content: pytest.skip("Starlette not available") assert "Chunk 1" in full_content assert "Chunk 10" in full_content class TestStarletteWebSocket: """Test Starlette WebSocket functionality.""" @pytest.mark.asyncio async def test_starlette_websocket_echo(self, websocket_connect, gunicorn_url): """Test Starlette WebSocket echo.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/framework/starlette/ws/echo" try: async with await websocket_connect(ws_url) as ws: await ws.send("hello starlette") response = await ws.recv() assert "Starlette echo: hello starlette" in response except Exception as e: if "403" in str(e) or "404" in str(e): pytest.skip("Starlette WebSocket not available") raise # ============================================================================ # FastAPI Integration Tests # ============================================================================ class TestFastAPIBasic: """Test basic FastAPI integration.""" def test_fastapi_homepage(self, http_client, gunicorn_url): """Test FastAPI homepage.""" response = http_client.get(f"{gunicorn_url}/framework/fastapi/") if response.status_code == 503: pytest.skip("FastAPI not available in container") assert response.status_code == 200 data = response.json() assert "FastAPI" in data.get("message", "") def test_fastapi_json(self, http_client, gunicorn_url): """Test FastAPI JSON response.""" response = http_client.get(f"{gunicorn_url}/framework/fastapi/json") if response.status_code == 503: pytest.skip("FastAPI not available") assert response.status_code == 200 data = response.json() assert data["framework"] == "fastapi" def test_fastapi_json_query_params(self, http_client, gunicorn_url): """Test FastAPI query parameters.""" response = http_client.get(f"{gunicorn_url}/framework/fastapi/json?foo=bar&num=42") if response.status_code == 503: pytest.skip("FastAPI not available") assert response.status_code == 200 data = response.json() 
assert data["query_params"]["foo"] == "bar" assert data["query_params"]["num"] == "42" def test_fastapi_echo(self, http_client, gunicorn_url): """Test FastAPI echo endpoint.""" body = "Hello FastAPI!" response = http_client.post( f"{gunicorn_url}/framework/fastapi/echo", content=body.encode() ) if response.status_code == 503: pytest.skip("FastAPI not available") assert response.status_code == 200 data = response.json() assert data["echo"] == body assert data["length"] == len(body) def test_fastapi_headers(self, http_client, gunicorn_url): """Test FastAPI headers endpoint.""" response = http_client.get( f"{gunicorn_url}/framework/fastapi/headers", headers={"X-FastAPI-Header": "fastapi-value"} ) if response.status_code == 503: pytest.skip("FastAPI not available") assert response.status_code == 200 data = response.json() assert "x-fastapi-header" in data assert data["x-fastapi-header"] == "fastapi-value" def test_fastapi_scope(self, http_client, gunicorn_url): """Test FastAPI scope endpoint.""" response = http_client.get(f"{gunicorn_url}/framework/fastapi/scope") if response.status_code == 503: pytest.skip("FastAPI not available") assert response.status_code == 200 data = response.json() assert data["type"] == "http" assert "asgi" in data def test_fastapi_health(self, http_client, gunicorn_url): """Test FastAPI health endpoint.""" response = http_client.get(f"{gunicorn_url}/framework/fastapi/health") if response.status_code == 503: pytest.skip("FastAPI not available") assert response.status_code == 200 data = response.json() assert data["status"] == "ok" class TestFastAPIPathParameters: """Test FastAPI path parameters.""" def test_path_parameter_int(self, http_client, gunicorn_url): """Test FastAPI path parameter with integer.""" response = http_client.get(f"{gunicorn_url}/framework/fastapi/items/42") if response.status_code == 503: pytest.skip("FastAPI not available") assert response.status_code == 200 data = response.json() assert data["item_id"] == 42 def 
test_path_parameter_with_query(self, http_client, gunicorn_url): """Test FastAPI path parameter with query string.""" response = http_client.get(f"{gunicorn_url}/framework/fastapi/items/123?q=search") if response.status_code == 503: pytest.skip("FastAPI not available") assert response.status_code == 200 data = response.json() assert data["item_id"] == 123 assert data["query"] == "search" def test_create_item(self, http_client, gunicorn_url): """Test FastAPI create item endpoint.""" item = {"name": "Test Item", "price": 99.99} response = http_client.post( f"{gunicorn_url}/framework/fastapi/items/", json=item ) if response.status_code == 503: pytest.skip("FastAPI not available") assert response.status_code == 200 data = response.json() assert data["created"] == item class TestFastAPIStreaming: """Test FastAPI streaming functionality.""" def test_fastapi_streaming(self, http_client, gunicorn_url): """Test FastAPI streaming response.""" response = http_client.get(f"{gunicorn_url}/framework/fastapi/streaming") if response.status_code == 503: pytest.skip("FastAPI not available") assert response.status_code == 200 assert "Chunk" in response.text def test_fastapi_streaming_chunks(self, streaming_client, gunicorn_url): """Test FastAPI streaming returns multiple chunks.""" try: chunks = list(streaming_client.stream_chunks( f"{gunicorn_url}/framework/fastapi/streaming" )) except Exception: pytest.skip("FastAPI not available") full_content = b"".join(chunks).decode("utf-8") if "Framework not available" in full_content: pytest.skip("FastAPI not available") assert "Chunk 1" in full_content assert "Chunk 10" in full_content class TestFastAPIWebSocket: """Test FastAPI WebSocket functionality.""" @pytest.mark.asyncio async def test_fastapi_websocket_echo(self, websocket_connect, gunicorn_url): """Test FastAPI WebSocket echo.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/framework/fastapi/ws/echo" try: async with await websocket_connect(ws_url) as ws: await ws.send("hello 
fastapi") response = await ws.recv() assert "FastAPI echo: hello fastapi" in response except Exception as e: if "403" in str(e) or "404" in str(e): pytest.skip("FastAPI WebSocket not available") raise # ============================================================================ # Cross-Framework Tests # ============================================================================ class TestCrossFramework: """Test cross-framework functionality.""" def test_both_frameworks_available(self, http_client, gunicorn_url): """Test both frameworks are available.""" response = http_client.get(f"{gunicorn_url}/framework/") assert response.status_code == 200 data = response.json() starlette_available = data["apps"]["starlette"]["available"] fastapi_available = data["apps"]["fastapi"]["available"] # At least one should be available (container should have them) # If neither available, skip if not starlette_available and not fastapi_available: pytest.skip("No frameworks available") def test_framework_independence(self, http_client, gunicorn_url): """Test frameworks work independently.""" # Check framework root first root_response = http_client.get(f"{gunicorn_url}/framework/") if root_response.status_code != 200: pytest.skip("Frameworks not available") data = root_response.json() if data["apps"]["starlette"]["available"]: starlette_response = http_client.get(f"{gunicorn_url}/framework/starlette/health") assert starlette_response.status_code == 200 if data["apps"]["fastapi"]["available"]: fastapi_response = http_client.get(f"{gunicorn_url}/framework/fastapi/health") assert fastapi_response.status_code == 200 # ============================================================================ # Proxy Framework Tests # ============================================================================ class TestProxyFramework: """Test frameworks through nginx proxy.""" def test_proxy_framework_root(self, http_client, nginx_url): """Test framework root through proxy.""" response = 
http_client.get(f"{nginx_url}/framework/") assert response.status_code == 200 data = response.json() assert "apps" in data def test_proxy_starlette(self, http_client, nginx_url): """Test Starlette through proxy.""" response = http_client.get(f"{nginx_url}/framework/starlette/json") if response.status_code == 503: pytest.skip("Starlette not available") assert response.status_code == 200 data = response.json() assert data["framework"] == "starlette" def test_proxy_fastapi(self, http_client, nginx_url): """Test FastAPI through proxy.""" response = http_client.get(f"{nginx_url}/framework/fastapi/json") if response.status_code == 503: pytest.skip("FastAPI not available") assert response.status_code == 200 data = response.json() assert data["framework"] == "fastapi" # ============================================================================ # HTTPS Framework Tests # ============================================================================ @pytest.mark.ssl class TestHTTPSFramework: """Test frameworks over HTTPS.""" def test_https_starlette(self, http_client, gunicorn_ssl_url): """Test Starlette over HTTPS.""" response = http_client.get(f"{gunicorn_ssl_url}/framework/starlette/json") if response.status_code == 503: pytest.skip("Starlette not available") assert response.status_code == 200 data = response.json() assert data["framework"] == "starlette" def test_https_fastapi(self, http_client, gunicorn_ssl_url): """Test FastAPI over HTTPS.""" response = http_client.get(f"{gunicorn_ssl_url}/framework/fastapi/json") if response.status_code == 503: pytest.skip("FastAPI not available") assert response.status_code == 200 data = response.json() assert data["framework"] == "fastapi" def test_https_proxy_starlette(self, http_client, nginx_ssl_url): """Test Starlette through HTTPS proxy.""" response = http_client.get(f"{nginx_ssl_url}/framework/starlette/health") if response.status_code == 503: pytest.skip("Starlette not available") assert response.status_code == 200 def 
test_https_proxy_fastapi(self, http_client, nginx_ssl_url): """Test FastAPI through HTTPS proxy.""" import time response = None # Retry up to 3 times for intermittent proxy connectivity issues for attempt in range(3): response = http_client.get(f"{nginx_ssl_url}/framework/fastapi/health") if response.status_code == 503: pytest.skip("FastAPI not available") if response.status_code == 200: break time.sleep(0.5) assert response.status_code == 200 data = response.json() assert data["status"] == "ok" # ============================================================================ # Async Framework Tests # ============================================================================ @pytest.mark.asyncio class TestAsyncFramework: """Test frameworks with async client.""" async def test_async_starlette(self, async_http_client_factory, gunicorn_url): """Test Starlette with async client.""" async with await async_http_client_factory() as client: response = await client.get(f"{gunicorn_url}/framework/starlette/json") if response.status_code == 503: pytest.skip("Starlette not available") assert response.status_code == 200 data = response.json() assert data["framework"] == "starlette" async def test_async_fastapi(self, async_http_client_factory, gunicorn_url): """Test FastAPI with async client.""" async with await async_http_client_factory() as client: response = await client.get(f"{gunicorn_url}/framework/fastapi/json") if response.status_code == 503: pytest.skip("FastAPI not available") assert response.status_code == 200 data = response.json() assert data["framework"] == "fastapi" async def test_concurrent_framework_requests(self, async_http_client_factory, gunicorn_url): """Test concurrent requests to both frameworks.""" import asyncio async with await async_http_client_factory() as client: async def get_starlette(): response = await client.get(f"{gunicorn_url}/framework/starlette/json") return response.status_code, "starlette" async def get_fastapi(): response = await 
client.get(f"{gunicorn_url}/framework/fastapi/json") return response.status_code, "fastapi" results = await asyncio.gather( get_starlette(), get_fastapi(), get_starlette(), get_fastapi(), ) # All should either succeed (200) or framework unavailable (503) for status, name in results: assert status in [200, 503], f"{name} returned {status}" benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/test_http2_asgi.py000066400000000000000000000360561514360242400267000ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ HTTP/2 ASGI integration tests. Tests HTTP/2 specific functionality with ASGI applications. """ import json import pytest pytestmark = [ pytest.mark.docker, pytest.mark.asgi, pytest.mark.http2, pytest.mark.integration, ] # ============================================================================ # HTTP/2 Basic Tests # ============================================================================ class TestHTTP2Basic: """Test basic HTTP/2 functionality with ASGI.""" def test_http2_request(self, http2_client, nginx_ssl_url): """Test HTTP/2 request through nginx.""" response = http2_client.get(f"{nginx_ssl_url}/http/") assert response.status_code == 200 # HTTP/2 is negotiated via ALPN on TLS assert response.http_version in ["HTTP/2", "HTTP/1.1"] def test_http2_scope(self, http2_client, nginx_ssl_url): """Test ASGI scope with HTTP/2.""" response = http2_client.get(f"{nginx_ssl_url}/http/scope") assert response.status_code == 200 data = response.json() # HTTP version in scope should reflect what the app sees # (may be 1.1 if nginx proxies as HTTP/1.1 to backend) assert data["http_version"] in ["1.1", "2", "1.0"] def test_http2_headers(self, http2_client, nginx_ssl_url): """Test headers work correctly over HTTP/2.""" response = http2_client.get( f"{nginx_ssl_url}/http/headers", headers={ "X-Custom-Header": "http2-value", "X-Another-Header": "another-value", } ) assert 
response.status_code == 200 data = response.json() assert "x-custom-header" in data assert data["x-custom-header"] == "http2-value" # ============================================================================ # HTTP/2 Multiplexing Tests # ============================================================================ @pytest.mark.asyncio class TestHTTP2Multiplexing: """Test HTTP/2 multiplexing features.""" async def test_concurrent_requests_single_connection( self, async_http_client_factory, nginx_ssl_url ): """Test concurrent requests on single HTTP/2 connection.""" import asyncio async with await async_http_client_factory(http2=True) as client: async def make_request(i): response = await client.get(f"{nginx_ssl_url}/http/?req={i}") return response.status_code == 200, i # HTTP/2 allows multiple concurrent streams tasks = [make_request(i) for i in range(20)] results = await asyncio.gather(*tasks) assert all(success for success, _ in results) async def test_interleaved_requests( self, async_http_client_factory, nginx_ssl_url ): """Test interleaved request/response on HTTP/2.""" import asyncio async with await async_http_client_factory(http2=True) as client: async def fast_request(): return await client.get(f"{nginx_ssl_url}/http/health") async def slow_request(): return await client.get(f"{nginx_ssl_url}/http/delay?ms=100") # Mix of fast and slow requests tasks = [ slow_request(), fast_request(), slow_request(), fast_request(), fast_request(), ] results = await asyncio.gather(*tasks) assert all(r.status_code == 200 for r in results) # ============================================================================ # HTTP/2 Streaming Tests # ============================================================================ class TestHTTP2Streaming: """Test HTTP/2 streaming with ASGI.""" def test_http2_streaming_response(self, http2_client, nginx_ssl_url): """Test streaming response over HTTP/2.""" response = http2_client.get(f"{nginx_ssl_url}/stream/streaming?chunks=5") assert 
response.status_code == 200 assert "Chunk" in response.text def test_http2_sse(self, http2_client, nginx_ssl_url): """Test Server-Sent Events over HTTP/2.""" response = http2_client.get(f"{nginx_ssl_url}/stream/sse?events=3&delay=0.1") assert response.status_code == 200 assert "text/event-stream" in response.headers.get("content-type", "") def test_http2_large_response(self, http2_client, nginx_ssl_url): """Test large response over HTTP/2.""" response = http2_client.get(f"{nginx_ssl_url}/stream/large-stream?size=102400") assert response.status_code == 200 assert len(response.content) == 102400 # ============================================================================ # HTTP/2 POST/Body Tests # ============================================================================ class TestHTTP2RequestBody: """Test HTTP/2 request body handling.""" def test_http2_post_json(self, http2_client, nginx_ssl_url): """Test POST with JSON body over HTTP/2.""" data = {"message": "http2 post", "number": 42} response = http2_client.post( f"{nginx_ssl_url}/http/post-json", json=data ) assert response.status_code == 200 result = response.json() assert result["received"]["message"] == "http2 post" def test_http2_post_echo(self, http2_client, nginx_ssl_url): """Test echo endpoint over HTTP/2.""" body = b"HTTP/2 echo test body" response = http2_client.post( f"{nginx_ssl_url}/http/echo", content=body ) assert response.status_code == 200 assert response.content == body def test_http2_large_request_body(self, http2_client, nginx_ssl_url): """Test large request body over HTTP/2.""" body = b"x" * 100000 # 100KB response = http2_client.post( f"{nginx_ssl_url}/http/echo", content=body ) assert response.status_code == 200 assert len(response.content) == 100000 # ============================================================================ # HTTP/2 ASGI Scope Tests # ============================================================================ class TestHTTP2ASGIScope: """Test ASGI scope properties 
with HTTP/2.""" def test_scope_type_http(self, http2_client, nginx_ssl_url): """Test scope type is HTTP.""" response = http2_client.get(f"{nginx_ssl_url}/http/scope") assert response.status_code == 200 data = response.json() assert data["type"] == "http" def test_scope_asgi_version(self, http2_client, nginx_ssl_url): """Test ASGI version in scope.""" response = http2_client.get(f"{nginx_ssl_url}/http/scope") assert response.status_code == 200 data = response.json() assert "asgi" in data assert "version" in data["asgi"] def test_scope_scheme_https(self, http2_client, nginx_ssl_url): """Test scheme is HTTPS in scope.""" response = http2_client.get(f"{nginx_ssl_url}/http/scope") assert response.status_code == 200 data = response.json() # Scope scheme reflects what app sees (may be http if proxy strips TLS) assert data["scheme"] in ["http", "https"] def test_scope_method_preserved(self, http2_client, nginx_ssl_url): """Test HTTP method is preserved in scope.""" response = http2_client.get(f"{nginx_ssl_url}/http/scope") assert response.status_code == 200 data = response.json() assert data["method"] == "GET" def test_scope_path_preserved(self, http2_client, nginx_ssl_url): """Test path is preserved in scope.""" response = http2_client.get(f"{nginx_ssl_url}/http/scope") assert response.status_code == 200 data = response.json() # Path is stripped by main_app router (/http prefix removed) assert data["path"] == "/scope" def test_scope_query_string(self, http2_client, nginx_ssl_url): """Test query string in scope.""" response = http2_client.get(f"{nginx_ssl_url}/http/scope?foo=bar&baz=qux") assert response.status_code == 200 data = response.json() assert "foo=bar" in data["query_string"] # ============================================================================ # HTTP/2 Framework Tests # ============================================================================ class TestHTTP2Framework: """Test frameworks over HTTP/2.""" def test_http2_starlette(self, http2_client, 
nginx_ssl_url): """Test Starlette over HTTP/2.""" response = http2_client.get(f"{nginx_ssl_url}/framework/starlette/json") if response.status_code == 503: pytest.skip("Starlette not available") assert response.status_code == 200 data = response.json() assert data["framework"] == "starlette" def test_http2_fastapi(self, http2_client, nginx_ssl_url): """Test FastAPI over HTTP/2.""" response = http2_client.get(f"{nginx_ssl_url}/framework/fastapi/json") if response.status_code == 503: pytest.skip("FastAPI not available") assert response.status_code == 200 data = response.json() assert data["framework"] == "fastapi" # ============================================================================ # HTTP/2 Error Handling Tests # ============================================================================ class TestHTTP2Errors: """Test HTTP/2 error handling.""" def test_http2_404(self, http2_client, nginx_ssl_url): """Test 404 over HTTP/2.""" response = http2_client.get(f"{nginx_ssl_url}/http/nonexistent") assert response.status_code == 404 def test_http2_500(self, http2_client, nginx_ssl_url): """Test 500 over HTTP/2.""" response = http2_client.get(f"{nginx_ssl_url}/http/status?code=500") assert response.status_code == 500 def test_http2_various_status_codes(self, http2_client, nginx_ssl_url): """Test various status codes over HTTP/2.""" for code in [200, 201, 204, 301, 400, 403, 404, 500, 503]: response = http2_client.get( f"{nginx_ssl_url}/http/status?code={code}", follow_redirects=False ) assert response.status_code == code # ============================================================================ # HTTP/2 Concurrent Async Tests # ============================================================================ @pytest.mark.asyncio class TestHTTP2Async: """Test async HTTP/2 operations.""" async def test_async_http2_streaming( self, async_http_client_factory, nginx_ssl_url ): """Test async streaming over HTTP/2.""" async with await async_http_client_factory(http2=True) as 
client: chunks = [] async with client.stream( "GET", f"{nginx_ssl_url}/stream/streaming?chunks=5" ) as response: async for chunk in response.aiter_bytes(): chunks.append(chunk) full_content = b"".join(chunks).decode("utf-8") assert "Chunk" in full_content async def test_async_http2_concurrent_streams( self, async_http_client_factory, nginx_ssl_url ): """Test concurrent HTTP/2 streams.""" import asyncio async with await async_http_client_factory(http2=True) as client: async def stream_request(i): response = await client.get( f"{nginx_ssl_url}/stream/streaming?chunks=3" ) return i, "Chunk" in response.text tasks = [stream_request(i) for i in range(10)] results = await asyncio.gather(*tasks) assert all(success for _, success in results) async def test_async_http2_mixed_requests( self, async_http_client_factory, nginx_ssl_url ): """Test mixed request types over HTTP/2.""" import asyncio async with await async_http_client_factory(http2=True) as client: async def get_request(): return await client.get(f"{nginx_ssl_url}/http/") async def post_request(): return await client.post( f"{nginx_ssl_url}/http/echo", content=b"test" ) async def stream_request(): response = await client.get( f"{nginx_ssl_url}/stream/streaming?chunks=2" ) return response tasks = [ get_request(), post_request(), stream_request(), get_request(), post_request(), ] results = await asyncio.gather(*tasks) assert all(r.status_code == 200 for r in results) # ============================================================================ # HTTP/2 Lifespan Tests # ============================================================================ class TestHTTP2Lifespan: """Test lifespan app over HTTP/2.""" def test_http2_lifespan_state(self, http2_client, nginx_ssl_url): """Test lifespan state over HTTP/2.""" response = http2_client.get(f"{nginx_ssl_url}/lifespan/state") assert response.status_code == 200 data = response.json() # main_app handles lifespan, so check scope_state not module_state assert 
data["scope_state"]["main_app_started"] is True def test_http2_lifespan_counter(self, http2_client, nginx_ssl_url): """Test lifespan counter over HTTP/2.""" response = http2_client.get(f"{nginx_ssl_url}/lifespan/counter") assert response.status_code == 200 data = response.json() assert "counter" in data # ============================================================================ # HTTP/2 Direct (No Proxy) Tests # ============================================================================ @pytest.mark.ssl class TestHTTP2Direct: """Test HTTP/2 directly to gunicorn (if supported).""" def test_direct_https_request(self, http_client, gunicorn_ssl_url): """Test direct HTTPS request to gunicorn.""" response = http_client.get(f"{gunicorn_ssl_url}/http/") assert response.status_code == 200 def test_direct_https_scope(self, http_client, gunicorn_ssl_url): """Test scope from direct HTTPS connection.""" response = http_client.get(f"{gunicorn_ssl_url}/http/scope") assert response.status_code == 200 data = response.json() assert data["type"] == "http" # Direct connection should show https scheme assert data["scheme"] == "https" def test_direct_https_streaming(self, http_client, gunicorn_ssl_url): """Test streaming from direct HTTPS connection.""" response = http_client.get(f"{gunicorn_ssl_url}/stream/streaming?chunks=3") assert response.status_code == 200 assert "Chunk" in response.text benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/test_http_compliance.py000066400000000000000000000502741514360242400300030ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ HTTP compliance integration tests for ASGI. Tests HTTP request/response handling, headers, methods, status codes, and ASGI scope correctness through actual HTTP requests. 
""" import json import pytest pytestmark = [ pytest.mark.docker, pytest.mark.asgi, pytest.mark.integration, ] # ============================================================================ # Basic HTTP Request/Response Tests # ============================================================================ class TestBasicHTTPRequests: """Test basic HTTP request/response functionality.""" def test_root_endpoint(self, http_client, gunicorn_url): """Test root endpoint returns expected response.""" response = http_client.get(f"{gunicorn_url}/") assert response.status_code == 200 assert "ASGI Compliance Testbed" in response.text def test_health_endpoint(self, http_client, gunicorn_url): """Test health check endpoint.""" response = http_client.get(f"{gunicorn_url}/health") assert response.status_code == 200 assert response.text == "OK" def test_http_app_root(self, http_client, gunicorn_url): """Test HTTP app root endpoint.""" response = http_client.get(f"{gunicorn_url}/http/") assert response.status_code == 200 assert response.text == "Hello, ASGI!" 
def test_not_found(self, http_client, gunicorn_url): """Test 404 response for unknown paths.""" response = http_client.get(f"{gunicorn_url}/http/nonexistent") assert response.status_code == 404 class TestHTTPMethods: """Test various HTTP methods.""" def test_get_method(self, http_client, gunicorn_url): """Test GET method.""" response = http_client.get(f"{gunicorn_url}/http/method") assert response.status_code == 200 data = response.json() assert data["method"] == "GET" def test_post_method(self, http_client, gunicorn_url): """Test POST method.""" response = http_client.post(f"{gunicorn_url}/http/method") assert response.status_code == 200 data = response.json() assert data["method"] == "POST" def test_put_method(self, http_client, gunicorn_url): """Test PUT method.""" response = http_client.put(f"{gunicorn_url}/http/method") assert response.status_code == 200 data = response.json() assert data["method"] == "PUT" def test_delete_method(self, http_client, gunicorn_url): """Test DELETE method.""" response = http_client.delete(f"{gunicorn_url}/http/method") assert response.status_code == 200 data = response.json() assert data["method"] == "DELETE" def test_patch_method(self, http_client, gunicorn_url): """Test PATCH method.""" response = http_client.patch(f"{gunicorn_url}/http/method") assert response.status_code == 200 data = response.json() assert data["method"] == "PATCH" def test_head_method(self, http_client, gunicorn_url): """Test HEAD method returns no body.""" response = http_client.head(f"{gunicorn_url}/http/") assert response.status_code == 200 assert response.content == b"" def test_options_method(self, http_client, gunicorn_url): """Test OPTIONS method.""" response = http_client.options(f"{gunicorn_url}/http/method") assert response.status_code == 200 data = response.json() assert data["method"] == "OPTIONS" class TestHTTPStatusCodes: """Test HTTP status code responses.""" @pytest.mark.parametrize("status_code", [ 200, 201, 202, 204, 301, 302, 304, 400, 
401, 403, 404, 405, 500, 502, 503 ]) def test_status_codes(self, http_client, gunicorn_url, status_code): """Test various HTTP status codes.""" response = http_client.get(f"{gunicorn_url}/http/status?code={status_code}") assert response.status_code == status_code def test_invalid_status_code(self, http_client, gunicorn_url): """Test invalid status code returns 400.""" response = http_client.get(f"{gunicorn_url}/http/status?code=999") assert response.status_code == 400 # ============================================================================ # Request/Response Body Tests # ============================================================================ class TestRequestBody: """Test request body handling.""" def test_echo_small_body(self, http_client, gunicorn_url): """Test echoing small request body.""" body = b"Hello, World!" response = http_client.post(f"{gunicorn_url}/http/echo", content=body) assert response.status_code == 200 assert response.content == body def test_echo_large_body(self, http_client, gunicorn_url): """Test echoing large request body (1MB).""" body = b"x" * (1024 * 1024) response = http_client.post(f"{gunicorn_url}/http/echo", content=body) assert response.status_code == 200 assert len(response.content) == len(body) assert response.content == body def test_echo_empty_body(self, http_client, gunicorn_url): """Test echoing empty request body.""" response = http_client.post(f"{gunicorn_url}/http/echo", content=b"") assert response.status_code == 200 assert response.content == b"" def test_post_json(self, http_client, gunicorn_url): """Test posting and receiving JSON.""" data = {"name": "test", "value": 123, "nested": {"key": "value"}} response = http_client.post( f"{gunicorn_url}/http/post-json", json=data ) assert response.status_code == 200 result = response.json() assert result["received"] == data assert result["type"] == "dict" def test_post_json_array(self, http_client, gunicorn_url): """Test posting JSON array.""" data = [1, 2, 3, "four", 
{"five": 5}] response = http_client.post( f"{gunicorn_url}/http/post-json", json=data ) assert response.status_code == 200 result = response.json() assert result["received"] == data assert result["type"] == "list" class TestResponseBody: """Test response body handling.""" def test_large_response(self, http_client, gunicorn_url): """Test receiving large response (1MB).""" response = http_client.get(f"{gunicorn_url}/http/large?size=1048576") assert response.status_code == 200 assert len(response.content) == 1048576 def test_large_response_custom_size(self, http_client, gunicorn_url): """Test receiving custom size response.""" size = 500000 response = http_client.get(f"{gunicorn_url}/http/large?size={size}") assert response.status_code == 200 assert len(response.content) == size # ============================================================================ # Header Tests # ============================================================================ class TestRequestHeaders: """Test request header handling.""" def test_headers_received(self, http_client, gunicorn_url): """Test that request headers are received correctly.""" response = http_client.get( f"{gunicorn_url}/http/headers", headers={ "X-Custom-Header": "custom-value", "X-Another-Header": "another-value", } ) assert response.status_code == 200 headers = response.json() assert headers.get("x-custom-header") == "custom-value" assert headers.get("x-another-header") == "another-value" def test_host_header(self, http_client, gunicorn_url): """Test Host header is received.""" response = http_client.get(f"{gunicorn_url}/http/headers") assert response.status_code == 200 headers = response.json() assert "host" in headers def test_user_agent_header(self, http_client, gunicorn_url): """Test User-Agent header is received.""" response = http_client.get( f"{gunicorn_url}/http/headers", headers={"User-Agent": "TestClient/1.0"} ) assert response.status_code == 200 headers = response.json() assert headers.get("user-agent") == 
"TestClient/1.0" def test_content_type_header(self, http_client, gunicorn_url): """Test Content-Type header on POST.""" response = http_client.post( f"{gunicorn_url}/http/headers", content=b"test", headers={"Content-Type": "application/octet-stream"} ) assert response.status_code == 200 headers = response.json() assert headers.get("content-type") == "application/octet-stream" class TestResponseHeaders: """Test response header handling.""" def test_content_type_response(self, http_client, gunicorn_url): """Test Content-Type in response.""" response = http_client.get(f"{gunicorn_url}/http/headers") assert "application/json" in response.headers.get("content-type", "") def test_content_length_response(self, http_client, gunicorn_url): """Test Content-Length in response.""" response = http_client.get(f"{gunicorn_url}/http/") assert "content-length" in response.headers # ============================================================================ # ASGI Scope Tests # ============================================================================ class TestASGIScope: """Test ASGI scope correctness.""" def test_scope_type(self, http_client, gunicorn_url): """Test scope type is 'http'.""" response = http_client.get(f"{gunicorn_url}/http/scope") assert response.status_code == 200 scope = response.json() assert scope["type"] == "http" def test_scope_asgi_version(self, http_client, gunicorn_url): """Test ASGI version in scope.""" response = http_client.get(f"{gunicorn_url}/http/scope") scope = response.json() assert "asgi" in scope assert scope["asgi"]["version"] == "3.0" def test_scope_http_version(self, http_client, gunicorn_url): """Test HTTP version in scope.""" response = http_client.get(f"{gunicorn_url}/http/scope") scope = response.json() assert scope["http_version"] in ("1.0", "1.1", "2") def test_scope_method(self, http_client, gunicorn_url): """Test method in scope.""" response = http_client.post(f"{gunicorn_url}/http/scope") scope = response.json() assert 
scope["method"] == "POST" def test_scope_scheme(self, http_client, gunicorn_url): """Test scheme in scope.""" response = http_client.get(f"{gunicorn_url}/http/scope") scope = response.json() assert scope["scheme"] == "http" def test_scope_path(self, http_client, gunicorn_url): """Test path in scope.""" response = http_client.get(f"{gunicorn_url}/http/scope") scope = response.json() assert scope["path"] == "/scope" def test_scope_query_string(self, http_client, gunicorn_url): """Test query string in scope.""" response = http_client.get(f"{gunicorn_url}/http/scope?foo=bar&baz=qux") scope = response.json() assert scope["query_string"] == "foo=bar&baz=qux" def test_scope_headers_are_list(self, http_client, gunicorn_url): """Test headers in scope are list of 2-tuples.""" response = http_client.get(f"{gunicorn_url}/http/scope") scope = response.json() assert isinstance(scope["headers"], list) for header in scope["headers"]: assert isinstance(header, list) assert len(header) == 2 def test_scope_server(self, http_client, gunicorn_url): """Test server in scope.""" response = http_client.get(f"{gunicorn_url}/http/scope") scope = response.json() assert scope["server"] is not None assert isinstance(scope["server"], list) assert len(scope["server"]) == 2 def test_scope_client(self, http_client, gunicorn_url): """Test client in scope.""" response = http_client.get(f"{gunicorn_url}/http/scope") scope = response.json() assert scope["client"] is not None assert isinstance(scope["client"], list) assert len(scope["client"]) == 2 # ============================================================================ # Query String Tests # ============================================================================ class TestQueryStrings: """Test query string handling.""" def test_simple_query(self, http_client, gunicorn_url): """Test simple query parameter.""" response = http_client.get(f"{gunicorn_url}/http/query?name=test") assert response.status_code == 200 data = response.json() assert 
data["params"]["name"] == "test" def test_multiple_params(self, http_client, gunicorn_url): """Test multiple query parameters.""" response = http_client.get(f"{gunicorn_url}/http/query?a=1&b=2&c=3") assert response.status_code == 200 data = response.json() assert data["params"]["a"] == "1" assert data["params"]["b"] == "2" assert data["params"]["c"] == "3" def test_empty_query(self, http_client, gunicorn_url): """Test empty query string.""" response = http_client.get(f"{gunicorn_url}/http/query") assert response.status_code == 200 data = response.json() assert data["raw"] == "" assert data["params"] == {} def test_url_encoded_query(self, http_client, gunicorn_url): """Test URL-encoded query parameters.""" response = http_client.get(f"{gunicorn_url}/http/query?name=hello%20world") assert response.status_code == 200 data = response.json() assert data["raw"] == "name=hello%20world" # ============================================================================ # Cookie Tests # ============================================================================ class TestCookies: """Test cookie handling.""" def test_set_cookie(self, http_client, gunicorn_url): """Test setting cookies.""" response = http_client.get(f"{gunicorn_url}/http/cookies?set=session=abc123") assert response.status_code == 200 assert "set-cookie" in response.headers def test_receive_cookie(self, http_client, gunicorn_url): """Test receiving cookies.""" response = http_client.get( f"{gunicorn_url}/http/cookies", cookies={"session": "test123"} ) assert response.status_code == 200 data = response.json() assert data["request_cookies"].get("session") == "test123" # ============================================================================ # Redirect Tests # ============================================================================ class TestRedirects: """Test redirect handling.""" def test_redirect_302(self, http_client, gunicorn_url): """Test 302 redirect.""" response = 
http_client.get(f"{gunicorn_url}/http/redirect?to=/http/&status=302") assert response.status_code == 302 assert response.headers.get("location") == "/http/" def test_redirect_301(self, http_client, gunicorn_url): """Test 301 redirect.""" response = http_client.get(f"{gunicorn_url}/http/redirect?to=/http/&status=301") assert response.status_code == 301 def test_redirect_307(self, http_client, gunicorn_url): """Test 307 redirect.""" response = http_client.get(f"{gunicorn_url}/http/redirect?to=/http/&status=307") assert response.status_code == 307 # ============================================================================ # Connection Tests # ============================================================================ class TestConnections: """Test connection handling.""" def test_multiple_requests_same_connection(self, http_client, gunicorn_url): """Test multiple requests on same connection (keep-alive).""" for i in range(5): response = http_client.get(f"{gunicorn_url}/http/") assert response.status_code == 200 def test_concurrent_requests(self, http_client, gunicorn_url): """Test concurrent requests.""" import concurrent.futures def make_request(i): httpx = pytest.importorskip("httpx") with httpx.Client(verify=False, timeout=30.0) as client: response = client.get(f"{gunicorn_url}/http/method") return response.status_code with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor: futures = [executor.submit(make_request, i) for i in range(20)] results = [f.result() for f in concurrent.futures.as_completed(futures)] assert all(status == 200 for status in results) # ============================================================================ # Proxy Tests (via Nginx) # ============================================================================ class TestProxyRequests: """Test requests through nginx proxy.""" def test_proxy_basic_request(self, http_client, nginx_url): """Test basic request through proxy.""" response = 
http_client.get(f"{nginx_url}/http/") assert response.status_code == 200 assert response.text == "Hello, ASGI!" def test_proxy_headers_forwarded(self, http_client, nginx_url): """Test that proxy headers are forwarded.""" response = http_client.get(f"{nginx_url}/http/headers") assert response.status_code == 200 headers = response.json() # Nginx should add X-Forwarded-For assert "x-forwarded-for" in headers or "x-real-ip" in headers def test_proxy_large_request(self, http_client, nginx_url): """Test large request through proxy.""" body = b"x" * (100 * 1024) # 100KB response = http_client.post(f"{nginx_url}/http/echo", content=body) assert response.status_code == 200 assert len(response.content) == len(body) def test_proxy_large_response(self, http_client, nginx_url): """Test large response through proxy.""" response = http_client.get(f"{nginx_url}/http/large?size=1048576") assert response.status_code == 200 assert len(response.content) == 1048576 # ============================================================================ # HTTPS Tests # ============================================================================ @pytest.mark.ssl class TestHTTPS: """Test HTTPS connections.""" def test_https_basic_request(self, http_client, gunicorn_ssl_url): """Test basic HTTPS request.""" response = http_client.get(f"{gunicorn_ssl_url}/http/") assert response.status_code == 200 def test_https_scope_scheme(self, http_client, gunicorn_ssl_url): """Test scope scheme is https.""" response = http_client.get(f"{gunicorn_ssl_url}/http/scope") assert response.status_code == 200 scope = response.json() assert scope["scheme"] == "https" def test_https_via_proxy(self, http_client, nginx_ssl_url): """Test HTTPS through nginx proxy.""" response = http_client.get(f"{nginx_ssl_url}/http/") assert response.status_code == 200 # ============================================================================ # Error Handling Tests # 
============================================================================ class TestErrorHandling: """Test error handling.""" def test_invalid_json_body(self, http_client, gunicorn_url): """Test handling of invalid JSON body.""" response = http_client.post( f"{gunicorn_url}/http/post-json", content=b"not valid json", headers={"Content-Type": "application/json"} ) assert response.status_code == 400 assert "Invalid JSON" in response.text def test_method_not_allowed(self, http_client, gunicorn_url): """Test method not allowed response.""" response = http_client.get(f"{gunicorn_url}/http/post-json") assert response.status_code == 405 benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/test_lifespan_compliance.py000066400000000000000000000221221514360242400306140ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Lifespan compliance integration tests for ASGI. Tests the ASGI lifespan protocol including startup, shutdown, and state sharing between lifespan and request handlers. 
""" import pytest pytestmark = [ pytest.mark.docker, pytest.mark.asgi, pytest.mark.lifespan, pytest.mark.integration, ] # ============================================================================ # Basic Lifespan Tests # ============================================================================ class TestLifespanStartup: """Test lifespan startup behavior.""" def test_startup_complete(self, http_client, gunicorn_url): """Test that lifespan startup completed.""" response = http_client.get(f"{gunicorn_url}/lifespan/state") assert response.status_code == 200 data = response.json() # Check scope_state which is shared by main_app's lifespan handler assert data["scope_state_available"] is True assert data["scope_state"]["main_app_started"] is True def test_startup_called(self, http_client, gunicorn_url): """Test that startup was called (via scope state).""" response = http_client.get(f"{gunicorn_url}/lifespan/state") assert response.status_code == 200 data = response.json() # Scope state indicates main_app handled lifespan startup assert data["scope_state"]["main_app_started"] is True def test_startup_time_recorded(self, http_client, gunicorn_url): """Test that startup time was recorded.""" response = http_client.get(f"{gunicorn_url}/lifespan/state") assert response.status_code == 200 data = response.json() # Startup time is recorded in scope_state by main_app assert data["scope_state"]["startup_time"] is not None def test_health_after_startup(self, http_client, gunicorn_url): """Test health endpoint returns OK.""" # The main health endpoint is at /health, lifespan's is at /lifespan/health # but lifespan_app's health checks its own module_state which isn't set # Use the main app health instead response = http_client.get(f"{gunicorn_url}/health") assert response.status_code == 200 assert response.text == "OK" class TestLifespanInfo: """Test lifespan information endpoints.""" def test_lifespan_info_endpoint(self, http_client, gunicorn_url): """Test lifespan info 
endpoint.""" response = http_client.get(f"{gunicorn_url}/lifespan/lifespan-info") assert response.status_code == 200 data = response.json() assert data["lifespan_supported"] is True # scope_state_present indicates lifespan was handled (by main_app) assert data["scope_state_present"] is True def test_uptime_tracking(self, http_client, gunicorn_url): """Test uptime is tracked via main app info endpoint.""" # The lifespan_app's uptime won't be set since main_app handles lifespan # Use the main app's /info endpoint instead response = http_client.get(f"{gunicorn_url}/info") assert response.status_code == 200 data = response.json() assert data["uptime"] is not None assert data["uptime"] >= 0 # ============================================================================ # State Sharing Tests # ============================================================================ class TestStateSharing: """Test state sharing between lifespan and request handlers.""" def test_state_endpoint(self, http_client, gunicorn_url): """Test state endpoint returns state info.""" response = http_client.get(f"{gunicorn_url}/lifespan/state") assert response.status_code == 200 data = response.json() assert "module_state" in data def test_request_count_increments(self, http_client, gunicorn_url): """Test request count increments across requests.""" # Make first request response1 = http_client.get(f"{gunicorn_url}/lifespan/counter") assert response1.status_code == 200 count1 = response1.json()["counter"] # Make second request response2 = http_client.get(f"{gunicorn_url}/lifespan/counter") assert response2.status_code == 200 count2 = response2.json()["counter"] # Counter should have incremented assert count2 > count1 # ============================================================================ # Counter Tests # ============================================================================ class TestCounter: """Test counter functionality for state persistence.""" def test_counter_endpoint(self, 
http_client, gunicorn_url): """Test counter endpoint.""" response = http_client.get(f"{gunicorn_url}/lifespan/counter") assert response.status_code == 200 data = response.json() assert "counter" in data assert "source" in data def test_counter_increments_multiple_times(self, http_client, gunicorn_url): """Test counter increments across multiple requests.""" counts = [] for _ in range(5): response = http_client.get(f"{gunicorn_url}/lifespan/counter") counts.append(response.json()["counter"]) # Each count should be greater than the previous for i in range(1, len(counts)): assert counts[i] > counts[i - 1] # ============================================================================ # Root and Basic Endpoint Tests # ============================================================================ class TestBasicEndpoints: """Test basic lifespan app endpoints.""" def test_root_endpoint(self, http_client, gunicorn_url): """Test root endpoint.""" response = http_client.get(f"{gunicorn_url}/lifespan/") assert response.status_code == 200 assert response.text == "Lifespan Test App" def test_not_found(self, http_client, gunicorn_url): """Test 404 for unknown path.""" response = http_client.get(f"{gunicorn_url}/lifespan/unknown-path") assert response.status_code == 404 # ============================================================================ # Proxy Lifespan Tests # ============================================================================ class TestProxyLifespan: """Test lifespan through nginx proxy.""" def test_proxy_health(self, http_client, nginx_url): """Test health through proxy.""" response = http_client.get(f"{nginx_url}/health") assert response.status_code == 200 assert response.text == "OK" def test_proxy_state(self, http_client, nginx_url): """Test state through proxy.""" response = http_client.get(f"{nginx_url}/lifespan/state") assert response.status_code == 200 data = response.json() assert data["scope_state"]["main_app_started"] is True def 
test_proxy_counter(self, http_client, nginx_url): """Test counter through proxy.""" response = http_client.get(f"{nginx_url}/lifespan/counter") assert response.status_code == 200 data = response.json() assert "counter" in data # ============================================================================ # HTTPS Lifespan Tests # ============================================================================ @pytest.mark.ssl class TestHTTPSLifespan: """Test lifespan over HTTPS.""" def test_https_health(self, http_client, gunicorn_ssl_url): """Test health over HTTPS.""" response = http_client.get(f"{gunicorn_ssl_url}/health") assert response.status_code == 200 def test_https_state(self, http_client, gunicorn_ssl_url): """Test state over HTTPS.""" response = http_client.get(f"{gunicorn_ssl_url}/lifespan/state") assert response.status_code == 200 data = response.json() assert data["scope_state"]["main_app_started"] is True def test_https_proxy_health(self, http_client, nginx_ssl_url): """Test health through HTTPS proxy.""" response = http_client.get(f"{nginx_ssl_url}/health") assert response.status_code == 200 # ============================================================================ # Concurrent Access Tests # ============================================================================ @pytest.mark.asyncio class TestConcurrentLifespan: """Test concurrent access to lifespan state.""" async def test_concurrent_counter_access(self, async_http_client_factory, gunicorn_url): """Test concurrent counter access.""" import asyncio async with await async_http_client_factory() as client: async def get_counter(): response = await client.get(f"{gunicorn_url}/lifespan/counter") return response.json()["counter"] # Run 10 concurrent requests tasks = [get_counter() for _ in range(10)] results = await asyncio.gather(*tasks) # All should be valid integers assert all(isinstance(r, int) for r in results) 
benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/test_streaming_compliance.py000066400000000000000000000417321514360242400310140ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Streaming compliance integration tests for ASGI. Tests chunked transfer encoding, Server-Sent Events (SSE), and streaming response handling. """ import json import time import pytest pytestmark = [ pytest.mark.docker, pytest.mark.asgi, pytest.mark.streaming, pytest.mark.integration, ] # ============================================================================ # Basic Streaming Tests # ============================================================================ class TestBasicStreaming: """Test basic streaming response functionality.""" def test_streaming_endpoint(self, http_client, gunicorn_url): """Test basic streaming endpoint.""" response = http_client.get(f"{gunicorn_url}/stream/streaming") assert response.status_code == 200 assert "Chunk" in response.text def test_streaming_multiple_chunks(self, http_client, gunicorn_url): """Test streaming returns multiple chunks.""" response = http_client.get(f"{gunicorn_url}/stream/streaming?chunks=5") assert response.status_code == 200 lines = response.text.strip().split("\n") assert len(lines) == 5 assert "Chunk 1 of 5" in lines[0] assert "Chunk 5 of 5" in lines[4] def test_streaming_single_chunk(self, http_client, gunicorn_url): """Test streaming with single chunk.""" response = http_client.get(f"{gunicorn_url}/stream/streaming?chunks=1") assert response.status_code == 200 assert "Chunk 1 of 1" in response.text class TestChunkedStreaming: """Test chunked streaming with the streaming client.""" def test_stream_chunks_received(self, streaming_client, gunicorn_url): """Test that chunks are received incrementally.""" chunks = list(streaming_client.stream_chunks(f"{gunicorn_url}/stream/streaming?chunks=3")) assert len(chunks) >= 1 full_content = 
b"".join(chunks).decode("utf-8") assert "Chunk 1" in full_content assert "Chunk 3" in full_content def test_stream_variable_chunk_sizes(self, streaming_client, gunicorn_url): """Test streaming with variable chunk sizes.""" chunks = list(streaming_client.stream_chunks( f"{gunicorn_url}/stream/chunked?sizes=100,500,200" )) total_size = sum(len(c) for c in chunks) assert total_size == 800 # 100 + 500 + 200 def test_stream_lines(self, streaming_client, gunicorn_url): """Test streaming response line by line.""" lines = list(streaming_client.stream_lines(f"{gunicorn_url}/stream/streaming?chunks=5")) non_empty_lines = [l for l in lines if l.strip()] assert len(non_empty_lines) == 5 # ============================================================================ # Server-Sent Events (SSE) Tests # ============================================================================ class TestServerSentEvents: """Test Server-Sent Events functionality.""" def test_sse_content_type(self, http_client, gunicorn_url): """Test SSE has correct content type.""" response = http_client.get(f"{gunicorn_url}/stream/sse?events=1") assert response.status_code == 200 assert "text/event-stream" in response.headers.get("content-type", "") def test_sse_event_format(self, http_client, gunicorn_url): """Test SSE event format.""" response = http_client.get(f"{gunicorn_url}/stream/sse?events=3&delay=0.1") assert response.status_code == 200 # Parse SSE events events = [] for event_text in response.text.split("\n\n"): if event_text.strip(): event = {} for line in event_text.strip().split("\n"): if line.startswith("id: "): event["id"] = line[4:] elif line.startswith("event: "): event["event"] = line[7:] elif line.startswith("data: "): event["data"] = line[6:] if event: events.append(event) assert len(events) == 3 assert events[0]["id"] == "1" assert events[0]["event"] == "message" def test_sse_data_is_json(self, http_client, gunicorn_url): """Test SSE data contains valid JSON.""" response = 
http_client.get(f"{gunicorn_url}/stream/sse?events=1") assert response.status_code == 200 # Find data line for line in response.text.split("\n"): if line.startswith("data: "): data = json.loads(line[6:]) assert "id" in data assert "timestamp" in data break def test_sse_multiple_events(self, http_client, gunicorn_url): """Test receiving multiple SSE events.""" response = http_client.get(f"{gunicorn_url}/stream/sse?events=5&delay=0.05") assert response.status_code == 200 # Count events by counting "id:" lines id_count = response.text.count("id: ") assert id_count == 5 class TestSSEClient: """Test SSE with dedicated SSE client.""" def test_sse_client_receives_events(self, sse_client, gunicorn_url): """Test SSE client receives events.""" events = list(sse_client.stream(f"{gunicorn_url}/stream/sse?events=3&delay=0.1")) assert len(events) == 3 def test_sse_client_parses_data(self, sse_client, gunicorn_url): """Test SSE client parses event data.""" events = list(sse_client.stream(f"{gunicorn_url}/stream/sse?events=2&delay=0.1")) for event in events: assert event["event"] == "message" assert event["data"] is not None data = json.loads(event["data"]) assert "id" in data # ============================================================================ # NDJSON Streaming Tests # ============================================================================ class TestNDJSONStreaming: """Test Newline-Delimited JSON streaming.""" def test_ndjson_content_type(self, http_client, gunicorn_url): """Test NDJSON has correct content type.""" response = http_client.get(f"{gunicorn_url}/stream/ndjson?records=1") assert response.status_code == 200 assert "application/x-ndjson" in response.headers.get("content-type", "") def test_ndjson_format(self, http_client, gunicorn_url): """Test NDJSON line format.""" response = http_client.get(f"{gunicorn_url}/stream/ndjson?records=3&delay=0") assert response.status_code == 200 lines = response.text.strip().split("\n") assert len(lines) == 3 for i, line 
in enumerate(lines): record = json.loads(line) assert record["id"] == i + 1 assert "timestamp" in record assert "data" in record def test_ndjson_streaming(self, streaming_client, gunicorn_url): """Test NDJSON received as stream.""" lines = list(streaming_client.stream_lines( f"{gunicorn_url}/stream/ndjson?records=5&delay=0.1" )) non_empty = [l for l in lines if l.strip()] assert len(non_empty) == 5 # ============================================================================ # Slow Streaming Tests # ============================================================================ class TestSlowStreaming: """Test slow/delayed streaming responses.""" def test_slow_stream_completes(self, http_client, gunicorn_url): """Test slow stream eventually completes.""" start = time.time() response = http_client.get(f"{gunicorn_url}/stream/slow-stream?chunks=3&delay=0.2") elapsed = time.time() - start assert response.status_code == 200 assert elapsed >= 0.4 # At least 2 delays assert "Slow chunk 3/3" in response.text def test_slow_stream_chunks_timed(self, streaming_client, gunicorn_url): """Test slow stream chunks arrive at intervals.""" chunks = [] times = [] for chunk in streaming_client.stream_chunks( f"{gunicorn_url}/stream/slow-stream?chunks=3&delay=0.3" ): chunks.append(chunk) times.append(time.time()) # Should have some time between chunks if len(times) >= 2: assert times[-1] - times[0] >= 0.3 # ============================================================================ # Large Streaming Tests # ============================================================================ class TestLargeStreaming: """Test large streaming responses.""" def test_large_stream_size(self, http_client, gunicorn_url): """Test large streaming response has correct size.""" size = 1024 * 1024 # 1MB response = http_client.get(f"{gunicorn_url}/stream/large-stream?size={size}") assert response.status_code == 200 assert len(response.content) == size def test_large_stream_chunked(self, streaming_client, 
gunicorn_url): """Test large streaming response arrives in chunks.""" size = 512 * 1024 # 512KB chunk_size = 64 * 1024 # 64KB chunks chunks = list(streaming_client.stream_chunks( f"{gunicorn_url}/stream/large-stream?size={size}&chunk={chunk_size}" )) total_size = sum(len(c) for c in chunks) assert total_size == size # Should have multiple chunks assert len(chunks) >= 2 # ============================================================================ # Echo Stream Tests # ============================================================================ class TestEchoStreaming: """Test streaming echo endpoint.""" def test_echo_stream_response(self, http_client, gunicorn_url): """Test echo stream returns chunked response.""" body = b"Hello, streaming world!" response = http_client.post( f"{gunicorn_url}/stream/echo-stream", content=body ) assert response.status_code == 200 assert b"chunk" in response.content.lower() def test_echo_stream_large_body(self, http_client, gunicorn_url): """Test echo stream with large body.""" body = b"x" * (100 * 1024) # 100KB response = http_client.post( f"{gunicorn_url}/stream/echo-stream", content=body ) assert response.status_code == 200 assert b"Total chunks received" in response.content # ============================================================================ # Transfer-Encoding Tests # ============================================================================ class TestTransferEncoding: """Test Transfer-Encoding header handling.""" def test_chunked_encoding_header(self, http_client, gunicorn_url): """Test response uses chunked transfer encoding.""" response = http_client.get(f"{gunicorn_url}/stream/streaming?chunks=3") assert response.status_code == 200 # Note: httpx may decompress/dechunk, so we check the response completed assert "Chunk" in response.text def test_no_content_length_in_stream(self, http_client, gunicorn_url): """Test streaming response may not have Content-Length.""" # This is implementation-dependent; chunked 
encoding doesn't require it response = http_client.get(f"{gunicorn_url}/stream/streaming?chunks=3") assert response.status_code == 200 # The response should complete successfully regardless # ============================================================================ # Proxy Streaming Tests # ============================================================================ class TestProxyStreaming: """Test streaming through nginx proxy.""" def test_proxy_streaming(self, http_client, nginx_url): """Test streaming through proxy.""" response = http_client.get(f"{nginx_url}/stream/streaming?chunks=3") assert response.status_code == 200 assert "Chunk" in response.text def test_proxy_sse(self, http_client, nginx_url): """Test SSE through proxy.""" response = http_client.get(f"{nginx_url}/stream/sse?events=3&delay=0.1") assert response.status_code == 200 assert "text/event-stream" in response.headers.get("content-type", "") assert "id: 1" in response.text def test_proxy_large_stream(self, http_client, nginx_url): """Test large streaming through proxy.""" size = 512 * 1024 response = http_client.get(f"{nginx_url}/stream/large-stream?size={size}") assert response.status_code == 200 assert len(response.content) == size def test_proxy_slow_stream(self, streaming_client, nginx_url): """Test slow streaming through proxy.""" chunks = list(streaming_client.stream_chunks( f"{nginx_url}/stream/slow-stream?chunks=3&delay=0.2" )) full_content = b"".join(chunks).decode("utf-8") assert "Slow chunk 3/3" in full_content # ============================================================================ # HTTPS Streaming Tests # ============================================================================ @pytest.mark.ssl class TestHTTPSStreaming: """Test streaming over HTTPS.""" def test_https_streaming(self, http_client, gunicorn_ssl_url): """Test streaming over HTTPS.""" response = http_client.get(f"{gunicorn_ssl_url}/stream/streaming?chunks=3") assert response.status_code == 200 assert "Chunk" 
in response.text def test_https_sse(self, http_client, gunicorn_ssl_url): """Test SSE over HTTPS.""" response = http_client.get(f"{gunicorn_ssl_url}/stream/sse?events=2&delay=0.1") assert response.status_code == 200 assert "id: 1" in response.text def test_https_proxy_streaming(self, http_client, nginx_ssl_url): """Test streaming through HTTPS proxy.""" response = http_client.get(f"{nginx_ssl_url}/stream/streaming?chunks=3") assert response.status_code == 200 # ============================================================================ # Async Streaming Tests # ============================================================================ @pytest.mark.asyncio class TestAsyncStreaming: """Test streaming with async client.""" async def test_async_streaming(self, async_http_client_factory, gunicorn_url): """Test async streaming.""" async with await async_http_client_factory() as client: response = await client.get(f"{gunicorn_url}/stream/streaming?chunks=3") assert response.status_code == 200 assert "Chunk" in response.text async def test_async_stream_chunks(self, async_http_client_factory, gunicorn_url): """Test async streaming with iter_bytes.""" async with await async_http_client_factory() as client: chunks = [] async with client.stream("GET", f"{gunicorn_url}/stream/streaming?chunks=5") as response: async for chunk in response.aiter_bytes(): if chunk: chunks.append(chunk) full_content = b"".join(chunks).decode("utf-8") assert "Chunk 5 of 5" in full_content async def test_async_sse(self, async_http_client_factory, gunicorn_url): """Test async SSE streaming.""" async with await async_http_client_factory() as client: events = [] async with client.stream( "GET", f"{gunicorn_url}/stream/sse?events=3&delay=0.1" ) as response: buffer = "" async for chunk in response.aiter_text(): buffer += chunk while "\n\n" in buffer: event_text, buffer = buffer.split("\n\n", 1) if event_text.strip(): events.append(event_text) assert len(events) == 3 # 
============================================================================ # Edge Cases # ============================================================================ class TestStreamingEdgeCases: """Test streaming edge cases.""" def test_empty_stream(self, http_client, gunicorn_url): """Test streaming with zero chunks.""" response = http_client.get(f"{gunicorn_url}/stream/streaming?chunks=0") assert response.status_code == 200 # Should complete without error def test_single_byte_chunks(self, streaming_client, gunicorn_url): """Test with very small chunks.""" response_chunks = list(streaming_client.stream_chunks( f"{gunicorn_url}/stream/chunked?sizes=1,1,1,1,1" )) total_size = sum(len(c) for c in response_chunks) assert total_size == 5 def test_sse_no_delay(self, http_client, gunicorn_url): """Test SSE with no delay between events.""" response = http_client.get(f"{gunicorn_url}/stream/sse?events=10&delay=0") assert response.status_code == 200 assert response.text.count("id:") == 10 benoitc-gunicorn-f5fb19e/tests/docker/asgi_compliance/test_websocket_compliance.py000066400000000000000000000711651514360242400310140ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ WebSocket compliance integration tests for ASGI. Tests RFC 6455 WebSocket protocol compliance including handshake, messaging, close codes, and subprotocol negotiation. 
""" import asyncio import json import pytest pytestmark = [ pytest.mark.docker, pytest.mark.asgi, pytest.mark.websocket, pytest.mark.integration, ] # ============================================================================ # WebSocket Handshake Tests # ============================================================================ @pytest.mark.asyncio class TestWebSocketHandshake: """Test WebSocket handshake and connection establishment.""" async def test_basic_connection(self, websocket_connect, gunicorn_url): """Test basic WebSocket connection.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/echo" async with await websocket_connect(ws_url) as ws: # Connection successful - verify by sending a message await ws.send("test") response = await ws.recv() assert response == "test" async def test_echo_after_connect(self, websocket_connect, gunicorn_url): """Test sending message after connection.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/echo" async with await websocket_connect(ws_url) as ws: await ws.send("hello") response = await ws.recv() assert response == "hello" async def test_connection_path_preserved(self, websocket_connect, gunicorn_url): """Test that connection path is preserved in scope.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/scope" async with await websocket_connect(ws_url) as ws: response = await ws.recv() scope = json.loads(response) assert scope["path"] == "/ws/scope" async def test_connection_with_query_string(self, websocket_connect, gunicorn_url): """Test connection with query string.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/scope?foo=bar&baz=qux" async with await websocket_connect(ws_url) as ws: response = await ws.recv() scope = json.loads(response) assert "foo=bar" in scope["query_string"] assert "baz=qux" in scope["query_string"] # ============================================================================ # Text Message Tests # 
============================================================================ @pytest.mark.asyncio class TestTextMessages: """Test WebSocket text message handling.""" async def test_echo_text(self, websocket_connect, gunicorn_url): """Test echoing text message.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/echo" async with await websocket_connect(ws_url) as ws: await ws.send("Hello, WebSocket!") response = await ws.recv() assert response == "Hello, WebSocket!" async def test_echo_unicode(self, websocket_connect, gunicorn_url): """Test echoing unicode text.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/echo" async with await websocket_connect(ws_url) as ws: message = "Hello \u4e16\u754c! \U0001f600" # Hello World in Chinese + emoji await ws.send(message) response = await ws.recv() assert response == message async def test_echo_empty_string(self, websocket_connect, gunicorn_url): """Test echoing empty string.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/echo" async with await websocket_connect(ws_url) as ws: await ws.send("") response = await ws.recv() assert response == "" async def test_multiple_messages(self, websocket_connect, gunicorn_url): """Test sending multiple messages.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/echo" async with await websocket_connect(ws_url) as ws: messages = ["first", "second", "third"] for msg in messages: await ws.send(msg) response = await ws.recv() assert response == msg async def test_rapid_messages(self, websocket_connect, gunicorn_url): """Test sending messages rapidly.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/echo" async with await websocket_connect(ws_url) as ws: count = 100 for i in range(count): await ws.send(f"message {i}") for i in range(count): response = await ws.recv() assert f"message {i}" == response # ============================================================================ # Binary Message Tests # 
============================================================================ @pytest.mark.asyncio class TestBinaryMessages: """Test WebSocket binary message handling.""" async def test_echo_binary(self, websocket_connect, gunicorn_url): """Test echoing binary message.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/echo-binary" async with await websocket_connect(ws_url) as ws: data = b"\x00\x01\x02\x03\x04\x05" await ws.send(data) response = await ws.recv() assert response == data async def test_echo_binary_large(self, websocket_connect, gunicorn_url): """Test echoing larger binary message.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/echo-binary" async with await websocket_connect(ws_url) as ws: data = bytes(range(256)) * 100 # 25.6KB await ws.send(data) response = await ws.recv() assert response == data async def test_text_to_binary_conversion(self, websocket_connect, gunicorn_url): """Test text converted to binary in binary endpoint.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/echo-binary" async with await websocket_connect(ws_url) as ws: await ws.send("hello") response = await ws.recv() assert response == b"hello" # ============================================================================ # Subprotocol Negotiation Tests # ============================================================================ @pytest.mark.asyncio class TestSubprotocols: """Test WebSocket subprotocol negotiation.""" async def test_single_subprotocol(self, websocket_connect, gunicorn_url): """Test single subprotocol negotiation.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/subprotocol" async with await websocket_connect(ws_url, subprotocols=["json"]) as ws: response = await ws.recv() data = json.loads(response) assert data["selected"] == "json" assert data["requested"] == ["json"] async def test_multiple_subprotocols(self, websocket_connect, gunicorn_url): """Test multiple subprotocol negotiation.""" ws_url = 
gunicorn_url.replace("http://", "ws://") + "/ws/subprotocol" async with await websocket_connect(ws_url, subprotocols=["wamp", "json"]) as ws: response = await ws.recv() data = json.loads(response) # Server prefers json over wamp assert data["selected"] == "json" assert set(data["requested"]) == {"wamp", "json"} async def test_preferred_subprotocol(self, websocket_connect, gunicorn_url): """Test server-preferred subprotocol selection.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/subprotocol" async with await websocket_connect(ws_url, subprotocols=["json", "graphql-ws"]) as ws: response = await ws.recv() data = json.loads(response) # Server prefers graphql-ws assert data["selected"] == "graphql-ws" async def test_no_subprotocol(self, websocket_connect, gunicorn_url): """Test connection without subprotocol.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/subprotocol" async with await websocket_connect(ws_url) as ws: response = await ws.recv() data = json.loads(response) assert data["selected"] is None assert data["requested"] == [] # ============================================================================ # Close Code Tests # ============================================================================ @pytest.mark.asyncio class TestCloseCodes: """Test WebSocket close code handling.""" async def test_normal_close(self, websocket_connect, gunicorn_url): """Test normal close (1000).""" websockets = pytest.importorskip("websockets") ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/close?code=1000" async with await websocket_connect(ws_url) as ws: try: await ws.recv() except websockets.exceptions.ConnectionClosed as e: assert e.code == 1000 async def test_going_away_close(self, websocket_connect, gunicorn_url): """Test going away close (1001).""" websockets = pytest.importorskip("websockets") ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/close?code=1001" async with await websocket_connect(ws_url) as ws: try: await ws.recv() 
except websockets.exceptions.ConnectionClosed as e: assert e.code == 1001 async def test_protocol_error_close(self, websocket_connect, gunicorn_url): """Test protocol error close (1002).""" websockets = pytest.importorskip("websockets") ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/close?code=1002" async with await websocket_connect(ws_url) as ws: try: await ws.recv() except websockets.exceptions.ConnectionClosed as e: assert e.code == 1002 async def test_close_with_reason(self, websocket_connect, gunicorn_url): """Test close with reason message.""" websockets = pytest.importorskip("websockets") ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/close?code=1000&reason=goodbye" async with await websocket_connect(ws_url) as ws: try: await ws.recv() except websockets.exceptions.ConnectionClosed as e: assert e.code == 1000 assert e.reason == "goodbye" async def test_application_close_code(self, websocket_connect, gunicorn_url): """Test application-defined close code (4000+).""" websockets = pytest.importorskip("websockets") ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/close?code=4001" async with await websocket_connect(ws_url) as ws: try: await ws.recv() except websockets.exceptions.ConnectionClosed as e: assert e.code == 4001 # ============================================================================ # Connection Rejection Tests # ============================================================================ @pytest.mark.asyncio class TestConnectionRejection: """Test WebSocket connection rejection.""" async def test_reject_connection(self, websocket_connect, gunicorn_url): """Test connection rejection.""" websockets = pytest.importorskip("websockets") ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/reject" # websockets v16+ raises InvalidStatus, older versions raise InvalidStatusCode with pytest.raises((websockets.exceptions.InvalidStatus, Exception)): async with await websocket_connect(ws_url): pass # 
============================================================================ # WebSocket Scope Tests # ============================================================================ @pytest.mark.asyncio class TestWebSocketScope: """Test WebSocket ASGI scope correctness.""" async def test_scope_type(self, websocket_connect, gunicorn_url): """Test scope type is 'websocket'.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/scope" async with await websocket_connect(ws_url) as ws: response = await ws.recv() scope = json.loads(response) assert scope["type"] == "websocket" async def test_scope_asgi_version(self, websocket_connect, gunicorn_url): """Test scope has ASGI version.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/scope" async with await websocket_connect(ws_url) as ws: response = await ws.recv() scope = json.loads(response) assert "asgi" in scope assert "version" in scope["asgi"] async def test_scope_http_version(self, websocket_connect, gunicorn_url): """Test scope has HTTP version.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/scope" async with await websocket_connect(ws_url) as ws: response = await ws.recv() scope = json.loads(response) assert scope["http_version"] in ["1.0", "1.1", "2"] async def test_scope_scheme(self, websocket_connect, gunicorn_url): """Test scope scheme is 'ws'.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/scope" async with await websocket_connect(ws_url) as ws: response = await ws.recv() scope = json.loads(response) assert scope["scheme"] == "ws" async def test_scope_server(self, websocket_connect, gunicorn_url): """Test scope has server info.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/scope" async with await websocket_connect(ws_url) as ws: response = await ws.recv() scope = json.loads(response) assert scope["server"] is not None assert len(scope["server"]) == 2 # (host, port) async def test_scope_client(self, websocket_connect, gunicorn_url): """Test scope has client 
info.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/scope" async with await websocket_connect(ws_url) as ws: response = await ws.recv() scope = json.loads(response) assert scope["client"] is not None assert len(scope["client"]) == 2 # (host, port) async def test_scope_headers(self, websocket_connect, gunicorn_url): """Test scope has headers.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/scope" async with await websocket_connect( ws_url, additional_headers={"X-Custom-Header": "test-value"} ) as ws: response = await ws.recv() scope = json.loads(response) headers = {name.lower(): value for name, value in scope["headers"]} assert "x-custom-header" in headers assert headers["x-custom-header"] == "test-value" # ============================================================================ # Large Message Tests # ============================================================================ @pytest.mark.asyncio class TestLargeMessages: """Test large WebSocket message handling.""" async def test_receive_large_message(self, websocket_connect, gunicorn_url): """Test receiving large message from server.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/large?size=65536" async with await websocket_connect(ws_url) as ws: response = await ws.recv() assert len(response) == 65536 assert response == "x" * 65536 async def test_send_large_message(self, websocket_connect, gunicorn_url): """Test sending large message to server.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/large?size=1024" async with await websocket_connect(ws_url) as ws: # First receive server's large message _ = await ws.recv() # Send our large message large_data = "y" * 100000 await ws.send(large_data) response = await ws.recv() data = json.loads(response) assert data["received_length"] == 100000 async def test_various_sizes(self, websocket_connect, gunicorn_url): """Test various message sizes.""" sizes = [1, 100, 1000, 10000, 50000] for size in sizes: ws_url = 
gunicorn_url.replace("http://", "ws://") + f"/ws/large?size={size}" async with await websocket_connect(ws_url) as ws: response = await ws.recv() assert len(response) == size, f"Expected {size}, got {len(response)}" # ============================================================================ # Broadcast/Multiple Message Tests # ============================================================================ @pytest.mark.asyncio class TestBroadcast: """Test broadcast-style multiple message sending.""" async def test_broadcast_default_count(self, websocket_connect, gunicorn_url): """Test broadcast with default count (3).""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/broadcast" async with await websocket_connect(ws_url) as ws: await ws.send("test message") responses = [] for _ in range(3): response = await ws.recv() responses.append(json.loads(response)) assert len(responses) == 3 for i, resp in enumerate(responses): assert resp["copy"] == i + 1 assert resp["of"] == 3 assert resp["message"] == "test message" async def test_broadcast_custom_count(self, websocket_connect, gunicorn_url): """Test broadcast with custom count.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/broadcast?count=5" async with await websocket_connect(ws_url) as ws: await ws.send("hello") responses = [] for _ in range(5): response = await ws.recv() responses.append(json.loads(response)) assert len(responses) == 5 # ============================================================================ # Delayed Response Tests # ============================================================================ @pytest.mark.asyncio class TestDelayedResponses: """Test WebSocket delayed responses.""" async def test_delayed_response(self, websocket_connect, gunicorn_url): """Test delayed response.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/delay?seconds=0.5" async with await websocket_connect(ws_url) as ws: import time start = time.time() await ws.send("ping") response = await 
asyncio.wait_for(ws.recv(), timeout=5.0) elapsed = time.time() - start assert elapsed >= 0.4 # Allow some tolerance data = json.loads(response) assert data["delayed_by"] == 0.5 assert data["message"] == "ping" async def test_minimal_delay(self, websocket_connect, gunicorn_url): """Test with minimal delay.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/delay?seconds=0.1" async with await websocket_connect(ws_url) as ws: await ws.send("quick") response = await asyncio.wait_for(ws.recv(), timeout=5.0) data = json.loads(response) assert data["delayed_by"] == 0.1 # ============================================================================ # Fragmented Message Tests # ============================================================================ @pytest.mark.asyncio class TestFragmentedMessages: """Test fragmented WebSocket message handling.""" async def test_fragmented_endpoint(self, websocket_connect, gunicorn_url): """Test fragmented message info endpoint.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/fragmented" async with await websocket_connect(ws_url) as ws: # First receive info message info = await ws.recv() data = json.loads(info) assert "info" in data async def test_message_reassembly(self, websocket_connect, gunicorn_url): """Test that messages are reassembled correctly.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/fragmented" async with await websocket_connect(ws_url) as ws: # Skip info message await ws.recv() # Send message await ws.send("complete message") response = await ws.recv() data = json.loads(response) assert data["received"] == "complete message" assert data["length"] == len("complete message") assert data["type"] == "text" # ============================================================================ # Proxy WebSocket Tests # ============================================================================ @pytest.mark.asyncio class TestProxyWebSocket: """Test WebSocket through nginx proxy.""" async def 
test_proxy_echo(self, websocket_connect, nginx_url): """Test echo through proxy.""" ws_url = nginx_url.replace("http://", "ws://") + "/ws/echo" async with await websocket_connect(ws_url) as ws: await ws.send("proxied message") response = await ws.recv() assert response == "proxied message" async def test_proxy_binary(self, websocket_connect, nginx_url): """Test binary echo through proxy.""" ws_url = nginx_url.replace("http://", "ws://") + "/ws/echo-binary" async with await websocket_connect(ws_url) as ws: data = b"\x00\x01\x02\x03" await ws.send(data) response = await ws.recv() assert response == data async def test_proxy_subprotocol(self, websocket_connect, nginx_url): """Test subprotocol through proxy.""" ws_url = nginx_url.replace("http://", "ws://") + "/ws/subprotocol" async with await websocket_connect(ws_url, subprotocols=["json"]) as ws: response = await ws.recv() data = json.loads(response) assert data["selected"] == "json" async def test_proxy_scope(self, websocket_connect, nginx_url): """Test scope through proxy.""" ws_url = nginx_url.replace("http://", "ws://") + "/ws/scope" async with await websocket_connect(ws_url) as ws: response = await ws.recv() scope = json.loads(response) assert scope["type"] == "websocket" assert scope["path"] == "/ws/scope" # ============================================================================ # HTTPS WebSocket Tests # ============================================================================ @pytest.mark.ssl @pytest.mark.asyncio class TestSecureWebSocket: """Test WebSocket over SSL/TLS.""" async def test_wss_connection(self, websocket_connect, gunicorn_ssl_url): """Test WSS connection.""" ws_url = gunicorn_ssl_url.replace("https://", "wss://") + "/ws/echo" async with await websocket_connect(ws_url) as ws: await ws.send("secure message") response = await ws.recv() assert response == "secure message" async def test_wss_scope_scheme(self, websocket_connect, gunicorn_ssl_url): """Test WSS scope has correct scheme.""" 
ws_url = gunicorn_ssl_url.replace("https://", "wss://") + "/ws/scope" async with await websocket_connect(ws_url) as ws: response = await ws.recv() scope = json.loads(response) assert scope["scheme"] == "wss" async def test_wss_through_proxy(self, websocket_connect, nginx_ssl_url): """Test WSS through nginx proxy.""" ws_url = nginx_ssl_url.replace("https://", "wss://") + "/ws/echo" async with await websocket_connect(ws_url) as ws: await ws.send("secure proxied") response = await ws.recv() assert response == "secure proxied" # ============================================================================ # Concurrent Connection Tests # ============================================================================ @pytest.mark.asyncio class TestConcurrentConnections: """Test concurrent WebSocket connections.""" async def test_multiple_concurrent_connections(self, websocket_connect, gunicorn_url): """Test multiple concurrent WebSocket connections.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/echo" async def echo_task(task_id): async with await websocket_connect(ws_url) as ws: message = f"task-{task_id}" await ws.send(message) response = await ws.recv() assert response == message return task_id # Run 10 concurrent connections tasks = [echo_task(i) for i in range(10)] results = await asyncio.gather(*tasks) assert len(results) == 10 assert set(results) == set(range(10)) async def test_concurrent_different_endpoints(self, websocket_connect, gunicorn_url): """Test concurrent connections to different endpoints.""" base_ws = gunicorn_url.replace("http://", "ws://") async def echo_text(): async with await websocket_connect(base_ws + "/ws/echo") as ws: await ws.send("text") return await ws.recv() async def echo_binary(): async with await websocket_connect(base_ws + "/ws/echo-binary") as ws: await ws.send(b"binary") return await ws.recv() async def get_scope(): async with await websocket_connect(base_ws + "/ws/scope") as ws: return await ws.recv() results = await 
asyncio.gather( echo_text(), echo_binary(), get_scope(), ) assert results[0] == "text" assert results[1] == b"binary" scope = json.loads(results[2]) assert scope["type"] == "websocket" # ============================================================================ # Edge Cases # ============================================================================ @pytest.mark.asyncio class TestWebSocketEdgeCases: """Test WebSocket edge cases.""" async def test_unknown_path(self, websocket_connect, gunicorn_url): """Test connection to unknown path.""" websockets = pytest.importorskip("websockets") ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/unknown-path" async with await websocket_connect(ws_url) as ws: response = await ws.recv() data = json.loads(response) assert data["error"] == "Unknown path" # Connection will be closed try: await ws.recv() except websockets.exceptions.ConnectionClosed: pass async def test_special_characters_in_message(self, websocket_connect, gunicorn_url): """Test messages with special characters.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/echo" async with await websocket_connect(ws_url) as ws: special = "!@#$%^&*()_+-=[]{}|;':\",./<>?\n\t\r" await ws.send(special) response = await ws.recv() assert response == special async def test_null_bytes_in_binary(self, websocket_connect, gunicorn_url): """Test binary message with null bytes.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/echo-binary" async with await websocket_connect(ws_url) as ws: data = b"\x00\x00\x00" await ws.send(data) response = await ws.recv() assert response == data async def test_json_message(self, websocket_connect, gunicorn_url): """Test JSON in text message.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/echo" async with await websocket_connect(ws_url) as ws: payload = json.dumps({"key": "value", "number": 42, "list": [1, 2, 3]}) await ws.send(payload) response = await ws.recv() assert json.loads(response) == {"key": "value", 
"number": 42, "list": [1, 2, 3]} async def test_rapid_close_reconnect(self, websocket_connect, gunicorn_url): """Test rapid close and reconnect.""" ws_url = gunicorn_url.replace("http://", "ws://") + "/ws/echo" for i in range(5): async with await websocket_connect(ws_url) as ws: await ws.send(f"iteration {i}") response = await ws.recv() assert response == f"iteration {i}" benoitc-gunicorn-f5fb19e/tests/docker/dirty_arbiter/000077500000000000000000000000001514360242400227375ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/tests/docker/dirty_arbiter/Dockerfile000066400000000000000000000011611514360242400247300ustar00rootroot00000000000000FROM python:3.12-slim WORKDIR /app # Copy gunicorn source COPY . /app/gunicorn-src # Install gunicorn and test dependencies # setproctitle is needed for process title changes (master, dirty-arbiter, etc.) RUN pip install --no-cache-dir /app/gunicorn-src pytest requests setproctitle # Copy test app files COPY tests/docker/dirty_arbiter/app.py /app/ COPY tests/docker/dirty_arbiter/gunicorn_conf.py /app/ # Install procps for process inspection RUN apt-get update && apt-get install -y procps && rm -rf /var/lib/apt/lists/* # Default command - run gunicorn CMD ["gunicorn", "app:application", "-c", "gunicorn_conf.py"] benoitc-gunicorn-f5fb19e/tests/docker/dirty_arbiter/README.md000066400000000000000000000073271514360242400242270ustar00rootroot00000000000000# Docker-Based Dirty Arbiter Integration Tests This directory contains Docker-based integration tests that verify the dirty arbiter process lifecycle under realistic conditions. ## Overview These tests verify: 1. **Parent Death Detection**: Dirty arbiter self-terminates when main arbiter dies unexpectedly (SIGKILL) 2. **Orphan Cleanup**: Old dirty arbiter processes are cleaned up on restart 3. **Respawning**: Main arbiter respawns dirty arbiter when it crashes 4. 
**Graceful Shutdown**: Both arbiters exit cleanly on SIGTERM ## Prerequisites - Docker - Python 3.10+ - pytest ## Quick Start ```bash # Build the Docker image docker compose build # Run all tests pytest test_parent_death.py -v # Run specific test pytest test_parent_death.py::TestParentDeath::test_dirty_arbiter_exits_on_parent_sigkill -v ``` ## Manual Verification You can manually verify the behavior: ```bash # Start the container docker compose up -d # Check running processes docker exec dirty_arbiter-gunicorn-1 ps aux | grep gunicorn # SIGKILL the master and watch dirty arbiter exit MASTER_PID=$(docker exec dirty_arbiter-gunicorn-1 pgrep -f "gunicorn: master") docker exec dirty_arbiter-gunicorn-1 kill -9 $MASTER_PID # After ~2 seconds, check that all gunicorn processes exited docker exec dirty_arbiter-gunicorn-1 ps aux | grep gunicorn # View logs docker logs dirty_arbiter-gunicorn-1 # Cleanup docker compose down ``` ## Test Scenarios ### Scenario 1: Parent SIGKILL Tests that the dirty arbiter detects parent death via ppid check: 1. Start gunicorn with dirty workers 2. SIGKILL the main arbiter (bypasses graceful shutdown) 3. Verify dirty arbiter detects ppid change within ~2 seconds 4. Verify no orphan processes remain ### Scenario 2: Orphan Cleanup Tests the `_cleanup_orphaned_dirty_arbiter()` mechanism: 1. Start gunicorn, note dirty arbiter PID 2. SIGKILL main arbiter (dirty arbiter becomes orphan) 3. Restart gunicorn 4. Verify old dirty arbiter was cleaned up 5. Verify new dirty arbiter spawned ### Scenario 3: Dirty Arbiter Respawn Tests that main arbiter respawns a dead dirty arbiter: 1. Start gunicorn 2. SIGKILL the dirty arbiter 3. Wait for respawn (~1-2 seconds) 4. Verify new dirty arbiter is running ### Scenario 4: Graceful Shutdown Tests clean shutdown via SIGTERM: 1. Start gunicorn with dirty workers 2. SIGTERM the main arbiter 3. Verify both arbiters exit cleanly within graceful_timeout 4. 
Verify clean exit logs ## Files | File | Description | |------|-------------| | `Dockerfile` | Container build configuration | | `docker-compose.yml` | Container orchestration | | `app.py` | Simple WSGI app with TestDirtyApp | | `gunicorn_conf.py` | Gunicorn configuration | | `test_parent_death.py` | pytest integration tests | | `README.md` | This file | ## Configuration The `gunicorn_conf.py` uses: - 1 sync worker - 1 dirty worker - 5 second graceful timeout (for faster tests) - Debug logging ## Expected Log Messages When verifying behavior, look for these log messages: | Message | Meaning | |---------|---------| | `Parent changed, shutting down dirty arbiter` | ppid detection triggered | | `Killing orphaned dirty arbiter` | Orphan cleanup activated | | `Spawning dirty arbiter` | New dirty arbiter being created | | `Dirty arbiter exiting` | Clean shutdown | ## Troubleshooting **Tests time out waiting for container**: - Check Docker is running - Check no port conflicts on 8000 - Try `docker compose down` and rebuild **Dirty arbiter doesn't exit after parent death**: - Check ppid detection is working (logs should show check) - The check runs every 1 second, so allow 2-3 seconds **Container logs not showing expected messages**: - Verify loglevel is set to "debug" in gunicorn_conf.py - Check `docker logs ` for full output benoitc-gunicorn-f5fb19e/tests/docker/dirty_arbiter/app.py000066400000000000000000000012601514360242400240700ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Simple WSGI and Dirty applications for integration testing. 
""" from gunicorn.dirty.app import DirtyApp def application(environ, start_response): """Simple WSGI application.""" start_response('200 OK', [('Content-Type', 'text/plain')]) return [b'OK'] class TestDirtyApp(DirtyApp): """Minimal dirty app for testing process lifecycle.""" def init(self): self.call_count = 0 def ping(self): self.call_count += 1 return {"pong": True, "calls": self.call_count} def echo(self, message): return {"message": message} benoitc-gunicorn-f5fb19e/tests/docker/dirty_arbiter/docker-compose.yml000066400000000000000000000004571514360242400264020ustar00rootroot00000000000000services: gunicorn: build: context: ../../.. dockerfile: tests/docker/dirty_arbiter/Dockerfile ports: - "8000:8000" healthcheck: test: ["CMD", "curl", "-f", "http://localhost:8000/"] interval: 1s timeout: 1s retries: 30 stop_grace_period: 10s benoitc-gunicorn-f5fb19e/tests/docker/dirty_arbiter/gunicorn_conf.py000066400000000000000000000006171514360242400261460ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Gunicorn configuration for integration tests. """ bind = "0.0.0.0:8000" workers = 1 worker_class = "sync" dirty_workers = 1 dirty_apps = ["app:TestDirtyApp"] dirty_timeout = 30 dirty_graceful_timeout = 5 timeout = 30 graceful_timeout = 5 loglevel = "debug" accesslog = "-" errorlog = "-" benoitc-gunicorn-f5fb19e/tests/docker/dirty_arbiter/test_parent_death.py000066400000000000000000000442771514360242400270240ustar00rootroot00000000000000#!/usr/bin/env python # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Docker-based integration tests for dirty arbiter process lifecycle. These tests verify: 1. Dirty arbiter self-terminates when main arbiter dies unexpectedly (SIGKILL) 2. Orphan cleanup works on gunicorn restart 3. Dirty arbiter respawn works when it dies 4. 
Graceful shutdown terminates both arbiters cleanly Usage: # Build the container first docker compose build # Run all tests pytest test_parent_death.py -v # Run specific test pytest test_parent_death.py::TestParentDeath::test_dirty_arbiter_exits_on_parent_sigkill -v """ import os import re import subprocess import time import pytest class DockerContainer: """Context manager for managing a Docker container.""" def __init__(self, name="gunicorn-test", build=True): self.name = name self.build = build self.container_id = None def __enter__(self): # Build if requested if self.build: result = subprocess.run( ["docker", "compose", "build"], cwd=os.path.dirname(__file__), capture_output=True, text=True, ) if result.returncode != 0: raise RuntimeError(f"Docker build failed: {result.stderr}") # Remove any existing container with same name subprocess.run( ["docker", "rm", "-f", self.name], capture_output=True, ) # Start container with a keep-alive wrapper # This runs gunicorn in background so killing master doesn't exit container # The wrapper keeps container alive for observation after master death result = subprocess.run( [ "docker", "run", "-d", "--name", self.name, "-p", "8000:8000", "dirty_arbiter-gunicorn", "sh", "-c", "gunicorn app:application -c gunicorn_conf.py & " "GUNICORN_PID=$!; " "trap 'kill $GUNICORN_PID 2>/dev/null' TERM; " "while true; do sleep 1; done" ], capture_output=True, text=True, ) if result.returncode != 0: raise RuntimeError(f"Docker run failed: {result.stderr}") self.container_id = result.stdout.strip() # Wait for gunicorn to be ready self._wait_for_ready() return self def __exit__(self, exc_type, exc_val, exc_tb): if self.container_id: # Get logs before cleanup logs = self.get_logs() if exc_val: print(f"\n=== Container logs ===\n{logs}\n=== End logs ===\n") # Stop and remove container subprocess.run( ["docker", "rm", "-f", self.name], capture_output=True, ) def _wait_for_ready(self, timeout=30): """Wait for gunicorn to be ready.""" start = 
time.time() while time.time() - start < timeout: pids = self.get_gunicorn_pids() if pids.get("master") and pids.get("dirty-arbiter"): # Both processes are running return time.sleep(0.5) raise TimeoutError("Gunicorn did not start within timeout") def exec(self, cmd, check=True): """Execute a command in the container.""" result = subprocess.run( ["docker", "exec", self.name] + cmd, capture_output=True, text=True, ) if check and result.returncode != 0: raise RuntimeError(f"Command failed: {cmd}\n{result.stderr}") return result def get_logs(self): """Get container logs.""" result = subprocess.run( ["docker", "logs", self.name], capture_output=True, text=True, ) return result.stdout + result.stderr def get_gunicorn_pids(self): """Get PIDs of gunicorn processes. Uses ps output with proctitle if available, otherwise falls back to process tree analysis. """ pids = { "master": None, "dirty-arbiter": None, "workers": [], "dirty-workers": [], } # First try using proctitle-based detection result = self.exec(["ps", "aux"], check=False) proctitle_found = False for line in result.stdout.split("\n"): if "gunicorn:" not in line: continue proctitle_found = True parts = line.split() if len(parts) < 2: continue pid = int(parts[1]) if "gunicorn: master" in line: pids["master"] = pid elif "gunicorn: dirty-arbiter" in line: pids["dirty-arbiter"] = pid elif "gunicorn: dirty-worker" in line: pids["dirty-workers"].append(pid) elif "gunicorn: worker" in line: pids["workers"].append(pid) if proctitle_found: return pids # Fallback: use process tree analysis # Get ps output with ppid info result = self.exec(["ps", "-eo", "pid,ppid,comm"], check=False) gunicorn_procs = [] for line in result.stdout.split("\n"): if "gunicorn" not in line and "python" not in line: continue parts = line.split() if len(parts) >= 3: try: pid = int(parts[0]) ppid = int(parts[1]) gunicorn_procs.append((pid, ppid)) except ValueError: continue # Build process tree # Master: gunicorn process whose parent is init (pid 1 or 
docker-init) # Dirty-arbiter: child of master # Workers: children of master (that aren't dirty-arbiter) # Dirty-workers: children of dirty-arbiter for pid, ppid in gunicorn_procs: if ppid == 1 or ppid == 0: # This is the master (or docker-init spawned process) # Check if it's actually docker-init by checking its children continue if ppid not in [p for p, _ in gunicorn_procs]: # Parent isn't a gunicorn process - this is master pids["master"] = pid # Now identify children if pids["master"]: master_children = [p for p, pp in gunicorn_procs if pp == pids["master"]] # Get first child as dirty-arbiter (forked first from spawn_dirty_arbiter) # and check if it has children (dirty workers) for child_pid in master_children: child_children = [p for p, pp in gunicorn_procs if pp == child_pid] if child_children: # This child has children, so it's the dirty-arbiter pids["dirty-arbiter"] = child_pid pids["dirty-workers"] = child_children else: # No children, it's a regular worker pids["workers"].append(child_pid) return pids def kill_process(self, pid, signal=9): """Send a signal to a process in the container.""" self.exec( ["kill", f"-{signal}", str(pid)], check=False, ) def wait_for_process_exit(self, pid, timeout=5): """Wait for a specific process to exit.""" start = time.time() while time.time() - start < timeout: result = self.exec( ["ps", "-p", str(pid)], check=False, ) if result.returncode != 0: # Process no longer exists return True time.sleep(0.2) return False def wait_for_no_gunicorn(self, timeout=5): """Wait until no gunicorn processes are running.""" start = time.time() while time.time() - start < timeout: pids = self.get_gunicorn_pids() if not any([ pids["master"], pids["dirty-arbiter"], pids["workers"], pids["dirty-workers"], ]): return True time.sleep(0.2) return False def wait_for_dirty_arbiter(self, timeout=10, exclude_pid=None): """Wait for a dirty arbiter to be running.""" start = time.time() while time.time() - start < timeout: pids = self.get_gunicorn_pids() 
da_pid = pids.get("dirty-arbiter") if da_pid and da_pid != exclude_pid: return da_pid time.sleep(0.5) return None def restart_gunicorn(self): """Restart gunicorn in the container.""" # Start gunicorn in background self.exec( ["sh", "-c", "gunicorn app:application -c gunicorn_conf.py &"], check=False, ) # Wait for it to be ready self._wait_for_ready() class TestParentDeath: """Test dirty arbiter behavior when parent dies.""" @pytest.fixture(autouse=True) def setup(self): """Check Docker is available.""" result = subprocess.run( ["docker", "info"], capture_output=True, ) if result.returncode != 0: pytest.skip("Docker is not available") def test_dirty_arbiter_exits_on_parent_sigkill(self): """Dirty arbiter should exit when main arbiter is SIGKILLed. This tests the ppid detection mechanism in the dirty arbiter. When the main arbiter is killed with SIGKILL (which bypasses graceful shutdown), the dirty arbiter should detect the parent change and exit within ~2 seconds. """ with DockerContainer() as container: # Get initial PIDs pids = container.get_gunicorn_pids() master_pid = pids["master"] dirty_arbiter_pid = pids["dirty-arbiter"] assert master_pid is not None, "Master should be running" assert dirty_arbiter_pid is not None, "Dirty arbiter should be running" # SIGKILL the main arbiter (bypasses graceful shutdown) container.kill_process(master_pid, signal=9) # Wait for dirty arbiter to detect parent death and exit # The ppid check runs every 1 second. During shutdown, the arbiter # may take extra time to complete worker cleanup and handle SIGCHLD. 
exited = container.wait_for_process_exit(dirty_arbiter_pid, timeout=10) assert exited, ( f"Dirty arbiter (pid:{dirty_arbiter_pid}) should have exited " "after parent was killed" ) # Verify no orphan gunicorn processes remain # HTTP workers check ppid during request loop, so may take longer to exit assert container.wait_for_no_gunicorn(timeout=15), ( "No gunicorn processes should remain after parent death" ) # Check logs for expected message logs = container.get_logs() assert "Parent changed, shutting down dirty arbiter" in logs, ( "Dirty arbiter should log parent death detection" ) def test_orphan_cleanup_on_restart(self): """Orphaned dirty arbiter should be cleaned up on restart. This tests the _cleanup_orphaned_dirty_arbiter() mechanism. When gunicorn restarts after a crash, it should kill any orphaned dirty arbiter from the previous instance. """ with DockerContainer() as container: # Get initial PIDs pids = container.get_gunicorn_pids() master_pid = pids["master"] dirty_arbiter_pid = pids["dirty-arbiter"] assert master_pid is not None assert dirty_arbiter_pid is not None # SIGKILL the main arbiter - dirty arbiter becomes orphan # but will self-terminate via ppid detection container.kill_process(master_pid, signal=9) # Wait for all gunicorn processes to exit before restarting # (including HTTP workers which take longer due to ppid check interval) container.wait_for_no_gunicorn(timeout=20) # Now restart gunicorn container.restart_gunicorn() # Get new PIDs new_pids = container.get_gunicorn_pids() new_dirty_arbiter_pid = new_pids["dirty-arbiter"] assert new_dirty_arbiter_pid is not None, ( "New dirty arbiter should have spawned" ) assert new_dirty_arbiter_pid != dirty_arbiter_pid, ( "New dirty arbiter should have different PID" ) # Check logs for orphan cleanup or normal startup logs = container.get_logs() # Either the orphan was cleaned up, or ppid detection worked assert ( "Killing orphaned dirty arbiter" in logs or "Parent changed, shutting down dirty arbiter" 
in logs or "Dirty arbiter starting" in logs ) def test_dirty_arbiter_respawn(self): """Main arbiter should respawn dead dirty arbiter. When the dirty arbiter dies (e.g., killed or crashed), the main arbiter should detect this and spawn a new one. """ with DockerContainer() as container: # Get initial PIDs pids = container.get_gunicorn_pids() master_pid = pids["master"] old_dirty_arbiter_pid = pids["dirty-arbiter"] assert master_pid is not None assert old_dirty_arbiter_pid is not None # SIGKILL the dirty arbiter container.kill_process(old_dirty_arbiter_pid, signal=9) # Wait for respawn - main arbiter should spawn a new one new_dirty_arbiter_pid = container.wait_for_dirty_arbiter( timeout=10, exclude_pid=old_dirty_arbiter_pid, ) assert new_dirty_arbiter_pid is not None, ( "Main arbiter should respawn dirty arbiter" ) assert new_dirty_arbiter_pid != old_dirty_arbiter_pid, ( "New dirty arbiter should have different PID" ) # Verify main arbiter is still running pids = container.get_gunicorn_pids() assert pids["master"] == master_pid, ( "Main arbiter should still be running" ) # Check logs logs = container.get_logs() assert "Spawning dirty arbiter" in logs or "Spawned dirty arbiter" in logs def test_graceful_shutdown(self): """SIGTERM should cleanly shutdown both arbiters. When the main arbiter receives SIGTERM, it should signal the dirty arbiter and wait for both to exit cleanly. 
""" with DockerContainer() as container: # Get initial PIDs pids = container.get_gunicorn_pids() master_pid = pids["master"] dirty_arbiter_pid = pids["dirty-arbiter"] assert master_pid is not None assert dirty_arbiter_pid is not None # Send SIGTERM to main arbiter container.kill_process(master_pid, signal=15) # Wait for both to exit cleanly # Graceful timeout is 5 seconds in config assert container.wait_for_no_gunicorn(timeout=10), ( "All gunicorn processes should exit on SIGTERM" ) # Check logs for graceful shutdown indicators logs = container.get_logs() assert "Dirty arbiter exiting" in logs, ( "Dirty arbiter should log clean exit" ) def test_sigquit_quick_shutdown(self): """SIGQUIT should quickly shutdown both arbiters. SIGQUIT triggers a faster shutdown than SIGTERM. """ with DockerContainer() as container: # Get initial PIDs pids = container.get_gunicorn_pids() master_pid = pids["master"] dirty_arbiter_pid = pids["dirty-arbiter"] assert master_pid is not None assert dirty_arbiter_pid is not None # Send SIGQUIT to main arbiter container.kill_process(master_pid, signal=3) # Both should exit quickly assert container.wait_for_no_gunicorn(timeout=5), ( "All gunicorn processes should exit on SIGQUIT" ) class TestDirtyArbiterWorkers: """Test dirty arbiter worker management.""" @pytest.fixture(autouse=True) def setup(self): """Check Docker is available.""" result = subprocess.run( ["docker", "info"], capture_output=True, ) if result.returncode != 0: pytest.skip("Docker is not available") def test_dirty_worker_exists(self): """Dirty arbiter should spawn dirty worker(s).""" with DockerContainer() as container: pids = container.get_gunicorn_pids() assert pids["master"] is not None assert pids["dirty-arbiter"] is not None assert len(pids["dirty-workers"]) >= 1, ( "At least one dirty worker should be running" ) def test_dirty_worker_respawn(self): """Dirty arbiter should respawn killed dirty workers.""" with DockerContainer() as container: pids = 
container.get_gunicorn_pids() old_dirty_worker_pid = pids["dirty-workers"][0] # Kill the dirty worker container.kill_process(old_dirty_worker_pid, signal=9) # Wait for respawn start = time.time() new_dirty_worker_pid = None while time.time() - start < 10: pids = container.get_gunicorn_pids() if pids["dirty-workers"]: new_pid = pids["dirty-workers"][0] if new_pid != old_dirty_worker_pid: new_dirty_worker_pid = new_pid break time.sleep(0.5) assert new_dirty_worker_pid is not None, ( "Dirty arbiter should respawn killed dirty worker" ) if __name__ == "__main__": pytest.main([__file__, "-v"]) benoitc-gunicorn-f5fb19e/tests/docker/dirty_ttin_ttou/000077500000000000000000000000001514360242400233405ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/tests/docker/dirty_ttin_ttou/Dockerfile000066400000000000000000000006611514360242400253350ustar00rootroot00000000000000FROM python:3.12-slim RUN apt-get update && apt-get install -y --no-install-recommends \ curl procps \ && rm -rf /var/lib/apt/lists/* WORKDIR /app # Install gunicorn from source COPY . /gunicorn-src/ RUN pip install --no-cache-dir /gunicorn-src/ # Copy test app COPY tests/docker/dirty_ttin_ttou/app.py /app/ COPY tests/docker/dirty_ttin_ttou/gunicorn_conf.py /app/ CMD ["gunicorn", "-c", "gunicorn_conf.py", "app:app"] benoitc-gunicorn-f5fb19e/tests/docker/dirty_ttin_ttou/__init__.py000066400000000000000000000002561514360242400254540ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Docker integration tests for dirty arbiter TTIN/TTOU signals.""" benoitc-gunicorn-f5fb19e/tests/docker/dirty_ttin_ttou/app.py000066400000000000000000000040211514360242400244670ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""Test app with multiple dirty tasks for TTIN/TTOU testing.""" import json import time from gunicorn.dirty import DirtyApp, get_dirty_client # Unlimited workers - runs on all dirty workers class UnlimitedTask(DirtyApp): """Task that runs on all dirty workers.""" def setup(self): pass def process(self, data): return {"task": "unlimited", "data": data} # Limited to 2 workers class LimitedTask(DirtyApp): """Task limited to 2 workers.""" workers = 2 def setup(self): pass def process(self, data): delay = data.get("delay", 0) if delay: time.sleep(delay) return {"task": "limited", "data": data} def app(environ, start_response): """Simple WSGI app for testing.""" path = environ.get('PATH_INFO', '/') if path == '/health': start_response('200 OK', [('Content-Type', 'text/plain')]) return [b'OK'] if path == '/unlimited': try: client = get_dirty_client() result = client.execute('app:UnlimitedTask', {'test': 'data'}) start_response('200 OK', [('Content-Type', 'application/json')]) return [json.dumps(result).encode()] except Exception as e: start_response('500 Internal Server Error', [('Content-Type', 'text/plain')]) return [str(e).encode()] if path == '/limited': try: client = get_dirty_client() result = client.execute('app:LimitedTask', {'test': 'data'}) start_response('200 OK', [('Content-Type', 'application/json')]) return [json.dumps(result).encode()] except Exception as e: start_response('500 Internal Server Error', [('Content-Type', 'text/plain')]) return [str(e).encode()] start_response('404 Not Found', [('Content-Type', 'text/plain')]) return [b'Not Found'] benoitc-gunicorn-f5fb19e/tests/docker/dirty_ttin_ttou/docker-compose.yml000066400000000000000000000005171514360242400270000ustar00rootroot00000000000000services: gunicorn: build: context: ../../.. 
dockerfile: tests/docker/dirty_ttin_ttou/Dockerfile ports: - "18000:8000" healthcheck: test: ["CMD", "curl", "-f", "http://localhost:8000/health"] interval: 2s timeout: 5s retries: 15 start_period: 5s stop_grace_period: 10s benoitc-gunicorn-f5fb19e/tests/docker/dirty_ttin_ttou/gunicorn_conf.py000066400000000000000000000006741514360242400265520ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Gunicorn configuration for TTIN/TTOU testing.""" bind = "0.0.0.0:8000" workers = 2 worker_class = "gthread" threads = 2 # Dirty arbiter config dirty_apps = [ "app:UnlimitedTask", "app:LimitedTask", # Has workers=2 attribute ] dirty_workers = 3 dirty_timeout = 30 # Logging loglevel = "debug" accesslog = "-" errorlog = "-" benoitc-gunicorn-f5fb19e/tests/docker/dirty_ttin_ttou/test_ttin_ttou_docker.py000066400000000000000000000136471514360242400303440ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""Docker integration tests for dirty arbiter TTIN/TTOU signals.""" import os import subprocess import time from pathlib import Path import pytest import requests pytestmark = [ pytest.mark.docker, pytest.mark.integration, ] # Directory containing this test file TEST_DIR = Path(__file__).parent COMPOSE_FILE = TEST_DIR / "docker-compose.yml" BASE_URL = "http://localhost:18000" @pytest.fixture(scope="module") def docker_services(): """Start Docker services for the test module.""" # Start services subprocess.run( ["docker", "compose", "-f", str(COMPOSE_FILE), "up", "-d", "--build"], check=True, cwd=TEST_DIR ) # Wait for health for _ in range(30): try: resp = requests.get(f"{BASE_URL}/health", timeout=2) if resp.status_code == 200: break except requests.RequestException: pass time.sleep(1) else: # Print logs for debugging subprocess.run( ["docker", "compose", "-f", str(COMPOSE_FILE), "logs"], cwd=TEST_DIR ) pytest.fail("Services did not become healthy") yield # Cleanup subprocess.run( ["docker", "compose", "-f", str(COMPOSE_FILE), "down", "-v"], cwd=TEST_DIR ) def get_dirty_arbiter_pid(): """Get the dirty arbiter PID from the container.""" result = subprocess.run( ["docker", "compose", "-f", str(COMPOSE_FILE), "exec", "-T", "gunicorn", "pgrep", "-f", "dirty-arbiter"], capture_output=True, text=True, cwd=TEST_DIR ) pids = result.stdout.strip().split('\n') # Return the first PID (there should only be one dirty-arbiter) return int(pids[0]) if pids and pids[0] else None def get_dirty_worker_count(): """Get the current number of dirty workers.""" result = subprocess.run( ["docker", "compose", "-f", str(COMPOSE_FILE), "exec", "-T", "gunicorn", "pgrep", "-c", "-f", "dirty-worker"], capture_output=True, text=True, cwd=TEST_DIR ) count = result.stdout.strip() return int(count) if count else 0 def send_signal_to_dirty_arbiter(sig): """Send a signal to the dirty arbiter.""" pid = get_dirty_arbiter_pid() if pid is None: raise RuntimeError("Could not find dirty arbiter PID") 
subprocess.run( ["docker", "compose", "-f", str(COMPOSE_FILE), "exec", "-T", "gunicorn", "kill", f"-{sig}", str(pid)], check=True, cwd=TEST_DIR ) class TestTTINSignal: """Test SIGTTIN increases dirty workers.""" def test_ttin_increases_workers(self, docker_services): """TTIN should spawn additional dirty worker.""" initial_count = get_dirty_worker_count() assert initial_count == 3, f"Expected 3 initial workers, got {initial_count}" send_signal_to_dirty_arbiter("TTIN") time.sleep(2) # Wait for worker to spawn new_count = get_dirty_worker_count() assert new_count == 4, f"Expected 4 workers after TTIN, got {new_count}" def test_multiple_ttin_increases(self, docker_services): """Multiple TTIN signals should keep increasing workers.""" # Get current count (may be 4 from previous test) current_count = get_dirty_worker_count() send_signal_to_dirty_arbiter("TTIN") time.sleep(2) new_count = get_dirty_worker_count() assert new_count == current_count + 1 class TestTTOUSignal: """Test SIGTTOU decreases dirty workers.""" def test_ttou_decreases_workers(self, docker_services): """TTOU should kill a dirty worker.""" # First make sure we have more than minimum send_signal_to_dirty_arbiter("TTIN") time.sleep(2) count_before = get_dirty_worker_count() send_signal_to_dirty_arbiter("TTOU") time.sleep(2) count_after = get_dirty_worker_count() assert count_after == count_before - 1 def test_ttou_respects_minimum(self, docker_services): """TTOU should not go below app minimum (2 for LimitedTask).""" # Try to decrease multiple times for _ in range(10): send_signal_to_dirty_arbiter("TTOU") time.sleep(0.5) time.sleep(2) # Wait for all signals to be processed # Should not go below 2 (LimitedTask.workers = 2) final_count = get_dirty_worker_count() assert final_count >= 2, f"Worker count {final_count} is below minimum of 2" class TestUnlimitedApps: """Test apps with worker_count=None work correctly.""" def test_unlimited_app_works(self, docker_services): """UnlimitedTask should work.""" resp = 
requests.get(f"{BASE_URL}/unlimited", timeout=10) assert resp.status_code == 200 data = resp.json() assert data["task"] == "unlimited" def test_limited_app_works(self, docker_services): """LimitedTask should work.""" resp = requests.get(f"{BASE_URL}/limited", timeout=10) assert resp.status_code == 200 data = resp.json() assert data["task"] == "limited" def test_apps_work_after_scaling(self, docker_services): """Both apps should work after scaling up and down.""" # Scale up send_signal_to_dirty_arbiter("TTIN") time.sleep(2) # Test both apps resp = requests.get(f"{BASE_URL}/unlimited", timeout=10) assert resp.status_code == 200 resp = requests.get(f"{BASE_URL}/limited", timeout=10) assert resp.status_code == 200 # Scale down send_signal_to_dirty_arbiter("TTOU") time.sleep(2) # Test both apps again resp = requests.get(f"{BASE_URL}/unlimited", timeout=10) assert resp.status_code == 200 resp = requests.get(f"{BASE_URL}/limited", timeout=10) assert resp.status_code == 200 benoitc-gunicorn-f5fb19e/tests/docker/http2/000077500000000000000000000000001514360242400211355ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/tests/docker/http2/Dockerfile.gunicorn000066400000000000000000000013061514360242400247520ustar00rootroot00000000000000FROM python:3.12-slim # Install build dependencies for h2 and other packages RUN apt-get update && apt-get install -y --no-install-recommends \ gcc \ && rm -rf /var/lib/apt/lists/* WORKDIR /app # Copy the gunicorn source code and install it COPY . 
/gunicorn-src/ RUN pip install --no-cache-dir /gunicorn-src/[http2] # Copy the test application COPY tests/docker/http2/app.py /app/app.py EXPOSE 8443 CMD ["gunicorn", "app:app", \ "--bind", "0.0.0.0:8443", \ "--worker-class", "gthread", \ "--threads", "4", \ "--http-protocols", "h2,h1", \ "--certfile", "/certs/server.crt", \ "--keyfile", "/certs/server.key", \ "--workers", "2", \ "--log-level", "debug"] benoitc-gunicorn-f5fb19e/tests/docker/http2/Dockerfile.nginx000066400000000000000000000003061514360242400242500ustar00rootroot00000000000000FROM nginx:1.29-alpine # Install curl for healthcheck RUN apk add --no-cache curl # Copy nginx configuration COPY nginx.conf /etc/nginx/nginx.conf EXPOSE 8444 CMD ["nginx", "-g", "daemon off;"] benoitc-gunicorn-f5fb19e/tests/docker/http2/README.rst000066400000000000000000000053221514360242400226260ustar00rootroot00000000000000HTTP/2 Docker Integration Tests ================================ This directory contains Docker-based integration tests for HTTP/2 support in Gunicorn. These tests verify real HTTP/2 connections using actual HTTP/2 clients, both directly to Gunicorn and through an nginx reverse proxy. Prerequisites ------------- - Docker and Docker Compose - OpenSSL (for generating test certificates) - Python with ``httpx[http2]`` installed Running the Tests ----------------- 1. Install test dependencies:: pip install -e ".[testing]" 2. Generate SSL certificates (done automatically by tests, or manually):: cd tests/docker/http2 openssl req -x509 -newkey rsa:2048 \ -keyout certs/server.key \ -out certs/server.crt \ -days 1 -nodes \ -subj "/CN=localhost" 3. 
Run the Docker integration tests:: # From the project root pytest tests/docker/http2/ -v Or with Docker Compose manually:: cd tests/docker/http2 docker compose up -d pytest -v docker compose down -v Test Categories --------------- - **TestDirectHTTP2Connection**: Direct HTTP/2 connections to Gunicorn - **TestConcurrentStreams**: HTTP/2 multiplexing with concurrent streams - **TestHTTP2BehindProxy**: HTTP/2 through nginx reverse proxy - **TestHTTP2Protocol**: ALPN negotiation and protocol fallback - **TestHTTP2ErrorHandling**: Error responses over HTTP/2 - **TestHTTP2Headers**: HTTP/2 header handling - **TestHTTP2Performance**: Performance-related tests Architecture ------------ :: +--------+ HTTP/2 +-----------+ | Client | --------------> | Gunicorn | +--------+ | (port 8443)| | +-----------+ | | HTTP/2 +-------+ HTTPS +-----------+ +---------------> | nginx | -----------> | Gunicorn | | proxy | | (port 8443)| | (8444)| +-----------+ +-------+ Files ----- - ``docker-compose.yml`` - Service definitions - ``Dockerfile.gunicorn`` - Gunicorn container with HTTP/2 - ``Dockerfile.nginx`` - nginx HTTP/2 proxy - ``nginx.conf`` - nginx configuration - ``app.py`` - Test WSGI application - ``conftest.py`` - Pytest fixtures for Docker - ``test_http2_docker.py`` - Integration tests Troubleshooting --------------- If tests fail to start: 1. Check Docker is running:: docker info 2. Check service logs:: cd tests/docker/http2 docker compose logs gunicorn-h2 docker compose logs nginx-h2 3. Verify certificates:: openssl x509 -in certs/server.crt -text -noout 4. Test manually with curl:: curl -k --http2 https://localhost:8443/ curl -k --http2 https://localhost:8444/ benoitc-gunicorn-f5fb19e/tests/docker/http2/__init__.py000066400000000000000000000002311514360242400232420ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""HTTP/2 Docker integration tests package.""" benoitc-gunicorn-f5fb19e/tests/docker/http2/app.py000066400000000000000000000121101514360242400222620ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Test WSGI application for HTTP/2 Docker integration tests.""" import json def app(environ, start_response): """Simple WSGI app for testing HTTP/2 functionality.""" path = environ.get('PATH_INFO', '/') method = environ.get('REQUEST_METHOD', 'GET') if path == '/': body = b'Hello HTTP/2!' status = '200 OK' content_type = 'text/plain' elif path == '/health': body = b'OK' status = '200 OK' content_type = 'text/plain' elif path == '/echo': # Echo back the request body content_length = int(environ.get('CONTENT_LENGTH', 0) or 0) body = environ['wsgi.input'].read(content_length) status = '200 OK' content_type = 'application/octet-stream' elif path == '/headers': # Return all HTTP headers as JSON headers = {} for key, value in environ.items(): if key.startswith('HTTP_'): headers[key] = value # Also include some important non-HTTP_ headers for key in ['CONTENT_TYPE', 'CONTENT_LENGTH', 'REQUEST_METHOD', 'PATH_INFO', 'QUERY_STRING', 'SERVER_PROTOCOL']: if key in environ: headers[key] = str(environ[key]) body = json.dumps(headers, indent=2).encode('utf-8') status = '200 OK' content_type = 'application/json' elif path == '/version': # Return HTTP version info server_protocol = environ.get('SERVER_PROTOCOL', 'HTTP/1.1') body = server_protocol.encode('utf-8') status = '200 OK' content_type = 'text/plain' elif path == '/large': # Return a large response (1MB) for testing streaming body = b'X' * (1024 * 1024) status = '200 OK' content_type = 'application/octet-stream' elif path == '/stream': # Return a streaming response def generate(): for i in range(10): yield f'chunk-{i}\n'.encode('utf-8') start_response('200 OK', [ ('Content-Type', 'text/plain'), ('Transfer-Encoding', 'chunked') ]) return generate() 
elif path == '/status': # Return a specific status code based on query string query = environ.get('QUERY_STRING', '') try: code = int(query.split('=')[1]) if '=' in query else 200 except (ValueError, IndexError): code = 200 status_messages = { 200: 'OK', 201: 'Created', 204: 'No Content', 400: 'Bad Request', 404: 'Not Found', 500: 'Internal Server Error', } status = f'{code} {status_messages.get(code, "Unknown")}' body = f'Status: {code}'.encode('utf-8') content_type = 'text/plain' elif path == '/delay': # Simulate a slow response import time query = environ.get('QUERY_STRING', '') try: delay = float(query.split('=')[1]) if '=' in query else 1.0 delay = min(delay, 5.0) # Cap at 5 seconds except (ValueError, IndexError): delay = 1.0 time.sleep(delay) body = f'Delayed {delay}s'.encode('utf-8') status = '200 OK' content_type = 'text/plain' elif path == '/method': # Return the request method body = method.encode('utf-8') status = '200 OK' content_type = 'text/plain' elif path == '/early-hints': # Test endpoint for 103 Early Hints # Send early hints if the callback is available if 'wsgi.early_hints' in environ: environ['wsgi.early_hints']([ ('Link', '; rel=preload; as=style'), ('Link', '; rel=preload; as=script'), ]) body = b'Early hints sent!' status = '200 OK' content_type = 'text/plain' elif path == '/early-hints-multiple': # Test endpoint for multiple 103 Early Hints responses if 'wsgi.early_hints' in environ: # First early hints environ['wsgi.early_hints']([ ('Link', '; rel=preload; as=style'), ]) # Second early hints environ['wsgi.early_hints']([ ('Link', '; rel=preload; as=script'), ]) body = b'Multiple early hints sent!' 
status = '200 OK' content_type = 'text/plain' else: body = b'Not Found' status = '404 Not Found' content_type = 'text/plain' response_headers = [ ('Content-Type', content_type), ('Content-Length', str(len(body))), ('X-Request-Path', path), ('X-Request-Method', method), ] start_response(status, response_headers) return [body] # For running directly with python if __name__ == '__main__': from wsgiref.simple_server import make_server server = make_server('localhost', 8000, app) print('Serving on http://localhost:8000') server.serve_forever() benoitc-gunicorn-f5fb19e/tests/docker/http2/certs/000077500000000000000000000000001514360242400222555ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/tests/docker/http2/certs/.gitkeep000066400000000000000000000002471514360242400237110ustar00rootroot00000000000000# This directory contains SSL certificates generated for testing. # Certificates are generated automatically by conftest.py. # Do not commit actual certificate files. benoitc-gunicorn-f5fb19e/tests/docker/http2/certs/server.crt000066400000000000000000000023651514360242400243030ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDfDCCAmSgAwIBAgIUDxTarKRHe0FIyczGmoYwm377ZpcwDQYJKoZIhvcNAQEL BQAwOTESMBAGA1UEAwwJbG9jYWxob3N0MRYwFAYDVQQKDA1HdW5pY29ybiBUZXN0 MQswCQYDVQQGEwJVUzAeFw0yNjAyMDUxMTE1MjJaFw0yNjAyMDYxMTE1MjJaMDkx EjAQBgNVBAMMCWxvY2FsaG9zdDEWMBQGA1UECgwNR3VuaWNvcm4gVGVzdDELMAkG A1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCRQTHakkqY 6l6dMqfs4oiY98+rjvZubwjp0PH7UBuxXCi/4Ao78o0JhKcs+jgAGAXyb8eRjEKt z4rPoHZYE91D/eD0lWAz9r/LRoutDJd9IO0rfDtHlYXamciuxJJ8cckOrnuTXLtq AWqjKR3U9RIDD3eumCKG4l7Py0L67zTomwMRPfeIdlWBfxGjWMqOewdTc/O/cuK2 HL5JP2ixy+iTufs0jhljI9cbu49J606f+TQH9eXRTD716q+KsHPJX1X5dVd7V7Lr FIp7wSUFdbiy56JfmrGmfJbZgFH67P0ZyiTpQBaVHt1YYRIcOUJZqM+0MAtrsySC TNA/LsI8tsybAgMBAAGjfDB6MB0GA1UdDgQWBBRK2VkAeM0hL4j/45ckkKbGrb/Q FjAfBgNVHSMEGDAWgBRK2VkAeM0hL4j/45ckkKbGrb/QFjAPBgNVHRMBAf8EBTAD AQH/MCcGA1UdEQQgMB6CCWxvY2FsaG9zdIILZ3VuaWNvcm4taDKHBH8AAAEwDQYJ 
KoZIhvcNAQELBQADggEBAAXwuw0KTQUC4UEFudQ1rceK6By9WCSJND7xJi+UQ50G Zrp5tJ2YB4ZWY+APadfuJo+zUxYVZ3jhs0mxgVeiGdDW6yZdHkeX8MlXBTLHR+/a A7DXn6wCw9NDeDtcY/bKg5iamvoGGTL6szPrqeuZPz4UdbsFlr0MdcjgSNOqnkjr YS4ukgZ71aWSjfraRRPjFMzkfnQ1xm96A1ngMH4DvU/t62D7r8+SvxQ8M6ERL84Z FBu4bTXDdYIjJ24ojmDDO2irTVW1FMGXQTPzMaTEbE1rvBYeEYhf10KiMynK9xfO 5j8LWmCkgek0CqBrf3zbDEwu8QxcaxITAIUkSXLOZbo= -----END CERTIFICATE----- benoitc-gunicorn-f5fb19e/tests/docker/http2/certs/server.key000066400000000000000000000032501514360242400242750ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCRQTHakkqY6l6d Mqfs4oiY98+rjvZubwjp0PH7UBuxXCi/4Ao78o0JhKcs+jgAGAXyb8eRjEKtz4rP oHZYE91D/eD0lWAz9r/LRoutDJd9IO0rfDtHlYXamciuxJJ8cckOrnuTXLtqAWqj KR3U9RIDD3eumCKG4l7Py0L67zTomwMRPfeIdlWBfxGjWMqOewdTc/O/cuK2HL5J P2ixy+iTufs0jhljI9cbu49J606f+TQH9eXRTD716q+KsHPJX1X5dVd7V7LrFIp7 wSUFdbiy56JfmrGmfJbZgFH67P0ZyiTpQBaVHt1YYRIcOUJZqM+0MAtrsySCTNA/ LsI8tsybAgMBAAECggEANBhGOYZLI9G2sjlXOaG7bOU/wV9KKaw/7Z/HEaOW8wLD CKHg+cQRai79yCdLi1kSVPNbB2vfBDhRqAp8NzWUn0x/8ChcsvZVriF0edFwyWtU NErfddp+Absy2t9cTC6A9feFEYJqIug0JyVZciWc2qUi/ubIR0kLyQm00YuWFa/s GJou8Nhg70rqW+3FB1H8kAEXqob+PFW4xbTwexw1+MbHxN7UKLTzS8uzYGLo2UpB 7bksumyD0o+lZtlx9HZ6CwrB6IPjgJ0HyaD8SrOc7/ozd7rR2LmvMmBCV1uC5VSO jhr0PScLoNv60fjkVOiF9uqaPY2kNKymsOzpZ7/mwQKBgQDMcz+ve8WGGbE+bbM7 2uinQ5smm8rWPnfbHJIHQUetrEQKljRovybmjiiXN08uxlX6VA/Vnp4fmL5fzsTD xTeiCVPsR1huXIfMLGJ6crUgvlbiaB8XsxtVNBpfEEtBe27qjSIj3xtmwqM6+LD1 FKLsYzgotHUH9JwyLA1RMKPBwQKBgQC14QWtI5YtZcTX46BqxlZ07iAAuy19Jywn UtgmTawkJuEcseewIjxtJkMz+aSy7V3PsLII8tY48oSjAVx84w50zLJ2OlJnFT1S zEmIOu9YDcGLZkYXJ2AwndRAIXpJVHwtFM9eDSMh+wVPBFeboYP1dO/VxmN6QV0W GqDaQfItWwKBgEb31mp2n0j+UB0ofSfQxCOTfx62w4D87CPd1f64tUXe3zuBii21 9K3hOMvMwiqtZBjh5yEyzxaOsb6WCo0eP0J61GvXFCYy7lx8J67zdFYqXAR5OhnC 7UD1NhY7lLPlQcofNXOYNW3FMF3/B4X7JNbDVjIi+eDKExIDYpgFN0LBAoGADGCf 7kR5t+UxHDAVfq64u4RpESOr2NSNoK92nkSy7lLnBvjkd4wc6KCt+h+HIdYdiEDS HOHJyl5WwHEbRjR9i11S19DoQrOjVLsqVecM2sU04rO3GWRIm4ZiJ2sf01W4jajY 
4+Go/msC1XnKLIE1ZcLrf3Tc2DkSiKqPP8s1G/kCgYA8sCPAXedwhULhOBM45x4J vkwT1Icm5RHOwOr8t34IFozTLokba6pjhYua3nE+V3FglRct7NpX+Op4gUgHa80g 5zoHboq5/pTUTclx41jndC1YGa3NLvthDWTWmyo/Qj7F/R7jGJf8E3KUDe0tFoSp JlfEuUHtKpFJReBnmWTFiQ== -----END PRIVATE KEY----- benoitc-gunicorn-f5fb19e/tests/docker/http2/conftest.py000066400000000000000000000132751514360242400233440ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Pytest fixtures for HTTP/2 Docker integration tests.""" import subprocess import time from pathlib import Path import pytest # Directory containing this conftest.py DOCKER_DIR = Path(__file__).parent CERTS_DIR = DOCKER_DIR / "certs" def generate_self_signed_cert(certs_dir: Path) -> None: """Generate self-signed SSL certificates for testing.""" certs_dir.mkdir(parents=True, exist_ok=True) cert_file = certs_dir / "server.crt" key_file = certs_dir / "server.key" # Skip if certs already exist and are recent (less than 1 day old) if cert_file.exists() and key_file.exists(): age = time.time() - cert_file.stat().st_mtime if age < 86400: # 1 day return # Generate self-signed certificate subprocess.run( [ "openssl", "req", "-x509", "-newkey", "rsa:2048", "-keyout", str(key_file), "-out", str(cert_file), "-days", "1", "-nodes", "-subj", "/CN=localhost/O=Gunicorn Test/C=US", "-addext", "subjectAltName=DNS:localhost,DNS:gunicorn-h2,IP:127.0.0.1" ], check=True, capture_output=True ) # Set readable permissions cert_file.chmod(0o644) key_file.chmod(0o644) def wait_for_service(url: str, timeout: int = 60) -> bool: """Wait for a service to become available.""" import ssl import socket from urllib.parse import urlparse parsed = urlparse(url) host = parsed.hostname or 'localhost' port = parsed.port or 443 start_time = time.time() while time.time() - start_time < timeout: try: ctx = ssl.create_default_context() ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE with socket.create_connection((host, 
port), timeout=5) as sock: with ctx.wrap_socket(sock, server_hostname=host): return True except (socket.error, ssl.SSLError, OSError): time.sleep(1) return False @pytest.fixture(scope="session") def docker_compose_file(): """Return the path to docker-compose.yml.""" return DOCKER_DIR / "docker-compose.yml" @pytest.fixture(scope="session") def certs_dir(): """Generate and return the certs directory.""" generate_self_signed_cert(CERTS_DIR) return CERTS_DIR @pytest.fixture(scope="session") def docker_services(docker_compose_file, certs_dir): """Start Docker services for the test session.""" compose_file = str(docker_compose_file) # Check if Docker is available try: subprocess.run( ["docker", "info"], check=True, capture_output=True ) except (subprocess.CalledProcessError, FileNotFoundError): pytest.skip("Docker is not available") # Check if docker compose is available try: subprocess.run( ["docker", "compose", "version"], check=True, capture_output=True ) except subprocess.CalledProcessError: pytest.skip("Docker Compose is not available") # Build and start services try: subprocess.run( ["docker", "compose", "-f", compose_file, "build"], check=True, cwd=DOCKER_DIR ) subprocess.run( ["docker", "compose", "-f", compose_file, "up", "-d"], check=True, cwd=DOCKER_DIR ) # Wait for services to be healthy gunicorn_ready = wait_for_service("https://127.0.0.1:8443", timeout=60) nginx_ready = wait_for_service("https://127.0.0.1:8444", timeout=60) if not gunicorn_ready: # Get logs for debugging result = subprocess.run( ["docker", "compose", "-f", compose_file, "logs", "gunicorn-h2"], capture_output=True, text=True, cwd=DOCKER_DIR ) pytest.fail(f"Gunicorn service failed to start. Logs:\n{result.stdout}\n{result.stderr}") if not nginx_ready: result = subprocess.run( ["docker", "compose", "-f", compose_file, "logs", "nginx-h2"], capture_output=True, text=True, cwd=DOCKER_DIR ) pytest.fail(f"Nginx service failed to start. 
Logs:\n{result.stdout}\n{result.stderr}") yield { "gunicorn": "https://127.0.0.1:8443", "nginx": "https://127.0.0.1:8444" } finally: # Stop and remove services subprocess.run( ["docker", "compose", "-f", compose_file, "down", "-v", "--remove-orphans"], cwd=DOCKER_DIR, capture_output=True ) @pytest.fixture def gunicorn_url(docker_services): """Return the gunicorn service URL.""" return docker_services["gunicorn"] @pytest.fixture def nginx_url(docker_services): """Return the nginx proxy URL.""" return docker_services["nginx"] @pytest.fixture def h2_client(): """Create an HTTP/2 capable client.""" httpx = pytest.importorskip("httpx") client = httpx.Client(http2=True, verify=False, timeout=30.0) yield client client.close() @pytest.fixture def h1_client(): """Create an HTTP/1.1 only client.""" httpx = pytest.importorskip("httpx") client = httpx.Client(http2=False, verify=False, timeout=30.0) yield client client.close() @pytest.fixture def async_h2_client(): """Create an async HTTP/2 capable client.""" httpx = pytest.importorskip("httpx") async def create_client(): return httpx.AsyncClient(http2=True, verify=False, timeout=30.0) return create_client benoitc-gunicorn-f5fb19e/tests/docker/http2/docker-compose.yml000066400000000000000000000022211514360242400245670ustar00rootroot00000000000000services: gunicorn-h2: build: context: ../../../ dockerfile: tests/docker/http2/Dockerfile.gunicorn ports: - "8443:8443" volumes: - ./certs:/certs:ro - ./app.py:/app/app.py:ro environment: - GUNICORN_CERTFILE=/certs/server.crt - GUNICORN_KEYFILE=/certs/server.key healthcheck: test: ["CMD", "python", "-c", "import ssl,socket; s=socket.socket(); s.settimeout(1); ctx=ssl.create_default_context(); ctx.check_hostname=False; ctx.verify_mode=ssl.CERT_NONE; ss=ctx.wrap_socket(s,server_hostname='localhost'); ss.connect(('localhost',8443)); ss.close()"] interval: 2s timeout: 5s retries: 15 start_period: 5s nginx-h2: build: context: . 
dockerfile: Dockerfile.nginx ports: - "8444:8444" volumes: - ./certs:/certs:ro - ./nginx.conf:/etc/nginx/nginx.conf:ro depends_on: gunicorn-h2: condition: service_healthy healthcheck: test: ["CMD", "curl", "-k", "-f", "https://localhost:8444/health"] interval: 2s timeout: 5s retries: 15 start_period: 5s networks: default: driver: bridge benoitc-gunicorn-f5fb19e/tests/docker/http2/nginx.conf000066400000000000000000000041411514360242400231270ustar00rootroot00000000000000worker_processes auto; error_log /var/log/nginx/error.log warn; pid /var/run/nginx.pid; events { worker_connections 1024; } http { include /etc/nginx/mime.types; default_type application/octet-stream; log_format main '$remote_addr - $remote_user [$time_local] "$request" ' '$status $body_bytes_sent "$http_referer" ' '"$http_user_agent" "$http_x_forwarded_for"'; access_log /var/log/nginx/access.log main; sendfile on; keepalive_timeout 65; upstream gunicorn_h2 { server gunicorn-h2:8443; keepalive 32; } server { listen 8444 ssl; http2 on; server_name localhost; ssl_certificate /certs/server.crt; ssl_certificate_key /certs/server.key; ssl_protocols TLSv1.2 TLSv1.3; ssl_ciphers HIGH:!aNULL:!MD5; ssl_prefer_server_ciphers on; # HTTP/2 settings http2_max_concurrent_streams 128; # Health check endpoint location /health { return 200 'OK'; add_header Content-Type text/plain; } location / { # Proxy to gunicorn with HTTPS proxy_pass https://gunicorn_h2; proxy_http_version 1.1; proxy_ssl_verify off; proxy_ssl_server_name on; # Enable forwarding of 103 Early Hints from upstream # $http2 is set to "h2" when HTTP/2 is used, empty otherwise early_hints $http2; # Headers proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header X-Forwarded-Host $host; proxy_set_header X-Forwarded-Port $server_port; # Buffering settings proxy_buffering on; proxy_buffer_size 4k; proxy_buffers 8 4k; # 
Timeouts proxy_connect_timeout 60s; proxy_send_timeout 60s; proxy_read_timeout 60s; } } } benoitc-gunicorn-f5fb19e/tests/docker/http2/test_http2_docker.py000066400000000000000000000364771514360242400251570ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """HTTP/2 Docker integration tests. These tests verify HTTP/2 functionality with real connections to gunicorn running in Docker containers, both directly and through an nginx proxy. """ import asyncio import ssl import socket import pytest # Mark all tests in this module as requiring Docker pytestmark = [ pytest.mark.docker, pytest.mark.http2, pytest.mark.integration, ] class TestDirectHTTP2Connection: """Test direct HTTP/2 connections to gunicorn.""" def test_simple_get(self, h2_client, gunicorn_url): """Test basic GET request over HTTP/2.""" response = h2_client.get(f"{gunicorn_url}/") assert response.status_code == 200 assert response.http_version == "HTTP/2" assert response.text == "Hello HTTP/2!" 
def test_health_endpoint(self, h2_client, gunicorn_url): """Test health check endpoint.""" response = h2_client.get(f"{gunicorn_url}/health") assert response.status_code == 200 assert response.text == "OK" def test_post_with_body(self, h2_client, gunicorn_url): """Test POST request with body.""" data = b"test data for echo" response = h2_client.post(f"{gunicorn_url}/echo", content=data) assert response.status_code == 200 assert response.content == data def test_post_large_body(self, h2_client, gunicorn_url): """Test POST with larger body.""" data = b"X" * 65536 # 64KB response = h2_client.post(f"{gunicorn_url}/echo", content=data) assert response.status_code == 200 assert response.content == data assert len(response.content) == 65536 def test_headers_endpoint(self, h2_client, gunicorn_url): """Test that custom headers are received.""" response = h2_client.get( f"{gunicorn_url}/headers", headers={"X-Custom-Header": "test-value"} ) assert response.status_code == 200 headers = response.json() assert "HTTP_X_CUSTOM_HEADER" in headers assert headers["HTTP_X_CUSTOM_HEADER"] == "test-value" def test_version_endpoint(self, h2_client, gunicorn_url): """Test server protocol version.""" response = h2_client.get(f"{gunicorn_url}/version") assert response.status_code == 200 # HTTP/2 should report as HTTP/2.0 or similar assert "HTTP" in response.text def test_large_response(self, h2_client, gunicorn_url): """Test receiving large response over HTTP/2.""" response = h2_client.get(f"{gunicorn_url}/large") assert response.status_code == 200 assert len(response.content) == 1024 * 1024 # 1MB assert response.content == b"X" * (1024 * 1024) def test_different_methods(self, h2_client, gunicorn_url): """Test various HTTP methods.""" for method in ["GET", "POST", "PUT", "DELETE", "PATCH"]: response = h2_client.request(method, f"{gunicorn_url}/method") assert response.status_code == 200 assert response.text == method def test_status_codes(self, h2_client, gunicorn_url): """Test various HTTP 
status codes.""" for code in [200, 201, 400, 404, 500]: response = h2_client.get(f"{gunicorn_url}/status?code={code}") assert response.status_code == code def test_not_found(self, h2_client, gunicorn_url): """Test 404 response.""" response = h2_client.get(f"{gunicorn_url}/nonexistent") assert response.status_code == 404 class TestConcurrentStreams: """Test HTTP/2 multiplexing with concurrent streams.""" @pytest.mark.asyncio async def test_concurrent_requests(self, async_h2_client, gunicorn_url): """Test multiple concurrent requests over single connection.""" httpx = pytest.importorskip("httpx") async with httpx.AsyncClient(http2=True, verify=False, timeout=30.0) as client: # Send 10 concurrent requests tasks = [ client.get(f"{gunicorn_url}/") for _ in range(10) ] responses = await asyncio.gather(*tasks) assert len(responses) == 10 assert all(r.status_code == 200 for r in responses) assert all(r.http_version == "HTTP/2" for r in responses) assert all(r.text == "Hello HTTP/2!" for r in responses) @pytest.mark.asyncio async def test_concurrent_mixed_requests(self, async_h2_client, gunicorn_url): """Test concurrent requests to different endpoints.""" httpx = pytest.importorskip("httpx") async with httpx.AsyncClient(http2=True, verify=False, timeout=30.0) as client: tasks = [ client.get(f"{gunicorn_url}/"), client.get(f"{gunicorn_url}/headers"), client.get(f"{gunicorn_url}/version"), client.post(f"{gunicorn_url}/echo", content=b"test"), client.get(f"{gunicorn_url}/health"), ] responses = await asyncio.gather(*tasks) assert len(responses) == 5 assert all(r.status_code == 200 for r in responses) @pytest.mark.asyncio async def test_many_concurrent_streams(self, async_h2_client, gunicorn_url): """Test many concurrent streams (up to HTTP/2 limit).""" httpx = pytest.importorskip("httpx") async with httpx.AsyncClient(http2=True, verify=False, timeout=60.0) as client: # Send 50 concurrent requests tasks = [ client.get(f"{gunicorn_url}/") for _ in range(50) ] responses = await 
asyncio.gather(*tasks) assert len(responses) == 50 assert all(r.status_code == 200 for r in responses) class TestHTTP2BehindProxy: """Test HTTP/2 through nginx proxy.""" def test_simple_get_via_proxy(self, h2_client, nginx_url): """Test basic GET through nginx proxy.""" response = h2_client.get(f"{nginx_url}/") assert response.status_code == 200 assert response.http_version == "HTTP/2" assert response.text == "Hello HTTP/2!" def test_post_via_proxy(self, h2_client, nginx_url): """Test POST through nginx proxy.""" data = b"proxied data" response = h2_client.post(f"{nginx_url}/echo", content=data) assert response.status_code == 200 assert response.content == data def test_headers_preserved(self, h2_client, nginx_url): """Test that custom headers pass through proxy.""" response = h2_client.get( f"{nginx_url}/headers", headers={"X-Custom": "test-value"} ) assert response.status_code == 200 headers = response.json() assert "HTTP_X_CUSTOM" in headers assert headers["HTTP_X_CUSTOM"] == "test-value" def test_forwarded_headers(self, h2_client, nginx_url): """Test that proxy adds forwarded headers.""" response = h2_client.get(f"{nginx_url}/headers") assert response.status_code == 200 headers = response.json() # Nginx should add X-Forwarded-* headers assert "HTTP_X_FORWARDED_FOR" in headers assert "HTTP_X_FORWARDED_PROTO" in headers assert headers["HTTP_X_FORWARDED_PROTO"] == "https" def test_large_response_via_proxy(self, h2_client, nginx_url): """Test large response through proxy.""" response = h2_client.get(f"{nginx_url}/large") assert response.status_code == 200 assert len(response.content) == 1024 * 1024 @pytest.mark.asyncio async def test_concurrent_via_proxy(self, async_h2_client, nginx_url): """Test concurrent requests through proxy.""" httpx = pytest.importorskip("httpx") async with httpx.AsyncClient(http2=True, verify=False, timeout=30.0) as client: tasks = [ client.get(f"{nginx_url}/") for _ in range(10) ] responses = await asyncio.gather(*tasks) assert 
len(responses) == 10 assert all(r.status_code == 200 for r in responses) assert all(r.http_version == "HTTP/2" for r in responses) class TestHTTP2Protocol: """Test HTTP/2 specific protocol behaviors.""" def test_alpn_negotiation(self, gunicorn_url): """Verify ALPN negotiates h2 protocol.""" ctx = ssl.create_default_context() ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE ctx.set_alpn_protocols(['h2', 'http/1.1']) with socket.create_connection(('127.0.0.1', 8443), timeout=10) as sock: with ctx.wrap_socket(sock, server_hostname='localhost') as ssock: selected = ssock.selected_alpn_protocol() assert selected == 'h2', f"Expected h2, got {selected}" def test_alpn_http11_fallback(self, gunicorn_url): """Test that server accepts HTTP/1.1 via ALPN.""" ctx = ssl.create_default_context() ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE ctx.set_alpn_protocols(['http/1.1']) with socket.create_connection(('127.0.0.1', 8443), timeout=10) as sock: with ctx.wrap_socket(sock, server_hostname='localhost') as ssock: selected = ssock.selected_alpn_protocol() assert selected == 'http/1.1', f"Expected http/1.1, got {selected}" def test_http11_client_works(self, h1_client, gunicorn_url): """Test that HTTP/1.1 client can still connect.""" response = h1_client.get(f"{gunicorn_url}/") assert response.status_code == 200 assert response.http_version == "HTTP/1.1" assert response.text == "Hello HTTP/2!" 
def test_tls_version(self, gunicorn_url): """Verify TLS 1.2+ is used.""" ctx = ssl.create_default_context() ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE with socket.create_connection(('127.0.0.1', 8443), timeout=10) as sock: with ctx.wrap_socket(sock, server_hostname='localhost') as ssock: version = ssock.version() assert version in ('TLSv1.2', 'TLSv1.3'), f"Unexpected TLS version: {version}" class TestHTTP2ErrorHandling: """Test HTTP/2 error handling.""" def test_invalid_path(self, h2_client, gunicorn_url): """Test request to non-existent path.""" response = h2_client.get(f"{gunicorn_url}/does/not/exist") assert response.status_code == 404 assert response.http_version == "HTTP/2" def test_server_error(self, h2_client, gunicorn_url): """Test server error response.""" response = h2_client.get(f"{gunicorn_url}/status?code=500") assert response.status_code == 500 assert response.http_version == "HTTP/2" @pytest.mark.asyncio async def test_connection_reuse_after_error(self, async_h2_client, gunicorn_url): """Test that connection is reused after error response.""" httpx = pytest.importorskip("httpx") async with httpx.AsyncClient(http2=True, verify=False, timeout=30.0) as client: # First request - error r1 = await client.get(f"{gunicorn_url}/status?code=500") assert r1.status_code == 500 # Second request - should work on same connection r2 = await client.get(f"{gunicorn_url}/") assert r2.status_code == 200 assert r2.text == "Hello HTTP/2!" 
class TestHTTP2Headers: """Test HTTP/2 header handling.""" def test_response_headers(self, h2_client, gunicorn_url): """Test that response headers are correctly received.""" response = h2_client.get(f"{gunicorn_url}/") assert "content-type" in response.headers assert "content-length" in response.headers assert response.headers["x-request-path"] == "/" assert response.headers["x-request-method"] == "GET" def test_many_request_headers(self, h2_client, gunicorn_url): """Test sending many headers.""" headers = {f"X-Custom-{i}": f"value-{i}" for i in range(20)} response = h2_client.get(f"{gunicorn_url}/headers", headers=headers) assert response.status_code == 200 received = response.json() for i in range(20): key = f"HTTP_X_CUSTOM_{i}" assert key in received assert received[key] == f"value-{i}" def test_header_case_insensitivity(self, h2_client, gunicorn_url): """Test HTTP/2 header case handling.""" response = h2_client.get( f"{gunicorn_url}/headers", headers={"X-Mixed-Case-Header": "test"} ) assert response.status_code == 200 # HTTP/2 lowercases headers, but WSGI uppercases them headers = response.json() assert "HTTP_X_MIXED_CASE_HEADER" in headers class TestHTTP2Performance: """Performance-related HTTP/2 tests.""" @pytest.mark.asyncio async def test_parallel_large_requests(self, async_h2_client, gunicorn_url): """Test parallel requests with large responses.""" httpx = pytest.importorskip("httpx") async with httpx.AsyncClient(http2=True, verify=False, timeout=60.0) as client: tasks = [ client.get(f"{gunicorn_url}/large") for _ in range(5) ] responses = await asyncio.gather(*tasks) assert len(responses) == 5 assert all(r.status_code == 200 for r in responses) assert all(len(r.content) == 1024 * 1024 for r in responses) def test_connection_keepalive(self, h2_client, gunicorn_url): """Test that connections are kept alive.""" # Multiple requests should reuse the same connection for _ in range(5): response = h2_client.get(f"{gunicorn_url}/") assert response.status_code == 
200 assert response.http_version == "HTTP/2" class TestHTTP2EarlyHints: """Test HTTP 103 Early Hints support.""" def test_early_hints_endpoint(self, h2_client, gunicorn_url): """Test that early hints endpoint returns 200.""" response = h2_client.get(f"{gunicorn_url}/early-hints") assert response.status_code == 200 assert response.text == "Early hints sent!" def test_early_hints_multiple_endpoint(self, h2_client, gunicorn_url): """Test multiple early hints endpoint returns 200.""" response = h2_client.get(f"{gunicorn_url}/early-hints-multiple") assert response.status_code == 200 assert response.text == "Multiple early hints sent!" def test_early_hints_via_proxy(self, h2_client, nginx_url): """Test early hints through nginx proxy.""" response = h2_client.get(f"{nginx_url}/early-hints") assert response.status_code == 200 assert response.text == "Early hints sent!" @pytest.mark.asyncio async def test_concurrent_early_hints(self, async_h2_client, gunicorn_url): """Test concurrent requests to early hints endpoint.""" httpx = pytest.importorskip("httpx") async with httpx.AsyncClient(http2=True, verify=False, timeout=30.0) as client: tasks = [ client.get(f"{gunicorn_url}/early-hints") for _ in range(10) ] responses = await asyncio.gather(*tasks) assert len(responses) == 10 assert all(r.status_code == 200 for r in responses) assert all(r.text == "Early hints sent!" for r in responses) benoitc-gunicorn-f5fb19e/tests/docker/per_app_allocation/000077500000000000000000000000001514360242400237275ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/tests/docker/per_app_allocation/Dockerfile000066400000000000000000000012311514360242400257160ustar00rootroot00000000000000FROM python:3.12-slim WORKDIR /app # Copy gunicorn source COPY . /app/gunicorn-src # Install gunicorn and test dependencies # setproctitle is needed for process title changes (master, dirty-arbiter, etc.) 
RUN pip install --no-cache-dir /app/gunicorn-src pytest requests setproctitle # Copy test app files COPY tests/docker/per_app_allocation/app.py /app/ COPY tests/docker/per_app_allocation/gunicorn_conf.py /app/ # Install procps for process inspection and curl for healthcheck RUN apt-get update && apt-get install -y procps curl && rm -rf /var/lib/apt/lists/* # Default command - run gunicorn CMD ["gunicorn", "app:application", "-c", "gunicorn_conf.py"] benoitc-gunicorn-f5fb19e/tests/docker/per_app_allocation/README.md000066400000000000000000000031761514360242400252150ustar00rootroot00000000000000# Per-App Worker Allocation E2E Tests End-to-end Docker-based tests for the per-app worker allocation feature. ## Overview These tests verify that: - Apps with worker limits are only loaded on the specified number of workers - Requests are routed only to workers that have the target app loaded - Round-robin distribution works correctly within limited worker sets - Worker crash scenarios maintain correct app allocation - Class attribute `workers=N` is respected - Config-based `:N` overrides class attributes ## Configuration The tests use 4 dirty workers with 3 apps: - **LightweightApp**: No limit (loads on all 4 workers) - **HeavyApp**: `workers=2` class attribute (loads on 2 workers) - **ConfigLimitedApp**: `:1` config (loads on 1 worker) ## Running Tests ```bash # From this directory cd tests/docker/per_app_allocation # Build the Docker image docker compose build # Run all tests pytest test_per_app_e2e.py -v # Run specific test pytest test_per_app_e2e.py::TestPerAppAllocation::test_config_limited_app_uses_one_worker -v ``` ## Test Categories ### TestPerAppAllocation - Tests basic functionality of per-app worker allocation - Verifies round-robin distribution - Tests app accessibility ### TestPerAppWorkerCrash - Tests behavior when workers crash - Verifies app recovery after worker respawn ### TestPerAppLogs - Verifies logging output contains expected information ## 
Requirements - Docker and Docker Compose - Python 3.8+ - pytest - requests ## Notes - Tests run on port 8001 to avoid conflicts with the existing dirty_arbiter tests on 8000 - The container uses a keep-alive wrapper to allow testing worker crash scenarios benoitc-gunicorn-f5fb19e/tests/docker/per_app_allocation/app.py000066400000000000000000000131671514360242400250710ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ WSGI and Dirty applications for per-app worker allocation testing. Contains: - A WSGI app that can make dirty client requests - A lightweight dirty app (loads on all workers) - A heavy dirty app (limited to 2 workers via class attribute) - A config-limited app (limited to 1 worker via config) """ import json import os from gunicorn.dirty.app import DirtyApp def application(environ, start_response): """ WSGI application that invokes dirty apps and returns worker info. Routes: - GET /lightweight/ping - Call LightweightApp.ping() - GET /heavy/predict/ - Call HeavyApp.predict(data) - GET /config_limited/info - Call ConfigLimitedApp.get_info() - GET /status - Get overall status """ path = environ.get('PATH_INFO', '/') method = environ.get('REQUEST_METHOD', 'GET') if method != 'GET': start_response('405 Method Not Allowed', [('Content-Type', 'text/plain')]) return [b'Method not allowed'] # Import dirty client here to avoid import at module load from gunicorn.dirty import get_dirty_client try: client = get_dirty_client() if path == '/status': start_response('200 OK', [('Content-Type', 'application/json')]) return [json.dumps({"status": "ok"}).encode()] elif path == '/lightweight/ping': result = client.execute("app:LightweightApp", "ping") start_response('200 OK', [('Content-Type', 'application/json')]) return [json.dumps(result).encode()] elif path.startswith('/heavy/predict/'): data = path.split('/')[-1] result = client.execute("app:HeavyApp", "predict", data) 
start_response('200 OK', [('Content-Type', 'application/json')]) return [json.dumps(result).encode()] elif path == '/heavy/get_worker_id': result = client.execute("app:HeavyApp", "get_worker_id") start_response('200 OK', [('Content-Type', 'application/json')]) return [json.dumps({"worker_id": result}).encode()] elif path == '/config_limited/info': result = client.execute("app:ConfigLimitedApp", "get_info") start_response('200 OK', [('Content-Type', 'application/json')]) return [json.dumps(result).encode()] elif path == '/config_limited/get_worker_id': result = client.execute("app:ConfigLimitedApp", "get_worker_id") start_response('200 OK', [('Content-Type', 'application/json')]) return [json.dumps({"worker_id": result}).encode()] elif path == '/lightweight/get_worker_id': result = client.execute("app:LightweightApp", "get_worker_id") start_response('200 OK', [('Content-Type', 'application/json')]) return [json.dumps({"worker_id": result}).encode()] else: start_response('404 Not Found', [('Content-Type', 'text/plain')]) return [b'Not found'] except Exception as e: start_response('500 Internal Server Error', [('Content-Type', 'application/json')]) return [json.dumps({"error": str(e), "type": type(e).__name__}).encode()] class LightweightApp(DirtyApp): """ A lightweight app that should load on ALL dirty workers. workers=None (default) means all workers load this app. """ def __init__(self): self.initialized = False self.worker_id = None self.call_count = 0 def init(self): self.initialized = True self.worker_id = os.getpid() def ping(self): """Simple ping action.""" self.call_count += 1 return { "pong": True, "worker_id": self.worker_id, "call_count": self.call_count, } def get_worker_id(self): """Return the worker ID that loaded this app.""" return self.worker_id def close(self): pass class HeavyApp(DirtyApp): """ A heavy app that uses the workers class attribute to limit allocation. workers=2 means only 2 dirty workers will load this app. 
This simulates a large ML model that shouldn't be replicated everywhere. """ workers = 2 # Only 2 workers should load this app def __init__(self): self.initialized = False self.worker_id = None self.model_data = None def init(self): self.initialized = True self.worker_id = os.getpid() # Simulate loading a heavy model self.model_data = {"loaded": True, "worker": self.worker_id} def predict(self, data): """Simulate model prediction.""" return { "prediction": f"result_for_{data}", "worker_id": self.worker_id, } def get_worker_id(self): """Return the worker ID that loaded this app.""" return self.worker_id def close(self): self.model_data = None class ConfigLimitedApp(DirtyApp): """ An app whose worker limit is specified in config (not class attribute). The config will specify this app as "app:ConfigLimitedApp:1" to limit it to a single worker. """ def __init__(self): self.initialized = False self.worker_id = None def init(self): self.initialized = True self.worker_id = os.getpid() def get_info(self): """Get app info.""" return { "app": "ConfigLimitedApp", "worker_id": self.worker_id, } def get_worker_id(self): """Return the worker ID that loaded this app.""" return self.worker_id def close(self): pass benoitc-gunicorn-f5fb19e/tests/docker/per_app_allocation/docker-compose.yml000066400000000000000000000004721514360242400273670ustar00rootroot00000000000000services: gunicorn: build: context: ../../.. dockerfile: tests/docker/per_app_allocation/Dockerfile ports: - "8001:8000" healthcheck: test: ["CMD", "curl", "-f", "http://localhost:8000/status"] interval: 1s timeout: 1s retries: 30 stop_grace_period: 10s benoitc-gunicorn-f5fb19e/tests/docker/per_app_allocation/gunicorn_conf.py000066400000000000000000000016461514360242400271410ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Gunicorn configuration for per-app worker allocation e2e tests. 
Configuration: - 4 dirty workers total - LightweightApp: loads on ALL 4 workers (workers=None) - HeavyApp: loads on 2 workers (via class attribute workers=2) - ConfigLimitedApp: loads on 1 worker (via config :1 suffix) """ bind = "0.0.0.0:8000" workers = 1 # HTTP workers worker_class = "sync" # 4 dirty workers - enough to test distribution dirty_workers = 4 # App configuration: # - LightweightApp: no limit, loads on all 4 # - HeavyApp: workers=2 class attribute, loads on 2 # - ConfigLimitedApp: config override :1, loads on 1 dirty_apps = [ "app:LightweightApp", "app:HeavyApp", "app:ConfigLimitedApp:1", ] dirty_timeout = 30 dirty_graceful_timeout = 5 timeout = 30 graceful_timeout = 5 loglevel = "debug" accesslog = "-" errorlog = "-" benoitc-gunicorn-f5fb19e/tests/docker/per_app_allocation/test_per_app_e2e.py000066400000000000000000000332121514360242400275220ustar00rootroot00000000000000#!/usr/bin/env python # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Docker-based end-to-end tests for per-app worker allocation. These tests verify: 1. Apps with worker limits are only loaded on limited workers 2. Requests are routed to workers that have the target app 3. Round-robin distribution works within limited worker sets 4. 
Worker crash scenarios maintain correct app allocation Usage: # Build the container first docker compose build # Run all tests pytest test_per_app_e2e.py -v # Run specific test pytest test_per_app_e2e.py::TestPerAppAllocation::test_lightweight_app_round_robins -v """ import os import re import subprocess import time import pytest import requests class DockerContainer: """Context manager for managing a Docker container for per-app tests.""" def __init__(self, name="gunicorn-per-app-test", build=True): self.name = name self.build = build self.container_id = None self.base_url = "http://127.0.0.1:8001" def __enter__(self): # Build if requested if self.build: result = subprocess.run( ["docker", "compose", "build"], cwd=os.path.dirname(__file__), capture_output=True, text=True, ) if result.returncode != 0: raise RuntimeError(f"Docker build failed: {result.stderr}") # Remove any existing container with same name subprocess.run( ["docker", "rm", "-f", self.name], capture_output=True, ) # Start container with a keep-alive wrapper result = subprocess.run( [ "docker", "run", "-d", "--name", self.name, "-p", "8001:8000", "per_app_allocation-gunicorn", "sh", "-c", "gunicorn app:application -c gunicorn_conf.py & " "GUNICORN_PID=$!; " "trap 'kill $GUNICORN_PID 2>/dev/null' TERM; " "while true; do sleep 1; done" ], capture_output=True, text=True, ) if result.returncode != 0: raise RuntimeError(f"Docker run failed: {result.stderr}") self.container_id = result.stdout.strip() # Wait for gunicorn to be ready self._wait_for_ready() return self def __exit__(self, exc_type, exc_val, exc_tb): if self.container_id: # Get logs before cleanup logs = self.get_logs() if exc_val: print(f"\n=== Container logs ===\n{logs}\n=== End logs ===\n") # Stop and remove container subprocess.run( ["docker", "rm", "-f", self.name], capture_output=True, ) def _wait_for_ready(self, timeout=60): """Wait for gunicorn to be ready and serving requests.""" start = time.time() while time.time() - start < timeout: 
try: resp = requests.get(f"{self.base_url}/status", timeout=1) if resp.status_code == 200: # Also verify dirty workers are up by testing an app resp = requests.get(f"{self.base_url}/lightweight/ping", timeout=2) if resp.status_code == 200: return except requests.exceptions.RequestException: pass time.sleep(0.5) raise TimeoutError("Gunicorn did not start within timeout") def exec(self, cmd, check=True): """Execute a command in the container.""" result = subprocess.run( ["docker", "exec", self.name] + cmd, capture_output=True, text=True, ) if check and result.returncode != 0: raise RuntimeError(f"Command failed: {cmd}\n{result.stderr}") return result def get_logs(self): """Get container logs.""" result = subprocess.run( ["docker", "logs", self.name], capture_output=True, text=True, ) return result.stdout + result.stderr def get_gunicorn_pids(self): """Get PIDs of gunicorn processes.""" pids = { "master": None, "dirty-arbiter": None, "workers": [], "dirty-workers": [], } result = self.exec(["ps", "aux"], check=False) for line in result.stdout.split("\n"): if "gunicorn:" not in line: continue parts = line.split() if len(parts) < 2: continue pid = int(parts[1]) if "gunicorn: master" in line: pids["master"] = pid elif "gunicorn: dirty-arbiter" in line: pids["dirty-arbiter"] = pid elif "gunicorn: dirty-worker" in line: pids["dirty-workers"].append(pid) elif "gunicorn: worker" in line: pids["workers"].append(pid) return pids def kill_process(self, pid, signal=9): """Send a signal to a process in the container.""" self.exec( ["kill", f"-{signal}", str(pid)], check=False, ) def wait_for_dirty_worker_count(self, expected_count, timeout=10): """Wait for specific number of dirty workers.""" start = time.time() while time.time() - start < timeout: pids = self.get_gunicorn_pids() if len(pids["dirty-workers"]) == expected_count: return True time.sleep(0.5) return False def http_get(self, path, timeout=5): """Make HTTP GET request to the container.""" return 
requests.get(f"{self.base_url}{path}", timeout=timeout) class TestPerAppAllocation: """Test per-app worker allocation functionality.""" @pytest.fixture(autouse=True) def setup(self): """Check Docker is available.""" result = subprocess.run( ["docker", "info"], capture_output=True, ) if result.returncode != 0: pytest.skip("Docker is not available") def test_lightweight_app_responds(self): """LightweightApp should be accessible and respond correctly.""" with DockerContainer() as container: resp = container.http_get("/lightweight/ping") assert resp.status_code == 200 data = resp.json() assert data["pong"] is True assert "worker_id" in data def test_lightweight_app_round_robins(self): """LightweightApp requests should round-robin across all 4 workers.""" with DockerContainer() as container: # Make multiple requests to collect worker IDs worker_ids = set() for _ in range(20): # More than 4 to ensure round-robin resp = container.http_get("/lightweight/get_worker_id") assert resp.status_code == 200 data = resp.json() worker_ids.add(data["worker_id"]) # Should see all 4 workers (or at least more than 1) # Note: Due to timing, we might not hit all 4 in exactly 20 requests assert len(worker_ids) >= 2, ( f"Expected requests to go to multiple workers, got {len(worker_ids)}" ) def test_config_limited_app_uses_one_worker(self): """ConfigLimitedApp (limited to 1 via config) should use only one worker.""" with DockerContainer() as container: # Make multiple requests worker_ids = set() for _ in range(10): resp = container.http_get("/config_limited/get_worker_id") assert resp.status_code == 200 data = resp.json() worker_ids.add(data["worker_id"]) # Should only see 1 worker (the app is limited to 1) assert len(worker_ids) == 1, ( f"Expected ConfigLimitedApp to use only 1 worker, got {len(worker_ids)}" ) def test_heavy_app_uses_limited_workers(self): """HeavyApp (workers=2) should use only 2 workers.""" with DockerContainer() as container: # Make multiple requests worker_ids = set() 
for _ in range(20): resp = container.http_get("/heavy/get_worker_id") # HeavyApp uses class attribute workers=2 # But currently the arbiter only reads config :N format # This test documents expected behavior if resp.status_code == 200: data = resp.json() worker_ids.add(data["worker_id"]) else: # If class attribute isn't supported yet, skip pytest.skip("HeavyApp class attribute workers=2 not implemented") return # Should see at most 2 workers assert len(worker_ids) <= 2, ( f"Expected HeavyApp to use at most 2 workers, got {len(worker_ids)}" ) def test_heavy_app_prediction_works(self): """HeavyApp.predict() should return correct results.""" with DockerContainer() as container: resp = container.http_get("/heavy/predict/test_input") if resp.status_code == 200: data = resp.json() assert data["prediction"] == "result_for_test_input" assert "worker_id" in data else: # If class attribute isn't supported, document the error data = resp.json() print(f"HeavyApp error: {data}") def test_all_apps_accessible(self): """All configured apps should be accessible.""" with DockerContainer() as container: # LightweightApp resp = container.http_get("/lightweight/ping") assert resp.status_code == 200 # ConfigLimitedApp resp = container.http_get("/config_limited/info") assert resp.status_code == 200 data = resp.json() assert data["app"] == "ConfigLimitedApp" def test_four_dirty_workers_running(self): """Should have 4 dirty workers as configured.""" with DockerContainer() as container: pids = container.get_gunicorn_pids() assert len(pids["dirty-workers"]) == 4, ( f"Expected 4 dirty workers, got {len(pids['dirty-workers'])}" ) class TestPerAppWorkerCrash: """Test per-app allocation behavior when workers crash.""" @pytest.fixture(autouse=True) def setup(self): """Check Docker is available.""" result = subprocess.run( ["docker", "info"], capture_output=True, ) if result.returncode != 0: pytest.skip("Docker is not available") def test_worker_crash_app_still_accessible(self): """When a dirty 
worker crashes, apps should still be accessible.""" with DockerContainer() as container: pids = container.get_gunicorn_pids() assert len(pids["dirty-workers"]) == 4 # Kill one dirty worker container.kill_process(pids["dirty-workers"][0], signal=9) # Wait for respawn (dirty arbiter should respawn it) assert container.wait_for_dirty_worker_count(4, timeout=15), ( "Dirty arbiter should respawn killed worker" ) # Apps should still work resp = container.http_get("/lightweight/ping") assert resp.status_code == 200 resp = container.http_get("/config_limited/info") assert resp.status_code == 200 def test_config_limited_worker_crash_recovery(self): """When the sole worker for ConfigLimitedApp crashes, it should recover.""" with DockerContainer() as container: # Get the worker ID that handles ConfigLimitedApp resp = container.http_get("/config_limited/get_worker_id") assert resp.status_code == 200 original_worker_id = resp.json()["worker_id"] # Kill that specific worker container.kill_process(original_worker_id, signal=9) # Wait for respawn time.sleep(3) # The new worker should handle ConfigLimitedApp resp = container.http_get("/config_limited/get_worker_id") # Note: There might be a brief period where no worker has the app # In production, this would return an error until respawn if resp.status_code == 200: new_worker_id = resp.json()["worker_id"] # Worker ID should be different (new process) assert new_worker_id != original_worker_id, ( "New worker should have different PID" ) class TestPerAppLogs: """Test that per-app allocation is logged correctly.""" @pytest.fixture(autouse=True) def setup(self): """Check Docker is available.""" result = subprocess.run( ["docker", "info"], capture_output=True, ) if result.returncode != 0: pytest.skip("Docker is not available") def test_logs_show_app_allocation(self): """Logs should indicate which apps are loaded on which workers.""" with DockerContainer() as container: logs = container.get_logs() # Should see dirty arbiter starting 
assert "Dirty arbiter" in logs or "dirty arbiter" in logs.lower() # Should see dirty workers spawning assert "dirty" in logs.lower() and "worker" in logs.lower() if __name__ == "__main__": pytest.main([__file__, "-v"]) benoitc-gunicorn-f5fb19e/tests/docker/test_asgi_uwsgi/000077500000000000000000000000001514360242400232745ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/tests/docker/test_asgi_uwsgi/Dockerfile000066400000000000000000000005471514360242400252740ustar00rootroot00000000000000FROM python:3.11-slim WORKDIR /build # Copy gunicorn source COPY . /build/ # Install gunicorn from source RUN pip install --no-cache-dir -e . # Copy test app WORKDIR /app COPY tests/docker/test_asgi_uwsgi/app.py /app/ # Expose uWSGI port EXPOSE 8000 CMD ["gunicorn", "--worker-class", "asgi", "--protocol", "uwsgi", "--bind", "0.0.0.0:8000", "app:app"] benoitc-gunicorn-f5fb19e/tests/docker/test_asgi_uwsgi/app.py000066400000000000000000000027121514360242400244300ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""Simple ASGI test application for uWSGI protocol testing.""" async def app(scope, receive, send): """Simple ASGI application that echoes request info.""" if scope["type"] == "lifespan": while True: message = await receive() if message["type"] == "lifespan.startup": await send({"type": "lifespan.startup.complete"}) elif message["type"] == "lifespan.shutdown": await send({"type": "lifespan.shutdown.complete"}) return if scope["type"] != "http": return # Read body body = b"" while True: message = await receive() body += message.get("body", b"") if not message.get("more_body", False): break # Build response method = scope["method"] path = scope["path"] query = scope.get("query_string", b"").decode("utf-8") response_body = f"Method: {method}\nPath: {path}\nQuery: {query}\nBody: {body.decode('utf-8')}\n" response_bytes = response_body.encode("utf-8") await send({ "type": "http.response.start", "status": 200, "headers": [ [b"content-type", b"text/plain"], [b"content-length", str(len(response_bytes)).encode()], ], }) await send({ "type": "http.response.body", "body": response_bytes, }) benoitc-gunicorn-f5fb19e/tests/docker/test_asgi_uwsgi/docker-compose.yml000066400000000000000000000007531514360242400267360ustar00rootroot00000000000000services: gunicorn: build: context: ../../.. 
dockerfile: tests/docker/test_asgi_uwsgi/Dockerfile command: > gunicorn --worker-class asgi --protocol uwsgi --uwsgi-allow-from '*' --bind 0.0.0.0:8000 --workers 1 --log-level debug app:app working_dir: /app nginx: image: nginx:alpine ports: - "8080:80" volumes: - ./nginx.conf:/etc/nginx/conf.d/default.conf:ro depends_on: - gunicorn benoitc-gunicorn-f5fb19e/tests/docker/test_asgi_uwsgi/nginx.conf000066400000000000000000000003601514360242400252650ustar00rootroot00000000000000server { listen 80; server_name localhost; location / { uwsgi_pass gunicorn:8000; include uwsgi_params; } location /health { return 200 "OK"; add_header Content-Type text/plain; } } benoitc-gunicorn-f5fb19e/tests/docker/test_asgi_uwsgi/test_uwsgi.sh000077500000000000000000000042651514360242400260370ustar00rootroot00000000000000#!/bin/bash # Integration test for ASGI uWSGI protocol support # # This script tests that gunicorn's ASGI worker correctly handles # the uWSGI protocol when nginx forwards requests using uwsgi_pass. set -e SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" cd "$SCRIPT_DIR" # Use IPv4 explicitly to avoid Docker IPv6 issues BASE_URL="http://127.0.0.1:8080" cleanup() { echo "Cleaning up..." 
docker compose down -v 2>/dev/null || true } trap cleanup EXIT echo "=== Building and starting containers ===" docker compose up -d --build echo "=== Waiting for services to be ready ===" sleep 5 echo "=== Running tests ===" # Test 1: Simple GET request echo "Test 1: Simple GET request" RESPONSE=$(curl -s "$BASE_URL/") if echo "$RESPONSE" | grep -q "Method: GET"; then echo " PASS: GET request works" else echo " FAIL: GET request failed" echo " Response: $RESPONSE" exit 1 fi # Test 2: GET with query string echo "Test 2: GET with query string" RESPONSE=$(curl -s "$BASE_URL/search?q=test&page=1") if echo "$RESPONSE" | grep -q "Query: q=test&page=1"; then echo " PASS: Query string works" else echo " FAIL: Query string failed" echo " Response: $RESPONSE" exit 1 fi # Test 3: POST with body echo "Test 3: POST with body" RESPONSE=$(curl -s -X POST -d "hello=world" "$BASE_URL/submit") if echo "$RESPONSE" | grep -q "Method: POST" && echo "$RESPONSE" | grep -q "Body: hello=world"; then echo " PASS: POST with body works" else echo " FAIL: POST with body failed" echo " Response: $RESPONSE" exit 1 fi # Test 4: Path handling echo "Test 4: Path handling" RESPONSE=$(curl -s "$BASE_URL/api/v1/users") if echo "$RESPONSE" | grep -q "Path: /api/v1/users"; then echo " PASS: Path handling works" else echo " FAIL: Path handling failed" echo " Response: $RESPONSE" exit 1 fi # Test 5: Multiple requests (keepalive) echo "Test 5: Multiple requests (keepalive)" for i in 1 2 3; do RESPONSE=$(curl -s "$BASE_URL/request/$i") if ! echo "$RESPONSE" | grep -q "Path: /request/$i"; then echo " FAIL: Request $i failed" exit 1 fi done echo " PASS: Multiple requests work" echo "" echo "=== All tests passed! 
===" benoitc-gunicorn-f5fb19e/tests/docker/uwsgi/000077500000000000000000000000001514360242400212325ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/tests/docker/uwsgi/Dockerfile.gunicorn000066400000000000000000000006161514360242400250520ustar00rootroot00000000000000FROM python:3.11-slim WORKDIR /app # Copy gunicorn source COPY . /app/gunicorn-src/ # Install gunicorn from source RUN pip install --no-cache-dir /app/gunicorn-src/ # Copy test application COPY tests/docker/uwsgi/app.py /app/ EXPOSE 8000 CMD ["gunicorn", "--protocol", "uwsgi", "--uwsgi-allow-from", "*", "--bind", "0.0.0.0:8000", "--workers", "2", "--log-level", "debug", "app:application"] benoitc-gunicorn-f5fb19e/tests/docker/uwsgi/Dockerfile.nginx000066400000000000000000000003501514360242400243440ustar00rootroot00000000000000FROM nginx:alpine # Remove default config RUN rm /etc/nginx/conf.d/default.conf # Copy custom config COPY nginx.conf /etc/nginx/nginx.conf COPY uwsgi_params /etc/nginx/uwsgi_params EXPOSE 8080 CMD ["nginx", "-g", "daemon off;"] benoitc-gunicorn-f5fb19e/tests/docker/uwsgi/README.md000066400000000000000000000073171514360242400225210ustar00rootroot00000000000000# uWSGI Protocol Docker Integration Tests This directory contains Docker-based integration tests that verify gunicorn's uWSGI binary protocol implementation works correctly with nginx's `uwsgi_pass` directive. ## Architecture ``` [pytest] --HTTP--> [nginx:8080] --uwsgi_pass--> [gunicorn:8000] ``` The tests make HTTP requests to nginx, which proxies them to gunicorn using the uWSGI binary protocol. This validates the complete request/response cycle through the protocol. 
## Prerequisites - Docker - Docker Compose (v2) - Python 3.8+ - pytest - requests ## Running Tests ### From repository root: ```bash # Run all uWSGI integration tests pytest tests/docker/uwsgi/ -v # Run specific test class pytest tests/docker/uwsgi/ -v -k TestBasicRequests # Skip Docker tests (for CI environments without Docker) pytest tests/ -v -m "not docker" ``` ### Manual testing: ```bash cd tests/docker/uwsgi # Start services docker compose up -d # Wait for services to be healthy docker compose ps # Test endpoints curl http://localhost:8080/ curl -X POST -d "test body" http://localhost:8080/echo curl http://localhost:8080/headers curl "http://localhost:8080/query?foo=bar" curl http://localhost:8080/environ curl http://localhost:8080/error/404 curl http://localhost:8080/large > /dev/null # 1MB response # View logs docker compose logs gunicorn docker compose logs nginx # Stop services docker compose down -v ``` ## Test Categories | Category | Description | |----------|-------------| | `TestBasicRequests` | GET, POST, query strings, large bodies | | `TestHeaderPreservation` | Custom headers, Host, Content-Type, User-Agent | | `TestKeepAlive` | Multiple requests per connection | | `TestErrorResponses` | HTTP error codes (400, 404, 500, etc.) | | `TestEnvironVariables` | WSGI environ: REQUEST_METHOD, PATH_INFO, etc. 
| | `TestLargeResponses` | 1MB response body streaming | | `TestConcurrency` | Parallel request handling | | `TestSpecialCases` | Edge cases: binary data, unicode, long headers | ## Files | File | Purpose | |------|---------| | `docker-compose.yml` | Orchestrates nginx + gunicorn containers | | `Dockerfile.gunicorn` | Builds gunicorn image with test app | | `Dockerfile.nginx` | Builds nginx with uwsgi config | | `nginx.conf` | nginx configuration using `uwsgi_pass` | | `uwsgi_params` | Standard uwsgi parameter mappings | | `app.py` | Test WSGI application with multiple endpoints | | `conftest.py` | pytest fixtures for Docker lifecycle | | `test_uwsgi_integration.py` | Test cases | ## Test App Endpoints | Endpoint | Method | Description | |----------|--------|-------------| | `/` | GET | Basic hello response | | `/echo` | POST | Echo request body | | `/headers` | GET/POST | Return received headers as JSON | | `/environ` | GET/POST | Return WSGI environ as JSON | | `/query` | GET | Return query params as JSON | | `/json` | POST | Parse and echo JSON body | | `/error/{code}` | GET | Return specified HTTP error | | `/large` | GET | Return 1MB response | ## Gunicorn Configuration The gunicorn container runs with: ```bash gunicorn \ --protocol uwsgi \ --uwsgi-allow-from "*" \ --bind 0.0.0.0:8000 \ --workers 2 \ --log-level debug \ app:application ``` Key settings: - `--protocol uwsgi`: Enable uWSGI binary protocol - `--uwsgi-allow-from "*"`: Accept connections from Docker network IPs ## Troubleshooting ### Services won't start Check Docker logs: ```bash docker compose logs ``` ### Connection refused Wait for health checks: ```bash docker compose ps # Check health status ``` ### Tests timing out Increase `STARTUP_TIMEOUT` in `conftest.py` or check if ports are in use: ```bash lsof -i :8080 lsof -i :8000 ``` ### Rebuild after code changes ```bash docker compose build --no-cache docker compose up -d ``` 
benoitc-gunicorn-f5fb19e/tests/docker/uwsgi/app.py000066400000000000000000000151251514360242400223700ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Test WSGI application for uWSGI protocol integration tests. This application provides various endpoints to test different aspects of the uWSGI binary protocol when proxied through nginx. """ import json def application(environ, start_response): """Main WSGI application entry point.""" path = environ.get('PATH_INFO', '/') method = environ.get('REQUEST_METHOD', 'GET') # Route to appropriate handler if path == '/': return handle_root(environ, start_response) elif path == '/echo': return handle_echo(environ, start_response) elif path == '/headers': return handle_headers(environ, start_response) elif path == '/environ': return handle_environ(environ, start_response) elif path.startswith('/error/'): return handle_error(environ, start_response, path) elif path == '/large': return handle_large(environ, start_response) elif path == '/json': return handle_json(environ, start_response) elif path == '/query': return handle_query(environ, start_response) else: return handle_not_found(environ, start_response) def handle_root(environ, start_response): """Basic root endpoint.""" status = '200 OK' headers = [('Content-Type', 'text/plain')] start_response(status, headers) return [b'Hello from gunicorn uWSGI!\n'] def handle_echo(environ, start_response): """Echo back the request body.""" try: content_length = int(environ.get('CONTENT_LENGTH', 0)) except (ValueError, TypeError): content_length = 0 body = b'' if content_length > 0: body = environ['wsgi.input'].read(content_length) status = '200 OK' headers = [ ('Content-Type', 'application/octet-stream'), ('Content-Length', str(len(body))) ] start_response(status, headers) return [body] def handle_headers(environ, start_response): """Return received HTTP headers as JSON.""" headers_dict = {} for key, 
value in environ.items(): if key.startswith('HTTP_'): # Convert HTTP_X_CUSTOM_HEADER to X-Custom-Header header_name = key[5:].replace('_', '-').title() headers_dict[header_name] = value # Also include some special headers if 'CONTENT_TYPE' in environ: headers_dict['Content-Type'] = environ['CONTENT_TYPE'] if 'CONTENT_LENGTH' in environ: headers_dict['Content-Length'] = environ['CONTENT_LENGTH'] body = json.dumps(headers_dict, indent=2).encode('utf-8') status = '200 OK' headers = [ ('Content-Type', 'application/json'), ('Content-Length', str(len(body))) ] start_response(status, headers) return [body] def handle_environ(environ, start_response): """Return WSGI environ variables as JSON.""" # Filter to serializable values safe_environ = {} skip_keys = {'wsgi.input', 'wsgi.errors', 'wsgi.file_wrapper'} for key, value in environ.items(): if key in skip_keys: continue try: # Test if value is JSON serializable json.dumps(value) safe_environ[key] = value except (TypeError, ValueError): safe_environ[key] = str(value) body = json.dumps(safe_environ, indent=2).encode('utf-8') status = '200 OK' headers = [ ('Content-Type', 'application/json'), ('Content-Length', str(len(body))) ] start_response(status, headers) return [body] def handle_error(environ, start_response, path): """Return specified HTTP error code.""" try: code = int(path.split('/')[-1]) except ValueError: code = 500 status_messages = { 400: 'Bad Request', 401: 'Unauthorized', 403: 'Forbidden', 404: 'Not Found', 500: 'Internal Server Error', 502: 'Bad Gateway', 503: 'Service Unavailable', } message = status_messages.get(code, 'Error') status = f'{code} {message}' body = json.dumps({'error': message, 'code': code}).encode('utf-8') headers = [ ('Content-Type', 'application/json'), ('Content-Length', str(len(body))) ] start_response(status, headers) return [body] def handle_large(environ, start_response): """Return a 1MB response body for testing large responses.""" # Generate 1MB of data (1024 * 1024 bytes) chunk_size 
= 1024 num_chunks = 1024 chunk = b'X' * chunk_size status = '200 OK' headers = [ ('Content-Type', 'application/octet-stream'), ('Content-Length', str(chunk_size * num_chunks)) ] start_response(status, headers) # Return as generator for streaming def generate(): for _ in range(num_chunks): yield chunk return generate() def handle_json(environ, start_response): """Handle JSON POST requests.""" try: content_length = int(environ.get('CONTENT_LENGTH', 0)) except (ValueError, TypeError): content_length = 0 if content_length > 0: body = environ['wsgi.input'].read(content_length) try: data = json.loads(body.decode('utf-8')) response = {'received': data, 'status': 'ok'} except json.JSONDecodeError: response = {'error': 'Invalid JSON', 'status': 'error'} else: response = {'error': 'No body', 'status': 'error'} body = json.dumps(response).encode('utf-8') status = '200 OK' headers = [ ('Content-Type', 'application/json'), ('Content-Length', str(len(body))) ] start_response(status, headers) return [body] def handle_query(environ, start_response): """Return query string parameters as JSON.""" from urllib.parse import parse_qs query_string = environ.get('QUERY_STRING', '') params = parse_qs(query_string) # Convert lists to single values where appropriate simple_params = {k: v[0] if len(v) == 1 else v for k, v in params.items()} body = json.dumps(simple_params).encode('utf-8') status = '200 OK' headers = [ ('Content-Type', 'application/json'), ('Content-Length', str(len(body))) ] start_response(status, headers) return [body] def handle_not_found(environ, start_response): """Handle 404 for unknown paths.""" body = json.dumps({'error': 'Not Found', 'path': environ.get('PATH_INFO')}).encode('utf-8') status = '404 Not Found' headers = [ ('Content-Type', 'application/json'), ('Content-Length', str(len(body))) ] start_response(status, headers) return [body] 
benoitc-gunicorn-f5fb19e/tests/docker/uwsgi/conftest.py000066400000000000000000000062611514360242400234360ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ pytest fixtures for uWSGI Docker integration tests. """ import os import subprocess import time import pytest import requests COMPOSE_FILE = os.path.join(os.path.dirname(__file__), 'docker-compose.yml') NGINX_URL = 'http://127.0.0.1:8080' STARTUP_TIMEOUT = 60 # seconds def is_docker_available(): """Check if Docker is available.""" try: result = subprocess.run( ['docker', 'info'], capture_output=True, timeout=10 ) return result.returncode == 0 except (subprocess.TimeoutExpired, FileNotFoundError): return False def is_compose_available(): """Check if docker compose is available.""" try: result = subprocess.run( ['docker', 'compose', 'version'], capture_output=True, timeout=10 ) return result.returncode == 0 except (subprocess.TimeoutExpired, FileNotFoundError): return False docker_available = pytest.mark.skipif( not is_docker_available() or not is_compose_available(), reason="Docker or docker compose not available" ) @pytest.fixture(scope='session') def docker_services(): """ Start Docker Compose services for the test session. This fixture builds and starts the gunicorn and nginx containers, waits for them to be healthy, and tears them down after all tests. 
""" if not is_docker_available() or not is_compose_available(): pytest.skip("Docker or docker compose not available") # Build and start services subprocess.run( ['docker', 'compose', '-f', COMPOSE_FILE, 'build'], check=True, capture_output=True ) subprocess.run( ['docker', 'compose', '-f', COMPOSE_FILE, 'up', '-d'], check=True, capture_output=True ) # Wait for services to be healthy start_time = time.time() while time.time() - start_time < STARTUP_TIMEOUT: try: response = requests.get(f'{NGINX_URL}/', timeout=2) if response.status_code == 200: break except requests.RequestException: pass time.sleep(1) else: # Get logs for debugging logs = subprocess.run( ['docker', 'compose', '-f', COMPOSE_FILE, 'logs'], capture_output=True, text=True ) subprocess.run( ['docker', 'compose', '-f', COMPOSE_FILE, 'down', '-v'], capture_output=True ) pytest.fail( f"Services did not become healthy within {STARTUP_TIMEOUT}s.\n" f"Logs:\n{logs.stdout}\n{logs.stderr}" ) yield # Teardown subprocess.run( ['docker', 'compose', '-f', COMPOSE_FILE, 'down', '-v'], capture_output=True ) @pytest.fixture def nginx_url(docker_services): """Return the nginx base URL.""" return NGINX_URL @pytest.fixture def session(docker_services): """Return a requests Session with keep-alive enabled.""" with requests.Session() as s: # Enable keep-alive s.headers['Connection'] = 'keep-alive' yield s benoitc-gunicorn-f5fb19e/tests/docker/uwsgi/docker-compose.yml000066400000000000000000000012451514360242400246710ustar00rootroot00000000000000services: gunicorn: build: context: ../../.. dockerfile: tests/docker/uwsgi/Dockerfile.gunicorn expose: - "8000" healthcheck: test: ["CMD", "python", "-c", "import socket; s=socket.socket(); s.connect(('localhost', 8000)); s.close()"] interval: 2s timeout: 5s retries: 10 start_period: 5s nginx: build: context: . 
dockerfile: Dockerfile.nginx ports: - "8080:8080" depends_on: gunicorn: condition: service_healthy healthcheck: test: ["CMD", "curl", "-f", "http://localhost:8080/"] interval: 2s timeout: 5s retries: 10 start_period: 5s benoitc-gunicorn-f5fb19e/tests/docker/uwsgi/nginx.conf000066400000000000000000000021131514360242400232210ustar00rootroot00000000000000worker_processes 1; events { worker_connections 1024; } http { include /etc/nginx/mime.types; default_type application/octet-stream; log_format main '$remote_addr - $remote_user [$time_local] "$request" ' '$status $body_bytes_sent "$http_referer" ' '"$http_user_agent"'; access_log /var/log/nginx/access.log main; error_log /var/log/nginx/error.log debug; sendfile on; keepalive_timeout 65; upstream gunicorn { server gunicorn:8000; } server { listen 8080; server_name localhost; # Increase buffer sizes for large headers uwsgi_buffer_size 32k; uwsgi_buffers 8 32k; uwsgi_busy_buffers_size 64k; # Read timeout for large responses uwsgi_read_timeout 300s; location / { uwsgi_pass gunicorn; include uwsgi_params; # Pass additional headers uwsgi_param HTTP_X_FORWARDED_FOR $proxy_add_x_forwarded_for; uwsgi_param HTTP_X_REAL_IP $remote_addr; } } } benoitc-gunicorn-f5fb19e/tests/docker/uwsgi/test_uwsgi_integration.py000066400000000000000000000271601514360242400264120ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Integration tests for gunicorn's uWSGI binary protocol with nginx. These tests verify that gunicorn correctly implements the uWSGI binary protocol by running actual requests through nginx's uwsgi_pass directive. 
""" import concurrent.futures import json import pytest import requests from conftest import docker_available @docker_available class TestBasicRequests: """Test basic HTTP request handling through uWSGI protocol.""" def test_get_root(self, nginx_url): """Test basic GET request to root endpoint.""" response = requests.get(f'{nginx_url}/') assert response.status_code == 200 assert b'Hello from gunicorn uWSGI!' in response.content def test_get_with_query_string(self, nginx_url): """Test GET request with query string parameters.""" response = requests.get(f'{nginx_url}/query?foo=bar&baz=qux') assert response.status_code == 200 data = response.json() assert data['foo'] == 'bar' assert data['baz'] == 'qux' def test_post_echo(self, nginx_url): """Test POST request with body echo.""" test_body = b'This is a test body content' response = requests.post(f'{nginx_url}/echo', data=test_body) assert response.status_code == 200 assert response.content == test_body def test_post_json(self, nginx_url): """Test POST request with JSON body.""" test_data = {'key': 'value', 'number': 42, 'nested': {'a': 1}} response = requests.post( f'{nginx_url}/json', json=test_data, headers={'Content-Type': 'application/json'} ) assert response.status_code == 200 data = response.json() assert data['status'] == 'ok' assert data['received'] == test_data def test_post_large_body(self, nginx_url): """Test POST with large request body (100KB).""" large_body = b'X' * (100 * 1024) response = requests.post(f'{nginx_url}/echo', data=large_body) assert response.status_code == 200 assert len(response.content) == len(large_body) assert response.content == large_body @docker_available class TestHeaderPreservation: """Test that headers are correctly passed through uWSGI protocol.""" def test_custom_headers(self, nginx_url): """Test custom headers are passed to the application.""" custom_headers = { 'X-Custom-Header': 'custom-value', 'X-Another-Header': 'another-value' } response = 
requests.get(f'{nginx_url}/headers', headers=custom_headers) assert response.status_code == 200 data = response.json() assert data.get('X-Custom-Header') == 'custom-value' assert data.get('X-Another-Header') == 'another-value' def test_host_header(self, nginx_url): """Test Host header is passed correctly.""" response = requests.get( f'{nginx_url}/headers', headers={'Host': 'test.example.com'} ) assert response.status_code == 200 data = response.json() assert data.get('Host') == 'test.example.com' def test_content_type_header(self, nginx_url): """Test Content-Type header is passed correctly.""" response = requests.post( f'{nginx_url}/headers', data='test', headers={'Content-Type': 'application/x-custom-type'} ) assert response.status_code == 200 data = response.json() assert data.get('Content-Type') == 'application/x-custom-type' def test_user_agent_header(self, nginx_url): """Test User-Agent header is passed correctly.""" response = requests.get( f'{nginx_url}/headers', headers={'User-Agent': 'TestAgent/1.0'} ) assert response.status_code == 200 data = response.json() assert data.get('User-Agent') == 'TestAgent/1.0' @docker_available class TestKeepAlive: """Test HTTP keep-alive with multiple requests per connection.""" def test_multiple_requests_same_session(self, session, nginx_url): """Test multiple requests using same session/connection.""" for i in range(5): response = session.get(f'{nginx_url}/') assert response.status_code == 200 def test_mixed_requests_same_session(self, session, nginx_url): """Test mixed GET and POST requests using same session.""" # GET request response = session.get(f'{nginx_url}/') assert response.status_code == 200 # POST request response = session.post(f'{nginx_url}/echo', data=b'test') assert response.status_code == 200 assert response.content == b'test' # Another GET response = session.get(f'{nginx_url}/headers') assert response.status_code == 200 # JSON POST response = session.post(f'{nginx_url}/json', json={'test': 1}) assert 
response.status_code == 200 @docker_available class TestErrorResponses: """Test HTTP error responses through uWSGI protocol.""" @pytest.mark.parametrize('code', [400, 401, 403, 404, 500, 502, 503]) def test_error_codes(self, nginx_url, code): """Test various HTTP error codes are returned correctly.""" response = requests.get(f'{nginx_url}/error/{code}') assert response.status_code == code data = response.json() assert data['code'] == code def test_not_found(self, nginx_url): """Test 404 for non-existent path.""" response = requests.get(f'{nginx_url}/nonexistent/path') assert response.status_code == 404 data = response.json() assert data['error'] == 'Not Found' assert data['path'] == '/nonexistent/path' @docker_available class TestEnvironVariables: """Test WSGI environ variables are correctly set.""" def test_request_method(self, nginx_url): """Test REQUEST_METHOD is set correctly.""" response = requests.get(f'{nginx_url}/environ') assert response.status_code == 200 data = response.json() assert data.get('REQUEST_METHOD') == 'GET' response = requests.post(f'{nginx_url}/environ', data='') data = response.json() assert data.get('REQUEST_METHOD') == 'POST' def test_path_info(self, nginx_url): """Test PATH_INFO is set correctly.""" response = requests.get(f'{nginx_url}/environ') assert response.status_code == 200 data = response.json() assert data.get('PATH_INFO') == '/environ' def test_query_string(self, nginx_url): """Test QUERY_STRING is set correctly.""" response = requests.get(f'{nginx_url}/environ?foo=bar&test=123') assert response.status_code == 200 data = response.json() assert data.get('QUERY_STRING') == 'foo=bar&test=123' def test_server_protocol(self, nginx_url): """Test SERVER_PROTOCOL is set.""" response = requests.get(f'{nginx_url}/environ') assert response.status_code == 200 data = response.json() assert 'SERVER_PROTOCOL' in data assert data['SERVER_PROTOCOL'].startswith('HTTP/') def test_content_length(self, nginx_url): """Test CONTENT_LENGTH is set for 
POST requests.""" body = 'test body content' response = requests.post(f'{nginx_url}/environ', data=body) assert response.status_code == 200 data = response.json() assert data.get('CONTENT_LENGTH') == str(len(body)) @docker_available class TestLargeResponses: """Test large response handling through uWSGI protocol.""" def test_1mb_response(self, nginx_url): """Test 1MB response body is received correctly.""" response = requests.get(f'{nginx_url}/large') assert response.status_code == 200 assert len(response.content) == 1024 * 1024 # Verify content is all 'X' characters assert response.content == b'X' * (1024 * 1024) def test_large_response_content_length(self, nginx_url): """Test Content-Length header for large response.""" response = requests.get(f'{nginx_url}/large') assert response.status_code == 200 assert response.headers.get('Content-Length') == str(1024 * 1024) @docker_available class TestConcurrency: """Test concurrent request handling.""" def test_parallel_requests(self, nginx_url): """Test handling multiple parallel requests.""" num_requests = 20 def make_request(i): response = requests.get(f'{nginx_url}/query?id={i}') return response.status_code, response.json().get('id') with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor: futures = [executor.submit(make_request, i) for i in range(num_requests)] results = [f.result() for f in concurrent.futures.as_completed(futures)] # All requests should succeed assert all(status == 200 for status, _ in results) # All IDs should be present ids = set(id_val for _, id_val in results) assert ids == set(str(i) for i in range(num_requests)) def test_parallel_mixed_requests(self, nginx_url): """Test parallel GET and POST requests.""" def get_request(): return requests.get(f'{nginx_url}/').status_code def post_request(data): response = requests.post(f'{nginx_url}/echo', data=data) return response.status_code, response.content with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor: get_futures 
= [executor.submit(get_request) for _ in range(10)] post_futures = [ executor.submit(post_request, f'data-{i}'.encode()) for i in range(10) ] get_results = [f.result() for f in get_futures] post_results = [f.result() for f in post_futures] assert all(status == 200 for status in get_results) assert all(status == 200 for status, _ in post_results) @docker_available class TestSpecialCases: """Test edge cases and special scenarios.""" def test_empty_body_post(self, nginx_url): """Test POST with empty body.""" response = requests.post(f'{nginx_url}/echo', data=b'') assert response.status_code == 200 assert response.content == b'' def test_binary_body(self, nginx_url): """Test POST with binary body containing null bytes.""" binary_data = bytes(range(256)) response = requests.post(f'{nginx_url}/echo', data=binary_data) assert response.status_code == 200 assert response.content == binary_data def test_unicode_in_query_string(self, nginx_url): """Test unicode characters in query string.""" response = requests.get(f'{nginx_url}/query', params={'name': 'test'}) assert response.status_code == 200 data = response.json() assert data.get('name') == 'test' def test_special_characters_in_path(self, nginx_url): """Test handling of special path that triggers 404.""" # This should return 404 since the path doesn't exist response = requests.get(f'{nginx_url}/path/with/slashes') assert response.status_code == 404 def test_long_header_value(self, nginx_url): """Test handling of long header values.""" long_value = 'X' * 4096 # 4KB header value response = requests.get( f'{nginx_url}/headers', headers={'X-Long-Header': long_value} ) assert response.status_code == 200 data = response.json() assert data.get('X-Long-Header') == long_value benoitc-gunicorn-f5fb19e/tests/docker/uwsgi/uwsgi_params000066400000000000000000000012271514360242400236600ustar00rootroot00000000000000uwsgi_param QUERY_STRING $query_string; uwsgi_param REQUEST_METHOD $request_method; uwsgi_param CONTENT_TYPE $content_type; 
uwsgi_param CONTENT_LENGTH $content_length; uwsgi_param REQUEST_URI $request_uri; uwsgi_param PATH_INFO $document_uri; uwsgi_param DOCUMENT_ROOT $document_root; uwsgi_param SERVER_PROTOCOL $server_protocol; uwsgi_param REQUEST_SCHEME $scheme; uwsgi_param HTTPS $https if_not_empty; uwsgi_param REMOTE_ADDR $remote_addr; uwsgi_param REMOTE_PORT $remote_port; uwsgi_param SERVER_PORT $server_port; uwsgi_param SERVER_NAME $server_name; benoitc-gunicorn-f5fb19e/tests/requests/000077500000000000000000000000001514360242400205005ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/tests/requests/invalid/000077500000000000000000000000001514360242400221265ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/tests/requests/invalid/001.http000066400000000000000000000000351514360242400233250ustar00rootroot00000000000000GET /foo/bar HTTP/1.0\r\n bazbenoitc-gunicorn-f5fb19e/tests/requests/invalid/001.py000066400000000000000000000002521514360242400227770ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import NoMoreData request = NoMoreDatabenoitc-gunicorn-f5fb19e/tests/requests/invalid/002.http000066400000000000000000000000251514360242400233250ustar00rootroot00000000000000GET HTTP/1.1\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/invalid/002.py000066400000000000000000000002731514360242400230030ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import InvalidRequestLine request = InvalidRequestLine benoitc-gunicorn-f5fb19e/tests/requests/invalid/003.http000066400000000000000000000000321514360242400233240ustar00rootroot00000000000000GET\n/\nHTTP/1.1\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/003.py000066400000000000000000000002731514360242400230040ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. 
# See the NOTICE for more information. from gunicorn.http.errors import InvalidRequestLine request = InvalidRequestLine benoitc-gunicorn-f5fb19e/tests/requests/invalid/003b.http000066400000000000000000000000371514360242400234730ustar00rootroot00000000000000bla:rgh /foo HTTP/1.1\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/003b.py000066400000000000000000000002761514360242400231510ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import InvalidRequestMethod request = InvalidRequestMethodbenoitc-gunicorn-f5fb19e/tests/requests/invalid/003c.http000066400000000000000000000000331514360242400234700ustar00rootroot00000000000000-bl /foo HTTP/1.1\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/003c.py000066400000000000000000000002771514360242400231530ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import InvalidRequestMethod request = InvalidRequestMethod benoitc-gunicorn-f5fb19e/tests/requests/invalid/004.http000066400000000000000000000000311514360242400233240ustar00rootroot00000000000000GET /foo FTP/1.1\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/invalid/004.py000066400000000000000000000002721514360242400230040ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import InvalidHTTPVersion request = InvalidHTTPVersionbenoitc-gunicorn-f5fb19e/tests/requests/invalid/005.http000066400000000000000000000000511514360242400233270ustar00rootroot00000000000000GET /foo HTTP/1.1\r\n ba\0z: bar\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/invalid/005.py000066400000000000000000000002701514360242400230030ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.http.errors import InvalidHeaderName request = InvalidHeaderNamebenoitc-gunicorn-f5fb19e/tests/requests/invalid/006.http000066400000000000000000000100321514360242400233300ustar00rootroot00000000000000PUT /q=08aP8931Ltyl9nqyJvjMaRCOgDV3uONtAdHABjoZUG6KAP6h3Vh97O3GJjjovXYgNdrhxc7TriXoAmeehZMJx88EyhcPXO0f09Nvd128SZnxZ2r5jFDELkn26reKRysODSLBZLfjU3vxLzLXKWeFOFJKcZYRH9V7hC98DDS4ZsS7weUksBuK6m86aLNHHHB0Xbyxv1TiDbOWYIzKxV0eZKyk0CaDLDiR0CRuMOf4rwBeuHoMrumzafrFI5iL72ANQZmOvKdk1qQeXkRqEG11YU0kF7f1hSlmgiIgg5maWiBsA9sAg36IIXZMWwJF63zpMgAyjTT8l4pQhSBfhY2xbGAWmLGpyd1rlBm0O5LCoKpnQuTACm2azi0x6a1Qbry9flQBO4jHge2dXiD1si6Gh5q8fZu8ZQ7LLWii2u4rGB7E4XlhnClrCHg5vJmjYf2AItYPA0ogsiIdEEQGpzMJPqrp8Icn5kAAimWF1aCYaDjcdSgWI48PnoxlzIHX50EPFcPOSLecjkstD9z66H554sUXfWn3Mk9lnOUlse6nx0u1YClFK4UFXp98ru9eBBr7pkAsfZ34yPskayGyXPPyzWyBfVd28UuvdEG47SMdyqEpX0rFdk67fAYij0PWMK79mDmGAS37O821o18XUbu0GQjsqAGVMN9LDIAliD9QqtlwdEnplKkUyyZ7GAFJCFffgzppU9CjA2FbPX6ZjTOi4sPoYEyhyeQKVqAe9keYeDpU2qDwq83XEDQUKvP0w48GyavSmdBcrMXjUsu0PfdYpSaKwarrUB3i93HgoQB3ZJIR4lW6iPRTmm28OEKq2MIJGAoTXxCZYM5UacRldlqQOj6JkYz6y7ppWOjJ9yiCUEenuvfcItgmw9HIgGA59JxO8NDLEZLSONfuIgiV7wjsJnxuTOlU4vkjV7fTuOeU91xez7UKhaTqqEW3XBUSLjhKi3IkZg7ukrGZTWPhijFv2EZwEWDAyLlHvZB4X738zGJUlEX1k52EHwrKVKdLfePcaOjAGKsongHBFYxYC8vBBLuKm9RWexKCT14M25pCGloJXZ4OpBRfDQA2kobLUcEXEpzqRBPGN2JdNSBOFlUtUxWKnnPBM6r9S356l3k1o9zTIPeoIitWRjASs4A0iwYc8p5vv5Kt8KtsmW7Xv8dlU8HbZHsy3LI7O9BpUH8cJubqdEhooKABkx71pdcsZGhZb6epyTiPyvOhdJ7tNtFy3KQOameqTgGyd53Z42eZ0AjaOEvnzermi2E0xo3MMHFhB74TFtNAI3ppxxyqknc1mzUqZ49Wi8YPBg9ids6IgZvddBQYvwEozkmyGAkatQtt9TD4LjU3TyyUlhNG21q7CzEEl8NNsVrV6QyHsfw7E5w7XcoT7OQkBYoZwHIAjfekehnpc2llRtRY5m43fPVasmsVazOR36DRSLZJPHAqUDO0LInu9mgP57Mnz9CgylEmdE2aaYs426rnTFR3G3CfjLofHfjaLOkAegr4W3jx6MNMMOMZw2u46YTCnlfbBK6ZA1UYeAH1DIQJykcSQESinC8HpYIJt9A8g7UT0awzRP1F9nHa3wDnaAHndQYKMrjzlWo8ejQ0XHWgHhqnWHgW4h9sOnJckH00CYK1fHUKASJ3D8kOKax6uplexfz6BCvAoL9zm5TjeB1yxrpLp9NjjTWSKG2HOZhPkGpdEqU4mjnN2AkUVACPGos5YLBmTnSrdOEGZJDlAvJOUt800Mu3BYc1MiDIB6LMSSV5RsIUDFOzNletGQo
q4G3yHZmx78uEse5vUTPFF3KT8LCrssqdIU9H97Npgf6N5j8arQ7ykLzN459jJaUzpGIo6uowPnUSatDf9GAvAmWNvsVTz6bYiAV71C7QF0C7UolYIQY6DHJEHejgX2YMEovWNLPL50eeC51h4DdPNv5G4ZdNtQTRVybYBZMpetGDiFmXN0JKa1sKHOSZxdrhKjxDIhrYVyCcRUMQ0sjGGHFuOcRszr6E5igEMtsebHQ3KYiGd5B27LikpUHhk61rgZlulHdMoS6YgQs6SV6UMVNku6sCw529xhUciDwRMhsbAjDlahYbrGa3NryxyV5LrXONGGKCchCqv7vDMdAtPrVr8M2vL5MySQAC3g90iugGQcLH3hCf9f1Kn5X0hM4KZTfwOPJhlfJsMRNhssiDoXaycUvOUS58266yPDlitPIAzO03XClm4EDPXGIwcwiFr7FcDo3tQIMZVy87i48Zb80s3zAYRiBIS0vO3RKGx3OGN5zid2B7MfnfLzvpvgZoirHhAqXffnym5abpZNzGuo5GowTRA2Ptk4Ve2JFoHACWpD6HiGnRZ9QVOmPICoQrSUQw45Jlk9onKJz5Erhnx0943Uno6tMJ5jbrWBNiIO7i04xzRBgujeiAJvuQkVDX2QLKRxZ7s6rhdfOaq6R6uL108gEzzlXOLqTTJXgM63rcUWNbE7wsIXcCFSF59LLJ7G5Qea33suxdDX6DcK4a0VMZoxmWPtCi1dAT9ggJqc2Sh7mkAqizaB16RXZvSydchpdVj6s4qn4ivr0HKHdAstX0XZ0FFU6lOiNmU3vasMg2uaVG8tyuG8N8VsuXIOQs7xtFxDhilYb8MQ9vES9pWfWPSXFlJAq4XKPY8a0JOIx57EQuWHo3uWgRTIRThvZP9YYzSnjGIHwjS8JeppICHofADXZhJ0uDQaQs7MiXEALpGmT3W6w0G3tBdZcuTDkWx1HsT5jd9jQeJpgD2VxdKh8U4Q3vANTAuwBXLJ2P0stS8Q72JWgNPwKYTY9cPoaGZlUFGgVsq8CdEFH9yW0c27G5s5sfHsyep6t4VxIHHMOX2GmMRyGxDI33am1J7ZmJ1NyXiwkHxtPH5QBpU2PMu2Guf3xIxlk3snMkMAsGO0vYfqO9tdIgdxMYO3HZTYv99OXaHcNQ5u0pRZZyVrNOIPurkEOdJy0nowPemIgUuHWh8vQCuDZav1m35AOl6ftSFuChSm5KstEWnC7q8mJ0juJEBkCRmQphP3V1pqiDjz6YA90qEe7MA3nzT0nHG8A1hWlqcPVPNz4qWNF6Fq1ub4075aXO0H7Krb6rhWGb3ZRPjpb4BKN8jGFQrBUMZprtjAJ67BnfmYgE0mmGLV2QP10gYS1T06kBRyrtp7he6wsPiBPJ7wxPLHNUN2SGQHBTSKagndM99fuaga5Sw9OT8Fzdo7xUJXfhJ97gUnNDrknal0B00NMNvajZeQQTJyBsVSwBZtZ45ZCcq1idc7GWC0MITSk58cIVkSPXbrERUaygyY13dPeEVzjVi9aVJwUF6eJu1s8u3FCJqp2GoWIItwvZO69asX75fekFkmFpNavxM0X0dZC01TTPpV6E6PJoIfW8C06CKNHV7Gk2mkTWGSwUG4xD2L3G3XarodHDcmumFJX9Xviv0rvm38SCtin6OpjH8MHYDrj1OxTJbC2VclJxv73z2BDBquosKOik0fmgbPZN0FUTmjBEwHTvqd5QHTwb3nOpEz3X6YCF0lrcrQc0uhyr7gBGBs86nUBWFRp1LKjIRVTVXDipajqNDTQGNZtzvR9MUf1yJJV07inbrlPOENd7rHpKCrJtoZXOkDqInaIqoMCG3DVd353BGmZNJEKOa3DnL7fb9zwuHlvHAfCco7ZS4wAV87trWkp6skXux9v5WhkumbUyGq4ia6DM1PuqqnFfBTAWDzJsnggAJrzr8O7JbDtaXwcW9sqaOb0S6NvnUDZqiNdDQPMDOKvXRJJJQdf1FSrPCCS
PEEWO1SeVwictj7rTbpWGRoukwhgJALys95pGGOQxCPzRGrtVFnGcsLN1CwI3wLbmDnNKUv3KpOLEOPRxQXeXuJRIiYCFum44c0wNr731DvHn3YEJMH4iwFONl1rolEL4w6KFUOCq7ekrE5iyUt1V32PNtuUshXRjOYjBval29JMH5GoqZlGhCczzHMA61cmuzqdFwiPCB9yzqvJTg8TqMNvwKJztFIQK4mc5Ev5rRVSozD796AVRKT8rZF39IA1kmCLdXqz7CCC8x4QjjDpxjKCXP5HkWf9mp2FNBjE3pAeaEc6Vk2ENLlW8WVCe HTTP/1.0\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/006.py000066400000000000000000000002671514360242400230120ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import LimitRequestLine request = LimitRequestLine benoitc-gunicorn-f5fb19e/tests/requests/invalid/007.http000066400000000000000000000202461514360242400233410ustar00rootroot00000000000000PUT /stuff/here?foo=bar HTTP/1.0\r\n Server: http://127.0.0.1:5984\r\n Content-Type: application/json\r\n Content-Length: 14\r\n Someheader: 0X0VfvRJPKiUBYDUS0Vbdm9Rv6pQ1giLdvXeG1SbOwwEjzKceTxd5RKlt9KHVdQkZPqnZ3jLsuj67otzLqX0Q1dY1EsBI1InsyGc2Dxdr5o7W5DsBGYV0SDMyta3V9bmBJXJQ6g8R9qPtNrED4eIPvVmFY7aokhFb4TILl5UnL8qI6qqiyniYDaPVMxDlZaoCNkDbukO34fOUJD6ZN541qmjWEq1rvtAYDI77mkzWSx5zOkYd62RFmY7YKrQC5gtIVq8SBLp09Ao53S3895ABRcxjrg99lfbgLQFYwbM4FQ6ab1Ll2uybZyEU8MHPt5Czst0cRsoG819SBphxygWcCNwB93KGLi1K9eiCuAgx6Ove165KObLrvfA1rDI5hiv83Gql0UohgKtHeRmtqM0McnCO1VWAnFxpi1hxIAlBrR4w35EcaryGEKKcL34QyzD1zlF4mkQkr1EAOTgIMKoLipGUgykz7UFN1cCuWyo3CkdZvukBS3IGtEfxFuFCcnp70WTIjZxXxU4owMbWW1ER5Gsx0ilET0mzekZL0ngCikNP2BRQikRdlVBQ3eiLzDjq27UAm7ufQ9MJla8Yxd6Ea37un9DMltQwGmnmeG5pET54STq72qfY4HCerWHbCX1qwHTErMfEfIWcYldDfytUTOj7NcWRga3xW7JYpPZHdlkb24evup3lI4arY6j5a12ZcX9zVI02IJG0QD9T4zSHEV0pdVFZ8xwOlSWKuZ9VZMmRyOwmfhIPA7fDV5SP8weRlSnSCSN4YBAfzFVNfPTyeoSfVpXsxIABhXEQTg12YvAAn9390wFhEhMsT9FWIiIs7oH63tQyjdEAZSJcZ0nSQfapvi4BDsQSMv3W2DofSzxwOPrVQWRMyvP0UV0J660Gc4iZ2Tixe3DSeqg9VuNvij09aCbkBdwJh9r4UWmM1Hp1ZDF5Rr14nKtFAgjVlGlfZi4bWQKTzOlqaVbWBvxdKsJ27eelyDnasIPqo17yY5lg10Lb8nyu60Wn7l7Xb0Ndp334B5am4Vh1foctvkkhNFeIejtnjPYmWjS77rJ1aL0zJk
a4Xog5Oparvc93Pddf9CzCxgle00BTKNj0syVo5uqvX5PVzdhAnigU4jdPbJbcPpbpJRU4UDqIswRNJOlGfpdLnCvnPIRB2a7btjFTaE0tne0TjedGbePje1Li21rPXPX7t5LICWl1SRyqQ9x9woGEv1sI5VgpRoKtS6oxWgMERjP3LcEez3XqLiSwv0rWMlDiJhxEopz8Mklx8ZygQLiwIYx2pNq0JhKB8K1lZ8dYE5d3nRWhXwG4gFTUg2JYjnjL81WGRmjXnZEVLwYfYBUkRlqWAYHi1E6wF85BfcwvkgnEeBTiQSlfu6xwCYaW2OEogq7tbdinvlpeEPij1qQivpcs573HPHpkXrEeXC9P2gZhmV1Rvn69NAN2lOXSVe8XotSyCG5fHFsTDYlOvYW8EBrAdWuZrwU753xwjk3QCp2ODetYze98voig4lfYHrrWT43VXcHt8J5z7U3kt5O460buwESBhgkALZdrFYyy4YQcmnAeSCw5OoLArDEmzaI4JkFBCDqQxTE9BTYA112r9ymuOo5MGkTDYZlvtvopG4ekorfLoIa13Z9L6ZilXT1cg55dvNlOrbTSHpQTYRJfJ6x71IpDFyvdbZbOHQYMm98fcN9CLqFErkpcN4JO26GIhSodGGTSnzyUxBYueawFNlGxCMTa6JseX9c7Xlo8NRaZHBPvG7Z4gUCkOdUSEW0RRTs3TSSdjEKnJ6u9RdDqqyvN8cJ7gliTd04mSyVnkmxdqVU8DrdIrkSCfVQNoFgdydDHS3wMLU6QGTGBzK5pd9EfsDEeYXtIb3CkRupM4SERGMTN8TyIxqqIyWmgjBmSGLTFOB5tsPhkVydVQNf7jBkDy6THfBy0uALVUkm2jLeTFXjajyeL4ms5Lgx0eLoz0XWN6WulXSA20zV3ObSCHbBeVUgKmPxHq5qPmAi04VFIvCOJ0rBQJh9ZHJMwvhI3VEBF6EmXOiRCn0XOhm3pfHlmaCAWrOSGuQs3NCNlFRjwmVRPY5FJrKYjH3FrLrLdU07zdViAix8C4LxVrRrMB6ligZC3CoDhFA4vMjiPU5SBRqRW4lwVnvMZEZbf0AYbBc2ymnKAOWbQwt2ldiI2qL0aLoL6YtSFUhpwMOR3LP1feUq6XRO5xc9V02nEt9MRQsl5MgmKMcXap4HqAN0yATpjAGRnWqEnE7E1XZg95cEl2gO4HXejKzR0kiTUudcw6P4t1RYLRx7isZNJxiq1JZz6FpEe7QhwGbhPySNMbXJtmYuhAaTpfGdGKMxvHHB9LmELOChdyfjHMwMZ2B0xgU2eJgJimCwLH3UEmExgAwJDD4GSCqevYAMK4P9FKPl0dku0KZ7uOJ8oNloEsrbvMuhuKFDuO1PNvxtdCcgASzNVzdueOtUm1giZIDqbb6j11nqi9NoFeck1zZi2kfGF7OeUp4vYszuhQNi4vd03QeVAduM9h9v36Nz1YobRxB2CjTp6qdKdW9IYBp8aExZpipnJIbfD2hTWE44kIu7Q17f4C9kycGjsLwAWkVbfTRmBMU8SbVKV1EJTrN1gGqGX7quSwg1Vp4qslKAk6EIkoReIl5DuzuH8Rbvrkp5LFFAhNhb1hvXvVWcibtDjQSradNtuYzGf2AAduhxOTnZjzbsceGYhQA5a5NtqxE2GBlW8CPoPzIyfMfPjdAIUmAcns7Fkp44nju2htwhryUyidEzDVyTwevquARjt5a7eu8qIKfPrYgbOAlPgA1JHNi55ivTNpDuQ8drNiafZIntA43HI447WtITYYvLxFRG8OWvJRwI0N7dvHYO8H8lYI1OwatfvLKlJqjtdJBBvMWXdT4SbxHUdNTDUQmqFGZaLx1AvYPnJTYRzrqn5ZnXyWQ1ZCwtvZK209TxoezJ2sGorE46C7Zyki6EcXlX2A8upUUh9IhqLYTzidIRrAPE5mZmosyDyShjnRiN5CLXZAI21eV4v3a6WXI8TKkUk3fhhajOgPXshlyCEfDAyES
pz1J8RECu6vQs81E1ZNE5ha5UGw2wk3Ea8oSTfqTiu0OeisV2a6bfldvW4x0OL8PS57uuY0v0OZPSUPWmPQgnmJRVw8vmh62bpFekMnUH7y31fXU6MIyZaiBs1FEu7qF6irBszHt2ARy50SjgGwQZWcecgvB8gB874g3ES9mZer3diYGF3Wssmsm6XRdsNcuNn3yzuoi52cRrBYUOISegTBVApn4zfuCC9Y4AAfe6wmmiuN8hL6KJeOjrdK5EFQHGyrzeuIMaT3B2nKz1PNONVQ0udbqCQebz3cq7NPe6kGKFLiE6euWjdoMuAbuu8rTkAa42ensXz4a1Yo450ZVgYypaDtepDQWFkJyTHDW1HTVZfCok0tp7STRiQ8n3NKxOUSL9veuTsDs1FaV2rbzR3DvkEJrhJ10Rm0pvLgui5GUDKyWLnrqcNVtOIzFaj9K5pwMfnREm1VIs84ePX0GsMjirfOfubzDoYjavbiCtTB86nKx0tfCKtl0yUQ5PWSBqdGASY3mr5hZcFZ9bA6uXXGTNqMpUH3gqxCoF6t2yAim93t77jYkiFt3OBlBRVQzRsPbgEKRXbX3bWQj6NpDzNCQPYTs45HsQB967f4yByzLH8X289YAZJhJJyFTMCLbpdKFuMBX5Msyr4d15sBa1h5bI13dqU14WBnMKD12LkHMjHiyde6xf5EELf082sUfiAZaROFuDCDnA89p6y6oYEUgF1L9yQElZO4R6IrkJsEFN9hvARf3CH4ENqbYxtUN9gsB9CLCGKMy2R4wGKU3Dkyea27YCR4QHCdqX3HqOpy12uxBANvbrfEro9q5NJrGK7WVq3nNabN05x4TmIZk3asc8ehvDyhSgQLY0wwyvrkcYqNiETybJ57RjwVg1YE0IZEBfyAUNXE4goc2jtbZbHfcpTzt08pSJQZTAzuxrdQLS4EnaFHPpMdPh1YXUdclj6g2sjYbhoTYcV97bVDAUztMZ4EarUcv6tgQOvK66RmJCF2zVEpFDBS6AVZJWzrVlnuiweXpH0L9eY2Wy2EuAHi7gL4o0i0AkOapqY1TPUWUwBaVrKQzkL8QQbczgc97pMvSnGYMlcSdzlamFtUmRoOPmhBGMpVqmcxnstnqJ0TXMV65zbRN2hk3YVF5HwPjuWJmfkVYnyazuqKuaaohrQIe7YOOSAmD7C2vDnI50y1oScQqIPb87QAmguFz7jfNBSPymjPJ7UrToaJen7LEQr8S2b69ayZYNIyWbcpaW5ACUqdyT5AeHYhdENORnWS2B17qnBPtyvb4WujJCafLmsMFhQbcGonDZkHEOAnOcwRwJ4KIPr4MlQLRKsdnurPDDEmpCtCnFg8vPObOPHoHgICb9j35pG1YNhAAGIGTZ4g3JTJzFvTcW7GDRxREPZffKOuQTJoMYYaaPwnE0SainEpCFAukJbDy1ss5cZt60nqTw1asLzwMKJu5PHpU9sB9YN7J2cPhIbfb4387zSmSvqbt3I8NFjDbuYEhe6nZ7gRT5Th0W0MoyzHlmy4MSXbaAfUJNsLQJmdhdVKDsqMz0aXKIVNsXtn88owrhw0yqxU0K3IfTothafhpQ8daRUnbjzULViWRvUz7dI1N3GgylRzaEXQPgbj0DQ7RujNTcJoSp7I1ELjFFSBZDm4Jx5eXq0aS2SKJPFX7XmFfkkR99wRiHx4ByVTL5umojRhY5j8vg3l3yfliJbeOTXckaYiezrucuHaiVFWR2kjk9PUm57bDpvtSFMic652iDufj4hqpy5MH5r2lg67T6Bbb3fcq49cVJ3hkN2GfRqVhoPxmHyvotu5koheVh7oHDaLaf4VvcQMd5MF8sicaX3GXfoLjlfFZwfJBpXNbbVemD7XghpIEwuFjA1USU8yJnTdvCJ2bFmPNWFeWsBVDyl7XUsbgB3K2zz806xODZT639dqiqhGXQNbgYtShikQhiHhZF4wf4IY588LE4EO2bdXBb2Wezm8Gl2J5GAfqnx5Z
6NF7h1gGkM27hpnmKNylKZjqTNANj0CRU4awpdVrYGX7hT0u452Y5bXpVl7cLuK7j2k7VG93NXPsXADhQA8R9WDcpU0PLzFWFq1omoQ9ZRSlvh8R4pRp4vHIYf4A5uQEmv5Owr4pFQcWdp5GAdkpBaSHvUhvMxOSpsqVB2LHvvs1RiOUHHhHdZEKpX25mK9moud8pKT4efru1SlRRSsxdz87hTJMUrueydHDPXbo9AvExctdqxuCk03Fy8cB57qrkQQ50oGNuTNPColMrwVfmuTt81uSZremLbINILnCVXEnvTugRQfFYMnprqMB4mVJfZfh6XVLdOyW4BPaFrBsZGFy7udoWJwE8ACx4UpJW6m1ltckofzA6AUxzXprXDCCL118m8bBB2hzDKmqeLk5ZYKsLROkTqRAxmJjBSZSo2XBroO5rVvkOZrOZRe8NgaHFMLPn0I6hsqwA7VdKlpbqknax84iWrtBe8ErxgPIQeYhELyK1deW1YWBagD21MBTc2h5LliIlglZg41H8Zl3GvUv0XNZegR5bx1kiM9WFGV9Yt37iQQGquWAMKCAb6AqpkCtKs7sXKaEAVsbh32tlkAg4ngspjwzYHTPYKUuigPX5K8siUfaAW9WJl7r8dc4ju97osWETOcBENLsfwB66TvsttORtOedylnErplZP3hjt7o39JllXDobj3l10bSr4B09eYVWi2DLGavYktKSKj1PrqzuGUaqcFxqoebpuDEAx5vl8ZmSYrmS2RBJ1n2s3lkKdaVWTmfIXlyMMT7Ac3lCXpGNnpf8ccTffv3E0fBrpCSpVc48dM5e5iTpRPrfWxAjrud9jSrqVBXsw3pqUvhuVmBpmwoKAfQGxHrauna3f48AFefGDozxXXjpdM9ZDWHsRUBTFNzDs8tUATtegSzZfNJCS9k0p5q2cueyU1mtwMJIdf0FrsVGiAyX7PFkWvLHi29fpprZQd0gbMMw2Bt10ZbZCsjPX261cXmVa6ZPnkVQm2w1ory3uWejuq20oQCyXTYyv1Ki4tbdPxoNn04Je7uS3QHDCsUl4i9zKNhBJ3g55bhIZWfwmLi3S7oY16gImdC6vvjsMKkCPzXv4pPaVhHH7o4f0mWEz30k4o7GQNOUy8LPM3NmlZF7QaIBdRfozG86jwQkC3jTNR357pdPjOqMERtIS4WEJBgbaeUCu5MOhsNdaD91iCeghIpOECFyTdEkUCGPPCIAtuAOKBdhPu40UxHx30dELMTK3azHOuOnLTsdiM4KJ9yF4Ab2eiz5j2T95sDx3aiEJDVDPCa55hO0XTBM9OSNtdzjdTdZT19XrwD0wPWZcBhfJ66X1uNM2eud1btzglqZP52qqYU7BK2M3BBZKKjy7P6YzmgaPHWnFGHZdwdz3Yq6e3N76Cjkfl8Sy0mkwd6pt0geDM1jNNZrcT8dUfLLaiUqcZm1KRVdpZaBrboDSuCxfWYlxqgsldwlGL4C06ceFUDXX8PzxzWEgOd8OU4F22pcNJOnwJGo6rYA3tvhAuq2WKVg6tgFCb1p7dzF4Ke3J0dv3IneMSNnHG4hkvxW6VzIykDUtYEjMQO35tdnEA0vMVLXIahpJpz4HGs5wwRgoZx1e1zD1pXi7KmEVTlfattgcGFlKjZJ60fEdloZEmiXodxT63CzuJHnjHDOL8qcMzTxHb8OCainga4w1fk4uILLAWqmTFpDcFGSF5lbOFUwhvtMK6knIWZ8ZApZvTGBt1qv3xKUJqPcWiweI4kk57zgyTPZku2mg4fJWDKSfiRSi7LvtpKkdqjein9lP7LMv5lKutprVzjmvHBPjunXGqakWx39xYH8RD6qF3Fw2BnIIesiicZsDv69Ggbu9Y334UeFPNIJ3LGp2I8xcUxlP5dJAh4V05p1HvIZ5Fhk0oCWlvNXdLqzbVsbfW9jWyQTaZXzw7WT3rqFQc7wvw4ayp5eKmUclqB1yOvrI14XGhmH7QMaAYNTIE2RHjYXVgvb
mFRi0oB1v4nDEeSTn3KHBRQD8TilCagKg0XYPj2eAgWs12ZRYzlGyCvYZ1pol5wAwc9AFFGwsTJ9UYkbxlZv7wKDx7nFzlUSMC1kMvS2ECwvHzSycqHPRwCGipvG6kWz0mGvASXeKjm47iMROoY0MRK0uvgNdTTOTdxkMgOuCDIlxfit5QKjyzaVAg2kDwENfSd6XPMgSprTSLuNDXdg5NHCwUvDbEHVxpMgOItZymPZtPweOrnPdlEB4UwLZ8jqtShi5oDYvhkh85FwwT25OHFvDUWTTCV5n73pQ8kLo8zsB3mbWfGwg62guj3C50Dh42fAZEPBRSHDRTg3r0z39Vyj490lk2UpZeNyylwuEKmuIqEkbE3BRT2YEjTM8a2PU5grCuzculibcoRUpb1sIQiMRTf4wrtT1CnKcoUJ1T28DC04dTJVRcm3w3WzNLdrnovkX6NahblTzDvq5eXkoEaZv6HClmGuho4FH6s6i0OdmmW8qkNOnk7BhexiyAd3UYERlFwvZ6LP55tFOc3vnlhyylx1rTTgu1NFljRNs7rGiT7SnGFaFK7GITEZFEYI7DmOEUZXxDSHjYuOVN0YAJP2cZFgagyMwGJdrpH8S7cewYPMKz2Go2GBKl1OA6pJ8T91tUdEcGVg9JCMQUA4sBtlIuRTVV3cduIhsLCTi2ewItkh9MRP1kevVa9WcXejQQKreZmq5EZtzThW71r7E2tcvwFeqiwv3JZnV16bZ7NwZT6uvSrOnIFUyMsxhh8xCkVY82VLTAZhPXB8t6CbyjZ5stos6WmNZgoEsD8GU8pmzSTubAqQXkTbiODF2pePe6S9uQ9HngGGBnOjY4QUcAcScDsfflyXVqyxgTelGD4vXoba6qRWCqc9LKpyk4jCKYvLX9tzXusO7bhT2KRvF4MObDqdE4KnCCIF3zeVD0vImR20MmRTBHRCNm3s6GfyeTYEAlW3L2igZJ7Myj5zGLccMt2EohGc38HfWZ4mlvXRLHKB233PyKALYifqlAxTXaWUk13o6nACQDvN7DxSCA0daJeuznK1Dr52bC4IXCTahK1An6LkQMfsXb7Qus6ey241Vb4wTgFHqsdCx7qPxeAghmsTOHRVl\r\n \r\n {"nom": "nom"} benoitc-gunicorn-f5fb19e/tests/requests/invalid/007.py000066400000000000000000000002751514360242400230120ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.http.errors import LimitRequestHeaders request = LimitRequestHeaders benoitc-gunicorn-f5fb19e/tests/requests/invalid/008.http000066400000000000000000000302021514360242400233330ustar00rootroot00000000000000PUT /stuff/here?foo=bar HTTP/1.0\r\n Server: http://127.0.0.1:5984\r\n Content-Type: application/json\r\n Someheader: 08aP8931Ltyl9nqyJvjMaRCOgDV3uONtAdHABjoZUG6KAP6h3Vh97O3GJjjovXYgNdrhxc7TriXoAmeehZMJx88EyhcPXO0f09Nvd128SZnxZ2r5jFDELkn26reKRysODSLBZLfjU3vxLzLXKWeFOFJKcZYRH9V7hC98DDS4ZsS7weUksBuK6m86aLNHHHB0Xbyxv1TiDbOWYIzKxV0eZKyk0CaDLDiR0CRuMOf4rwBeuHoMrumzafrFI5iL72ANQZmOvKdk1qQeXkRqEG11YU0kF7f1hSlmgiIgg5maWiBsA9sAg36IIXZMWwJF63zpMgAyjTT8l4pQhSBfhY2xbGAWmLGpyd1rlBm0O5LCoKpnQuTACm2azi0x6a1Qbry9flQBO4jHge2dXiD1si6Gh5q8fZu8ZQ7LLWii2u4rGB7E4XlhnClrCHg5vJmjYf2AItYPA0ogsiIdEEQGpzMJPqrp8Icn5kAAimWF1aCYaDjcdSgWI48PnoxlzIHX50EPFcPOSLecjkstD9z66H554sUXfWn3Mk9lnOUlse6nx0u1YClFK4UFXp98ru9eBBr7pkAsfZ34yPskayGyXPPyzWyBfVd28UuvdEG47SMdyqEpX0rFdk67fAYij0PWMK79mDmGAS37O821o18XUbu0GQjsqAGVMN9LDIAliD9QqtlwdEnplKkUyyZ7GAFJCFffgzppU9CjA2FbPX6ZjTOi4sPoYEyhyeQKVqAe9keYeDpU2qDwq83XEDQUKvP0w48GyavSmdBcrMXjUsu0PfdYpSaKwarrUB3i93HgoQB3ZJIR4lW6iPRTmm28OEKq2MIJGAoTXxCZYM5UacRldlqQOj6JkYz6y7ppWOjJ9yiCUEenuvfcItgmw9HIgGA59JxO8NDLEZLSONfuIgiV7wjsJnxuTOlU4vkjV7fTuOeU91xez7UKhaTqqEW3XBUSLjhKi3IkZg7ukrGZTWPhijFv2EZwEWDAyLlHvZB4X738zGJUlEX1k52EHwrKVKdLfePcaOjAGKsongHBFYxYC8vBBLuKm9RWexKCT14M25pCGloJXZ4OpBRfDQA2kobLUcEXEpzqRBPGN2JdNSBOFlUtUxWKnnPBM6r9S356l3k1o9zTIPeoIitWRjASs4A0iwYc8p5vv5Kt8KtsmW7Xv8dlU8HbZHsy3LI7O9BpUH8cJubqdEhooKABkx71pdcsZGhZb6epyTiPyvOhdJ7tNtFy3KQOameqTgGyd53Z42eZ0AjaOEvnzermi2E0xo3MMHFhB74TFtNAI3ppxxyqknc1mzUqZ49Wi8YPBg9ids6IgZvddBQYvwEozkmyGAkatQtt9TD4LjU3TyyUlhNG21q7CzEEl8NNsVrV6QyHsfw7E5w7XcoT7OQkBYoZwHIAjfekehnpc2llRtRY5m43fPVasmsVazOR36DRSLZJPHAqUDO0LInu9mgP57Mnz9CgylEmdE2aaYs426rnTFR3G3CfjLofHfjaLOkAegr4W3jx6MNMMOMZw2u46YTCnlfbBK6ZA1UYeAH1DIQJykcSQESinC8HpYIJt9A8g7UT0awzRP1F9nHa3wDnaAHndQYKMrjzlWo8ejQ0XHWgHhqnWHgW4h9sOnJckH00CYK1fHUKASJ3D8kOKax6uplexfz6BCv
AoL9zm5TjeB1yxrpLp9NjjTWSKG2HOZhPkGpdEqU4mjnN2AkUVACPGos5YLBmTnSrdOEGZJDlAvJOUt800Mu3BYc1MiDIB6LMSSV5RsIUDFOzNletGQoq4G3yHZmx78uEse5vUTPFF3KT8LCrssqdIU9H97Npgf6N5j8arQ7ykLzN459jJaUzpGIo6uowPnUSatDf9GAvAmWNvsVTz6bYiAV71C7QF0C7UolYIQY6DHJEHejgX2YMEovWNLPL50eeC51h4DdPNv5G4ZdNtQTRVybYBZMpetGDiFmXN0JKa1sKHOSZxdrhKjxDIhrYVyCcRUMQ0sjGGHFuOcRszr6E5igEMtsebHQ3KYiGd5B27LikpUHhk61rgZlulHdMoS6YgQs6SV6UMVNku6sCw529xhUciDwRMhsbAjDlahYbrGa3NryxyV5LrXONGGKCchCqv7vDMdAtPrVr8M2vL5MySQAC3g90iugGQcLH3hCf9f1Kn5X0hM4KZTfwOPJhlfJsMRNhssiDoXaycUvOUS58266yPDlitPIAzO03XClm4EDPXGIwcwiFr7FcDo3tQIMZVy87i48Zb80s3zAYRiBIS0vO3RKGx3OGN5zid2B7MfnfLzvpvgZoirHhAqXffnym5abpZNzGuo5GowTRA2Ptk4Ve2JFoHACWpD6HiGnRZ9QVOmPICoQrSUQw45Jlk9onKJz5Erhnx0943Uno6tMJ5jbrWBNiIO7i04xzRBgujeiAJvuQkVDX2QLKRxZ7s6rhdfOaq6R6uL108gEzzlXOLqTTJXgM63rcUWNbE7wsIXcCFSF59LLJ7G5Qea33suxdDX6DcK4a0VMZoxmWPtCi1dAT9ggJqc2Sh7mkAqizaB16RXZvSydchpdVj6s4qn4ivr0HKHdAstX0XZ0FFU6lOiNmU3vasMg2uaVG8tyuG8N8VsuXIOQs7xtFxDhilYb8MQ9vES9pWfWPSXFlJAq4XKPY8a0JOIx57EQuWHo3uWgRTIRThvZP9YYzSnjGIHwjS8JeppICHofADXZhJ0uDQaQs7MiXEALpGmT3W6w0G3tBdZcuTDkWx1HsT5jd9jQeJpgD2VxdKh8U4Q3vANTAuwBXLJ2P0stS8Q72JWgNPwKYTY9cPoaGZlUFGgVsq8CdEFH9yW0c27G5s5sfHsyep6t4VxIHHMOX2GmMRyGxDI33am1J7ZmJ1NyXiwkHxtPH5QBpU2PMu2Guf3xIxlk3snMkMAsGO0vYfqO9tdIgdxMYO3HZTYv99OXaHcNQ5u0pRZZyVrNOIPurkEOdJy0nowPemIgUuHWh8vQCuDZav1m35AOl6ftSFuChSm5KstEWnC7q8mJ0juJEBkCRmQphP3V1pqiDjz6YA90qEe7MA3nzT0nHG8A1hWlqcPVPNz4qWNF6Fq1ub4075aXO0H7Krb6rhWGb3ZRPjpb4BKN8jGFQrBUMZprtjAJ67BnfmYgE0mmGLV2QP10gYS1T06kBRyrtp7he6wsPiBPJ7wxPLHNUN2SGQHBTSKagndM99fuaga5Sw9OT8Fzdo7xUJXfhJ97gUnNDrknal0B00NMNvajZeQQTJyBsVSwBZtZ45ZCcq1idc7GWC0MITSk58cIVkSPXbrERUaygyY13dPeEVzjVi9aVJwUF6eJu1s8u3FCJqp2GoWIItwvZO69asX75fekFkmFpNavxM0X0dZC01TTPpV6E6PJoIfW8C06CKNHV7Gk2mkTWGSwUG4xD2L3G3XarodHDcmumFJX9Xviv0rvm38SCtin6OpjH8MHYDrj1OxTJbC2VclJxv73z2BDBquosKOik0fmgbPZN0FUTmjBEwHTvqd5QHTwb3nOpEz3X6YCF0lrcrQc0uhyr7gBGBs86nUBWFRp1LKjIRVTVXDipajqNDTQGNZtzvR9MUf1yJJV07inbrlPOENd7rHpKCrJtoZXOkDqInaIqoMCG3DVd353BGmZNJEKOa3DnL7fb9zwuHlvHAfCco7Z
S4wAV87trWkp6skXux9v5WhkumbUyGq4ia6DM1PuqqnFfBTAWDzJsnggAJrzr8O7JbDtaXwcW9sqaOb0S6NvnUDZqiNdDQPMDOKvXRJJJQdf1FSrPCCSPEEWO1SeVwictj7rTbpWGRoukwhgJALys95pGGOQxCPzRGrtVFnGcsLN1CwI3wLbmDnNKUv3KpOLEOPRxQXeXuJRIiYCFum44c0wNr731DvHn3YEJMH4iwFONl1rolEL4w6KFUOCq7ekrE5iyUt1V32PNtuUshXRjOYjBval29JMH5GoqZlGhCczzHMA61cmuzqdFwiPCB9yzqvJTg8TqMNvwKJztFIQK4mc5Ev5rRVSozD796AVRKT8rZF39IA1kmCLdXqz7CCC8x4QjjDpxjKCXP5HkWf9mp2FNBjE3pAeaEc6Vk2ENLlW8WVCe08aP8931Ltyl9nqyJvjMaRCOgDV3uONtAdHABjoZUG6KAP6h3Vh97O3GJjjovXYgNdrhxc7TriXoAmeehZMJx88EyhcPXO0f09Nvd128SZnxZ2r5jFDELkn26reKRysODSLBZLfjU3vxLzLXKWeFOFJKcZYRH9V7hC98DDS4ZsS7weUksBuK6m86aLNHHHB0Xbyxv1TiDbOWYIzKxV0eZKyk0CaDLDiR0CRuMOf4rwBeuHoMrumzafrFI5iL72ANQZmOvKdk1qQeXkRqEG11YU0kF7f1hSlmgiIgg5maWiBsA9sAg36IIXZMWwJF63zpMgAyjTT8l4pQhSBfhY2xbGAWmLGpyd1rlBm0O5LCoKpnQuTACm2azi0x6a1Qbry9flQBO4jHge2dXiD1si6Gh5q8fZu8ZQ7LLWii2u4rGB7E4XlhnClrCHg5vJmjYf2AItYPA0ogsiIdEEQGpzMJPqrp8Icn5kAAimWF1aCYaDjcdSgWI48PnoxlzIHX50EPFcPOSLecjkstD9z66H554sUXfWn3Mk9lnOUlse6nx0u1YClFK4UFXp98ru9eBBr7pkAsfZ34yPskayGyXPPyzWyBfVd28UuvdEG47SMdyqEpX0rFdk67fAYij0PWMK79mDmGAS37O821o18XUbu0GQjsqAGVMN9LDIAliD9QqtlwdEnplKkUyyZ7GAFJCFffgzppU9CjA2FbPX6ZjTOi4sPoYEyhyeQKVqAe9keYeDpU2qDwq83XEDQUKvP0w48GyavSmdBcrMXjUsu0PfdYpSaKwarrUB3i93HgoQB3ZJIR4lW6iPRTmm28OEKq2MIJGAoTXxCZYM5UacRldlqQOj6JkYz6y7ppWOjJ9yiCUEenuvfcItgmw9HIgGA59JxO8NDLEZLSONfuIgiV7wjsJnxuTOlU4vkjV7fTuOeU91xez7UKhaTqqEW3XBUSLjhKi3IkZg7ukrGZTWPhijFv2EZwEWDAyLlHvZB4X738zGJUlEX1k52EHwrKVKdLfePcaOjAGKsongHBFYxYC8vBBLuKm9RWexKCT14M25pCGloJXZ4OpBRfDQA2kobLUcEXEpzqRBPGN2JdNSBOFlUtUxWKnnPBM6r9S356l3k1o9zTIPeoIitWRjASs4A0iwYc8p5vv5Kt8KtsmW7Xv8dlU8HbZHsy3LI7O9BpUH8cJubqdEhooKABkx71pdcsZGhZb6epyTiPyvOhdJ7tNtFy3KQOameqTgGyd53Z42eZ0AjaOEvnzermi2E0xo3MMHFhB74TFtNAI3ppxxyqknc1mzUqZ49Wi8YPBg9ids6IgZvddBQYvwEozkmyGAkatQtt9TD4LjU3TyyUlhNG21q7CzEEl8NNsVrV6QyHsfw7E5w7XcoT7OQkBYoZwHIAjfekehnpc2llRtRY5m43fPVasmsVazOR36DRSLZJPHAqUDO0LInu9mgP57Mnz9CgylEmdE2aaYs426rnTFR3G3CfjLofHfjaLOkAegr4W3jx6MNMMOMZw2u46YTCnlfbBK6ZA1UYeAH1DIQJykcSQESinC8HpYIJt9A
8g7UT0awzRP1F9nHa3wDnaAHndQYKMrjzlWo8ejQ0XHWgHhqnWHgW4h9sOnJckH00CYK1fHUKASJ3D8kOKax6uplexfz6BCvAoL9zm5TjeB1yxrpLp9NjjTWSKG2HOZhPkGpdEqU4mjnN2AkUVACPGos5YLBmTnSrdOEGZJDlAvJOUt800Mu3BYc1MiDIB6LMSSV5RsIUDFOzNletGQoq4G3yHZmx78uEse5vUTPFF3KT8LCrssqdIU9H97Npgf6N5j8arQ7ykLzN459jJaUzpGIo6uowPnUSatDf9GAvAmWNvsVTz6bYiAV71C7QF0C7UolYIQY6DHJEHejgX2YMEovWNLPL50eeC51h4DdPNv5G4ZdNtQTRVybYBZMpetGDiFmXN0JKa1sKHOSZxdrhKjxDIhrYVyCcRUMQ0sjGGHFuOcRszr6E5igEMtsebHQ3KYiGd5B27LikpUHhk61rgZlulHdMoS6YgQs6SV6UMVNku6sCw529xhUciDwRMhsbAjDlahYbrGa3NryxyV5LrXONGGKCchCqv7vDMdAtPrVr8M2vL5MySQAC3g90iugGQcLH3hCf9f1Kn5X0hM4KZTfwOPJhlfJsMRNhssiDoXaycUvOUS58266yPDlitPIAzO03XClm4EDPXGIwcwiFr7FcDo3tQIMZVy87i48Zb80s3zAYRiBIS0vO3RKGx3OGN5zid2B7MfnfLzvpvgZoirHhAqXffnym5abpZNzGuo5GowTRA2Ptk4Ve2JFoHACWpD6HiGnRZ9QVOmPICoQrSUQw45Jlk9onKJz5Erhnx0943Uno6tMJ5jbrWBNiIO7i04xzRBgujeiAJvuQkVDX2QLKRxZ7s6rhdfOaq6R6uL108gEzzlXOLqTTJXgM63rcUWNbE7wsIXcCFSF59LLJ7G5Qea33suxdDX6DcK4a0VMZoxmWPtCi1dAT9ggJqc2Sh7mkAqizaB16RXZvSydchpdVj6s4qn4ivr0HKHdAstX0XZ0FFU6lOiNmU3vasMg2uaVG8tyuG8N8VsuXIOQs7xtFxDhilYb8MQ9vES9pWfWPSXFlJAq4XKPY8a0JOIx57EQuWHo3uWgRTIRThvZP9YYzSnjGIHwjS8JeppICHofADXZhJ0uDQaQs7MiXEALpGmT3W6w0G3tBdZcuTDkWx1HsT5jd9jQeJpgD2VxdKh8U4Q3vANTAuwBXLJ2P0stS8Q72JWgNPwKYTY9cPoaGZlUFGgVsq8CdEFH9yW0c27G5s5sfHsyep6t4VxIHHMOX2GmMRyGxDI33am1J7ZmJ1NyXiwkHxtPH5QBpU2PMu2Guf3xIxlk3snMkMAsGO0vYfqO9tdIgdxMYO3HZTYv99OXaHcNQ5u0pRZZyVrNOIPurkEOdJy0nowPemIgUuHWh8vQCuDZav1m35AOl6ftSFuChSm5KstEWnC7q8mJ0juJEBkCRmQphP3V1pqiDjz6YA90qEe7MA3nzT0nHG8A1hWlqcPVPNz4qWNF6Fq1ub4075aXO0H7Krb6rhWGb3ZRPjpb4BKN8jGFQrBUMZprtjAJ67BnfmYgE0mmGLV2QP10gYS1T06kBRyrtp7he6wsPiBPJ7wxPLHNUN2SGQHBTSKagndM99fuaga5Sw9OT8Fzdo7xUJXfhJ97gUnNDrknal0B00NMNvajZeQQTJyBsVSwBZtZ45ZCcq1idc7GWC0MITSk58cIVkSPXbrERUaygyY13dPeEVzjVi9aVJwUF6eJu1s8u3FCJqp2GoWIItwvZO69asX75fekFkmFpNavxM0X0dZC01TTPpV6E6PJoIfW8C06CKNHV7Gk2mkTWGSwUG4xD2L3G3XarodHDcmumFJX9Xviv0rvm38SCtin6OpjH8MHYDrj1OxTJbC2VclJxv73z2BDBquosKOik0fmgbPZN0FUTmjBEwHTvqd5QHTwb3nOpEz3X6YCF0lrcrQc0uhyr7gBGBs86nUBWFRp1LKjIRVTVXDipajq
NDTQGNZtzvR9MUf1yJJV07inbrlPOENd7rHpKCrJtoZXOkDqInaIqoMCG3DVd353BGmZNJEKOa3DnL7fb9zwuHlvHAfCco7ZS4wAV87trWkp6skXux9v5WhkumbUyGq4ia6DM1PuqqnFfBTAWDzJsnggAJrzr8O7JbDtaXwcW9sqaOb0S6NvnUDZqiNdDQPMDOKvXRJJJQdf1FSrPCCSPEEWO1SeVwictj7rTbpWGRoukwhgJALys95pGGOQxCPzRGrtVFnGcsLN1CwI3wLbmDnNKUv3KpOLEOPRxQXeXuJRIiYCFum44c0wNr731DvHn3YEJMH4iwFONl1rolEL4w6KFUOCq7ekrE5iyUt1V32PNtuUshXRjOYjBval29JMH5GoqZlGhCczzHMA61cmuzqdFwiPCB9yzqvJTg8TqMNvwKJztFIQK4mc5Ev5rRVSozD796AVRKT8rZF39IA1kmCLdXqz7CCC8x4QjjDpxjKCXP5HkWf9mp2FNBjE3pAeaE\r\n Someheader: 08aP8931Ltyl9nqyJvjMaRCOgDV3uONtAdHABjoZUG6KAP6h3Vh97O3GJjjovXYgNdrhxc7TriXoAmeehZMJx88EyhcPXO0f09Nvd128SZnxZ2r5jFDELkn26reKRysODSLBZLfjU3vxLzLXKWeFOFJKcZYRH9V7hC98DDS4ZsS7weUksBuK6m86aLNHHHB0Xbyxv1TiDbOWYIzKxV0eZKyk0CaDLDiR0CRuMOf4rwBeuHoMrumzafrFI5iL72ANQZmOvKdk1qQeXkRqEG11YU0kF7f1hSlmgiIgg5maWiBsA9sAg36IIXZMWwJF63zpMgAyjTT8l4pQhSBfhY2xbGAWmLGpyd1rlBm0O5LCoKpnQuTACm2azi0x6a1Qbry9flQBO4jHge2dXiD1si6Gh5q8fZu8ZQ7LLWii2u4rGB7E4XlhnClrCHg5vJmjYf2AItYPA0ogsiIdEEQGpzMJPqrp8Icn5kAAimWF1aCYaDjcdSgWI48PnoxlzIHX50EPFcPOSLecjkstD9z66H554sUXfWn3Mk9lnOUlse6nx0u1YClFK4UFXp98ru9eBBr7pkAsfZ34yPskayGyXPPyzWyBfVd28UuvdEG47SMdyqEpX0rFdk67fAYij0PWMK79mDmGAS37O821o18XUbu0GQjsqAGVMN9LDIAliD9QqtlwdEnplKkUyyZ7GAFJCFffgzppU9CjA2FbPX6ZjTOi4sPoYEyhyeQKVqAe9keYeDpU2qDwq83XEDQUKvP0w48GyavSmdBcrMXjUsu0PfdYpSaKwarrUB3i93HgoQB3ZJIR4lW6iPRTmm28OEKq2MIJGAoTXxCZYM5UacRldlqQOj6JkYz6y7ppWOjJ9yiCUEenuvfcItgmw9HIgGA59JxO8NDLEZLSONfuIgiV7wjsJnxuTOlU4vkjV7fTuOeU91xez7UKhaTqqEW3XBUSLjhKi3IkZg7ukrGZTWPhijFv2EZwEWDAyLlHvZB4X738zGJUlEX1k52EHwrKVKdLfePcaOjAGKsongHBFYxYC8vBBLuKm9RWexKCT14M25pCGloJXZ4OpBRfDQA2kobLUcEXEpzqRBPGN2JdNSBOFlUtUxWKnnPBM6r9S356l3k1o9zTIPeoIitWRjASs4A0iwYc8p5vv5Kt8KtsmW7Xv8dlU8HbZHsy3LI7O9BpUH8cJubqdEhooKABkx71pdcsZGhZb6epyTiPyvOhdJ7tNtFy3KQOameqTgGyd53Z42eZ0AjaOEvnzermi2E0xo3MMHFhB74TFtNAI3ppxxyqknc1mzUqZ49Wi8YPBg9ids6IgZvddBQYvwEozkmyGAkatQtt9TD4LjU3TyyUlhNG21q7CzEEl8NNsVrV6QyHsfw7E5w7XcoT7OQkBYoZwHIAjfekehnpc2llRtRY5m43fPVasmsVazOR36DRSLZJPHAqUDO0LInu9mgP57Mnz9Cgy
lEmdE2aaYs426rnTFR3G3CfjLofHfjaLOkAegr4W3jx6MNMMOMZw2u46YTCnlfbBK6ZA1UYeAH1DIQJykcSQESinC8HpYIJt9A8g7UT0awzRP1F9nHa3wDnaAHndQYKMrjzlWo8ejQ0XHWgHhqnWHgW4h9sOnJckH00CYK1fHUKASJ3D8kOKax6uplexfz6BCvAoL9zm5TjeB1yxrpLp9NjjTWSKG2HOZhPkGpdEqU4mjnN2AkUVACPGos5YLBmTnSrdOEGZJDlAvJOUt800Mu3BYc1MiDIB6LMSSV5RsIUDFOzNletGQoq4G3yHZmx78uEse5vUTPFF3KT8LCrssqdIU9H97Npgf6N5j8arQ7ykLzN459jJaUzpGIo6uowPnUSatDf9GAvAmWNvsVTz6bYiAV71C7QF0C7UolYIQY6DHJEHejgX2YMEovWNLPL50eeC51h4DdPNv5G4ZdNtQTRVybYBZMpetGDiFmXN0JKa1sKHOSZxdrhKjxDIhrYVyCcRUMQ0sjGGHFuOcRszr6E5igEMtsebHQ3KYiGd5B27LikpUHhk61rgZlulHdMoS6YgQs6SV6UMVNku6sCw529xhUciDwRMhsbAjDlahYbrGa3NryxyV5LrXONGGKCchCqv7vDMdAtPrVr8M2vL5MySQAC3g90iugGQcLH3hCf9f1Kn5X0hM4KZTfwOPJhlfJsMRNhssiDoXaycUvOUS58266yPDlitPIAzO03XClm4EDPXGIwcwiFr7FcDo3tQIMZVy87i48Zb80s3zAYRiBIS0vO3RKGx3OGN5zid2B7MfnfLzvpvgZoirHhAqXffnym5abpZNzGuo5GowTRA2Ptk4Ve2JFoHACWpD6HiGnRZ9QVOmPICoQrSUQw45Jlk9onKJz5Erhnx0943Uno6tMJ5jbrWBNiIO7i04xzRBgujeiAJvuQkVDX2QLKRxZ7s6rhdfOaq6R6uL108gEzzlXOLqTTJXgM63rcUWNbE7wsIXcCFSF59LLJ7G5Qea33suxdDX6DcK4a0VMZoxmWPtCi1dAT9ggJqc2Sh7mkAqizaB16RXZvSydchpdVj6s4qn4ivr0HKHdAstX0XZ0FFU6lOiNmU3vasMg2uaVG8tyuG8N8VsuXIOQs7xtFxDhilYb8MQ9vES9pWfWPSXFlJAq4XKPY8a0JOIx57EQuWHo3uWgRTIRThvZP9YYzSnjGIHwjS8JeppICHofADXZhJ0uDQaQs7MiXEALpGmT3W6w0G3tBdZcuTDkWx1HsT5jd9jQeJpgD2VxdKh8U4Q3vANTAuwBXLJ2P0stS8Q72JWgNPwKYTY9cPoaGZlUFGgVsq8CdEFH9yW0c27G5s5sfHsyep6t4VxIHHMOX2GmMRyGxDI33am1J7ZmJ1NyXiwkHxtPH5QBpU2PMu2Guf3xIxlk3snMkMAsGO0vYfqO9tdIgdxMYO3HZTYv99OXaHcNQ5u0pRZZyVrNOIPurkEOdJy0nowPemIgUuHWh8vQCuDZav1m35AOl6ftSFuChSm5KstEWnC7q8mJ0juJEBkCRmQphP3V1pqiDjz6YA90qEe7MA3nzT0nHG8A1hWlqcPVPNz4qWNF6Fq1ub4075aXO0H7Krb6rhWGb3ZRPjpb4BKN8jGFQrBUMZprtjAJ67BnfmYgE0mmGLV2QP10gYS1T06kBRyrtp7he6wsPiBPJ7wxPLHNUN2SGQHBTSKagndM99fuaga5Sw9OT8Fzdo7xUJXfhJ97gUnNDrknal0B00NMNvajZeQQTJyBsVSwBZtZ45ZCcq1idc7GWC0MITSk58cIVkSPXbrERUaygyY13dPeEVzjVi9aVJwUF6eJu1s8u3FCJqp2GoWIItwvZO69asX75fekFkmFpNavxM0X0dZC01TTPpV6E6PJoIfW8C06CKNHV7Gk2mkTWGSwUG4xD2L3G3XarodHDcmumFJX9Xviv0rvm38SCtin6OpjH8MHYDrj1OxTJbC2VclJxv
73z2BDBquosKOik0fmgbPZN0FUTmjBEwHTvqd5QHTwb3nOpEz3X6YCF0lrcrQc0uhyr7gBGBs86nUBWFRp1LKjIRVTVXDipajqNDTQGNZtzvR9MUf1yJJV07inbrlPOENd7rHpKCrJtoZXOkDqInaIqoMCG3DVd353BGmZNJEKOa3DnL7fb9zwuHlvHAfCco7ZS4wAV87trWkp6skXux9v5WhkumbUyGq4ia6DM1PuqqnFfBTAWDzJsnggAJrzr8O7JbDtaXwcW9sqaOb0S6NvnUDZqiNdDQPMDOKvXRJJJQdf1FSrPCCSPEEWO1SeVwictj7rTbpWGRoukwhgJALys95pGGOQxCPzRGrtVFnGcsLN1CwI3wLbmDnNKUv3KpOLEOPRxQXeXuJRIiYCFum44c0wNr731DvHn3YEJMH4iwFONl1rolEL4w6KFUOCq7ekrE5iyUt1V32PNtuUshXRjOYjBval29JMH5GoqZlGhCczzHMA61cmuzqdFwiPCB9yzqvJTg8TqMNvwKJztFIQK4mc5Ev5rRVSozD796AVRKT8rZF39IA1kmCLdXqz7CCC8x4QjjDpxjKCXP5HkWf9mp2FNBjE3pAeaEc6Vk2ENLlW8WVCe\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/008.py000066400000000000000000000002751514360242400230130ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import LimitRequestHeaders request = LimitRequestHeaders benoitc-gunicorn-f5fb19e/tests/requests/invalid/009.http000066400000000000000000000034451514360242400233450ustar00rootroot00000000000000PUT /stuff/here?foo=bar HTTP/1.0\r\n Server: http://127.0.0.1:5984\r\n Content-Type: application/json\r\n Content-Length: 14\r\n header0: 0\r\n header1: 1\r\n header2: 2\r\n header3: 3\r\n header4: 4\r\n header5: 5\r\n header6: 6\r\n header7: 7\r\n header8: 8\r\n header9: 9\r\n header10: 10\r\n header11: 11\r\n header12: 12\r\n header13: 13\r\n header14: 14\r\n header15: 15\r\n header16: 16\r\n header17: 17\r\n header18: 18\r\n header19: 19\r\n header20: 20\r\n header21: 21\r\n header22: 22\r\n header23: 23\r\n header24: 24\r\n header25: 25\r\n header26: 26\r\n header27: 27\r\n header28: 28\r\n header29: 29\r\n header30: 30\r\n header31: 31\r\n header32: 32\r\n header33: 33\r\n header34: 34\r\n header35: 35\r\n header36: 36\r\n header37: 37\r\n header38: 38\r\n header39: 39\r\n header40: 40\r\n header41: 41\r\n header42: 42\r\n header43: 43\r\n header44: 44\r\n header45: 45\r\n header46: 
46\r\n header47: 47\r\n header48: 48\r\n header49: 49\r\n header50: 50\r\n header51: 51\r\n header52: 52\r\n header53: 53\r\n header54: 54\r\n header55: 55\r\n header56: 56\r\n header57: 57\r\n header58: 58\r\n header59: 59\r\n header60: 60\r\n header61: 61\r\n header62: 62\r\n header63: 63\r\n header64: 64\r\n header65: 65\r\n header66: 66\r\n header67: 67\r\n header68: 68\r\n header69: 69\r\n header70: 70\r\n header71: 71\r\n header72: 72\r\n header73: 73\r\n header74: 74\r\n header75: 75\r\n header76: 76\r\n header77: 77\r\n header78: 78\r\n header79: 79\r\n header80: 80\r\n header81: 81\r\n header82: 82\r\n header83: 83\r\n header84: 84\r\n header85: 85\r\n header86: 86\r\n header87: 87\r\n header88: 88\r\n header89: 89\r\n header90: 90\r\n header91: 91\r\n header92: 92\r\n header93: 93\r\n header94: 94\r\n header95: 95\r\n header96: 96\r\n header97: 97\r\n header98: 98\r\n header99: 99\r\n \r\n {"nom": "nom"} benoitc-gunicorn-f5fb19e/tests/requests/invalid/009.py000066400000000000000000000002751514360242400230140ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import LimitRequestHeaders request = LimitRequestHeaders benoitc-gunicorn-f5fb19e/tests/requests/invalid/010.http000066400000000000000000000000541514360242400233260ustar00rootroot00000000000000GET /test HTTP/1.1\r\n Accept: */*\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/010.py000066400000000000000000000004301514360242400227750ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.config import Config from gunicorn.http.errors import LimitRequestHeaders request = LimitRequestHeaders cfg = Config() cfg.set('limit_request_field_size', 10) benoitc-gunicorn-f5fb19e/tests/requests/invalid/011.http000066400000000000000000000002511514360242400233260ustar00rootroot00000000000000GET /test HTTP/1.1\r\n User-Agent: curl/7.18.0 (i486-pc-linux-gnu) libcurl/7.18.0 OpenSSL/0.9.8g zlib/1.2.3.3 libidn/1.1\r\n Host: 0.0.0.0=5000\r\n Accept: */*\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/011.py000066400000000000000000000004231514360242400230000ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.config import Config from gunicorn.http.errors import LimitRequestHeaders request = LimitRequestHeaders cfg = Config() cfg.set('limit_request_fields', 2) benoitc-gunicorn-f5fb19e/tests/requests/invalid/012.http000066400000000000000000000002511514360242400233270ustar00rootroot00000000000000GET /test HTTP/1.1\r\n User-Agent: curl/7.18.0 (i486-pc-linux-gnu) libcurl/7.18.0 OpenSSL/0.9.8g zlib/1.2.3.3 libidn/1.1\r\n Host: 0.0.0.0=5000\r\n Accept: */*\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/012.py000066400000000000000000000004301514360242400227770ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.config import Config from gunicorn.http.errors import LimitRequestHeaders request = LimitRequestHeaders cfg = Config() cfg.set('limit_request_field_size', 98) benoitc-gunicorn-f5fb19e/tests/requests/invalid/013.http000066400000000000000000000000611514360242400233270ustar00rootroot00000000000000GET /test HTTP/1.1\r\n Accept:\r\n */*\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/013.py000066400000000000000000000006721514360242400230100ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. 
# See the NOTICE for more information. from gunicorn.config import Config from gunicorn.http.errors import LimitRequestHeaders request = LimitRequestHeaders cfg = Config() cfg.set('limit_request_field_size', 14) # once this option is removed, this test should not be dropped; # rather, add something involving unnessessary padding cfg.set('permit_obsolete_folding', True) benoitc-gunicorn-f5fb19e/tests/requests/invalid/014.http000066400000000000000000000001161514360242400233310ustar00rootroot00000000000000PUT /stuff/here?foo=bar HTTP/1.0\r\n CONTENT-LENGTH: -1\r\n \r\n {"test": "-1}benoitc-gunicorn-f5fb19e/tests/requests/invalid/014.py000066400000000000000000000002621514360242400230040ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import InvalidHeader request = InvalidHeader benoitc-gunicorn-f5fb19e/tests/requests/invalid/015.http000066400000000000000000000001301514360242400233260ustar00rootroot00000000000000POST /stuff/here?foo=bar HTTP/1.0\r\n CONTENT-LENGTH: bla-bla-bla\r\n \r\n {"test": "-1}benoitc-gunicorn-f5fb19e/tests/requests/invalid/015.py000066400000000000000000000002621514360242400230050ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import InvalidHeader request = InvalidHeader benoitc-gunicorn-f5fb19e/tests/requests/invalid/016.http000066400000000000000000000000411514360242400233300ustar00rootroot00000000000000PUT s://]ufd/: HTTP/1.1\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/016.py000066400000000000000000000002741514360242400230110ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.http.errors import InvalidRequestLine request = InvalidRequestLine benoitc-gunicorn-f5fb19e/tests/requests/invalid/017.http000066400000000000000000000200361514360242400233370ustar00rootroot00000000000000GET /test HTTP/1.1\r\n Long-header: 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456
78901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456
78901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456
78901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456
789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/017.py000066400000000000000000000003601514360242400230060ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.config import Config from gunicorn.http.errors import LimitRequestHeaders cfg = Config() request = LimitRequestHeaders benoitc-gunicorn-f5fb19e/tests/requests/invalid/018.http000066400000000000000000000000601514360242400233330ustar00rootroot00000000000000GET /test HTTP/111\r\n Host: localhost\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/018.py000066400000000000000000000002731514360242400230120ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import InvalidHTTPVersion request = InvalidHTTPVersion benoitc-gunicorn-f5fb19e/tests/requests/invalid/019.http000066400000000000000000000001221514360242400233330ustar00rootroot00000000000000GET /test HTTP/1.1\r\n X-Forwarded-Proto: https\r\n X-Forwarded-Ssl: off\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/019.py000066400000000000000000000004261514360242400230130ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.config import Config from gunicorn.http.errors import InvalidSchemeHeaders request = InvalidSchemeHeaders cfg = Config() cfg.set('forwarded_allow_ips', '*') benoitc-gunicorn-f5fb19e/tests/requests/invalid/020.http000066400000000000000000000001051514360242400233240ustar00rootroot00000000000000GET /stuff/here?foo=bar HTTP/1.1\r\n Content-Length : 3\r\n \r\n xyz benoitc-gunicorn-f5fb19e/tests/requests/invalid/020.py000066400000000000000000000003541514360242400230030ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.config import Config from gunicorn.http.errors import InvalidHeaderName cfg = Config() request = InvalidHeaderName benoitc-gunicorn-f5fb19e/tests/requests/invalid/021.http000066400000000000000000000001321514360242400233250ustar00rootroot00000000000000GET /stuff/here?foo=bar HTTP/1.1\r\n Content-Length: 3\r\n Content-Length: 2\r\n \r\n xyz benoitc-gunicorn-f5fb19e/tests/requests/invalid/021.py000066400000000000000000000003441514360242400230030ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.config import Config from gunicorn.http.errors import InvalidHeader cfg = Config() request = InvalidHeader benoitc-gunicorn-f5fb19e/tests/requests/invalid/022.http000066400000000000000000000000631514360242400233310ustar00rootroot00000000000000GET /first HTTP/1.0\r\n Content-Length: -0\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/invalid/022.py000066400000000000000000000003441514360242400230040ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.config import Config from gunicorn.http.errors import InvalidHeader cfg = Config() request = InvalidHeader benoitc-gunicorn-f5fb19e/tests/requests/invalid/023.http000066400000000000000000000000641514360242400233330ustar00rootroot00000000000000GET /first HTTP/1.0\r\n Content-Length: 0_1\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/invalid/023.py000066400000000000000000000003441514360242400230050ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.config import Config from gunicorn.http.errors import InvalidHeader cfg = Config() request = InvalidHeader benoitc-gunicorn-f5fb19e/tests/requests/invalid/024.http000066400000000000000000000000631514360242400233330ustar00rootroot00000000000000GET /first HTTP/1.0\r\n Content-Length: +1\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/invalid/024.py000066400000000000000000000003441514360242400230060ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.config import Config from gunicorn.http.errors import InvalidHeader cfg = Config() request = InvalidHeader benoitc-gunicorn-f5fb19e/tests/requests/invalid/040.http000066400000000000000000000002131514360242400233260ustar00rootroot00000000000000GET /keep/same/as?invalid/040 HTTP/1.0\r\n Transfer_Encoding: tricked\r\n Content-Length: 7\r\n Content_Length: -1E23\r\n \r\n tricked\r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/040.py000066400000000000000000000004151514360242400230030ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.http.errors import InvalidHeaderName from gunicorn.config import Config cfg = Config() cfg.set("header_map", "refuse") request = InvalidHeaderName benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_01.http000066400000000000000000000003031514360242400247440ustar00rootroot00000000000000POST /chunked_w_underscore_chunk_size HTTP/1.1\r\n Transfer-Encoding: chunked\r\n \r\n 5\r\n hello\r\n 6_0\r\n world\r\n 0\r\n \r\n POST /after HTTP/1.1\r\n Transfer-Encoding: identity\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_01.py000066400000000000000000000002671514360242400244260ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import InvalidChunkSize request = InvalidChunkSize benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_02.http000066400000000000000000000002221514360242400247450ustar00rootroot00000000000000POST /chunked_with_prefixed_value HTTP/1.1\r\n Content-Length: 12\r\n Transfer-Encoding: \tchunked\r\n \r\n 5\r\n hello\r\n 6\r\n world\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_02.py000066400000000000000000000002611514360242400244210ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import InvalidHeader request = InvalidHeader benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_03.http000066400000000000000000000002111514360242400247440ustar00rootroot00000000000000POST /double_chunked HTTP/1.1\r\n Transfer-Encoding: identity, chunked, identity, chunked\r\n \r\n 5\r\n hello\r\n 6\r\n world\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_03.py000066400000000000000000000002631514360242400244240ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.http.errors import InvalidHeader request = InvalidHeader benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_04.http000066400000000000000000000003121514360242400247470ustar00rootroot00000000000000POST /chunked_twice HTTP/1.1\r\n Transfer-Encoding: identity\r\n Transfer-Encoding: chunked\r\n Transfer-Encoding: identity\r\n Transfer-Encoding: chunked\r\n \r\n 5\r\n hello\r\n 6\r\n world\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_04.py000066400000000000000000000002611514360242400244230ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import InvalidHeader request = InvalidHeader benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_05.http000066400000000000000000000002351514360242400247540ustar00rootroot00000000000000POST /chunked_HTTP_1.0 HTTP/1.0\r\n Transfer-Encoding: chunked\r\n \r\n 5\r\n hello\r\n 6\r\n world\r\n 0\r\n Vary: *\r\n Content-Type: text/plain\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_05.py000066400000000000000000000002611514360242400244240ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import InvalidHeader request = InvalidHeader benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_06.http000066400000000000000000000002121514360242400247500ustar00rootroot00000000000000POST /chunked_not_last HTTP/1.1\r\n Transfer-Encoding: chunked\r\n Transfer-Encoding: gzip\r\n \r\n 5\r\n hello\r\n 6\r\n world\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_06.py000066400000000000000000000002631514360242400244270ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.http.errors import InvalidHeader request = InvalidHeader benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_07.http000066400000000000000000000002401514360242400247520ustar00rootroot00000000000000POST /chunked_ambiguous_header_mapping HTTP/1.1\r\n Transfer_Encoding: gzip\r\n Transfer-Encoding: chunked\r\n \r\n 5\r\n hello\r\n 6\r\n world\r\n 0\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_07.py000066400000000000000000000004151514360242400244270ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import InvalidHeaderName from gunicorn.config import Config cfg = Config() cfg.set("header_map", "refuse") request = InvalidHeaderName benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_08.http000066400000000000000000000002161514360242400247560ustar00rootroot00000000000000POST /chunked_not_last HTTP/1.1\r\n Transfer-Encoding: chunked\r\n Transfer-Encoding: identity\r\n \r\n 5\r\n hello\r\n 6\r\n world\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_08.py000066400000000000000000000002611514360242400244270ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import InvalidHeader request = InvalidHeader benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_09.http000066400000000000000000000001531514360242400247570ustar00rootroot00000000000000POST /chunked_ows_without_ext HTTP/1.1\r\n Transfer-Encoding: chunked\r\n \r\n 5\r\n hello\r\n 0 \r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_09.py000066400000000000000000000002671514360242400244360ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.http.errors import InvalidChunkSize request = InvalidChunkSize benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_10.http000066400000000000000000000001461514360242400247510ustar00rootroot00000000000000POST /chunked_ows_before HTTP/1.1\r\n Transfer-Encoding: chunked\r\n \r\n 5\r\n hello\r\n 0\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_10.py000066400000000000000000000002671514360242400244260ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import InvalidChunkSize request = InvalidChunkSize benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_11.http000066400000000000000000000001501514360242400247450ustar00rootroot00000000000000POST /chunked_ows_before HTTP/1.1\r\n Transfer-Encoding: chunked\r\n \r\n 5\n;\r\n hello\r\n 0\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_11.py000066400000000000000000000002671514360242400244270ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import InvalidChunkSize request = InvalidChunkSize benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_12.http000066400000000000000000000001701514360242400247500ustar00rootroot00000000000000POST /chunked_no_chunk_size_but_ext HTTP/1.1\r\n Transfer-Encoding: chunked\r\n \r\n ;foo=bar\r\n hello\r\n 0\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_12.py000066400000000000000000000002671514360242400244300ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.http.errors import InvalidChunkSize request = InvalidChunkSize benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_13.http000066400000000000000000000001471514360242400247550ustar00rootroot00000000000000POST /chunked_no_chunk_size HTTP/1.1\r\n Transfer-Encoding: chunked\r\n \r\n \r\n hello\r\n 0\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/chunked_13.py000066400000000000000000000002671514360242400244310ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import InvalidChunkSize request = InvalidChunkSize benoitc-gunicorn-f5fb19e/tests/requests/invalid/invalid_field_value_01.http000066400000000000000000000001451514360242400273140ustar00rootroot00000000000000GET / HTTP/1.1\r\n Host: x\r\n Newline: a\n Content-Length: 26\r\n GET / HTTP/1.1\n Host: x\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/invalid_field_value_01.py000066400000000000000000000003441514360242400267660ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.config import Config from gunicorn.http.errors import InvalidHeader cfg = Config() request = InvalidHeader benoitc-gunicorn-f5fb19e/tests/requests/invalid/nonascii_01.http000066400000000000000000000001001514360242400251210ustar00rootroot00000000000000GETß /germans.. HTTP/1.1\r\n Content-Length: 3\r\n \r\n ÄÄÄ benoitc-gunicorn-f5fb19e/tests/requests/invalid/nonascii_01.py000066400000000000000000000003621514360242400246040ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.config import Config from gunicorn.http.errors import InvalidRequestMethod cfg = Config() request = InvalidRequestMethod benoitc-gunicorn-f5fb19e/tests/requests/invalid/nonascii_02.http000066400000000000000000000000771514360242400251370ustar00rootroot00000000000000GETÿ /french.. HTTP/1.1\r\n Content-Length: 3\r\n \r\n ÄÄÄ benoitc-gunicorn-f5fb19e/tests/requests/invalid/nonascii_02.py000066400000000000000000000003621514360242400246050ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.config import Config from gunicorn.http.errors import InvalidRequestMethod cfg = Config() request = InvalidRequestMethod benoitc-gunicorn-f5fb19e/tests/requests/invalid/nonascii_03.http000066400000000000000000000001261514360242400251330ustar00rootroot00000000000000GET /germans.. HTTP/1.1\r\n Content-Lengthß: 3\r\n Content-Length: 3\r\n \r\n ÄÄÄ benoitc-gunicorn-f5fb19e/tests/requests/invalid/nonascii_03.py000066400000000000000000000003541514360242400246070ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.config import Config from gunicorn.http.errors import InvalidHeaderName cfg = Config() request = InvalidHeaderName benoitc-gunicorn-f5fb19e/tests/requests/invalid/nonascii_04.http000066400000000000000000000001251514360242400251330ustar00rootroot00000000000000GET /french.. HTTP/1.1\r\n Content-Lengthÿ: 3\r\n Content-Length: 3\r\n \r\n ÄÄÄ benoitc-gunicorn-f5fb19e/tests/requests/invalid/nonascii_04.py000066400000000000000000000003541514360242400246100ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.config import Config from gunicorn.http.errors import InvalidHeaderName cfg = Config() request = InvalidHeaderName benoitc-gunicorn-f5fb19e/tests/requests/invalid/obs_fold_01.http000066400000000000000000000001031514360242400251100ustar00rootroot00000000000000GET / HTTP/1.1\r\n Long: one\r\n two\r\n Host: localhost\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/obs_fold_01.py000066400000000000000000000002661514360242400245730ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import ObsoleteFolding request = ObsoleteFolding benoitc-gunicorn-f5fb19e/tests/requests/invalid/pp_01.http000066400000000000000000000000451514360242400237450ustar00rootroot00000000000000PROXY TCP4 192.168.0.1 192.16...\r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/pp_01.py000066400000000000000000000004131514360242400234150ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.config import Config from gunicorn.http.errors import InvalidProxyLine cfg = Config() cfg.set("proxy_protocol", True) request = InvalidProxyLine benoitc-gunicorn-f5fb19e/tests/requests/invalid/pp_02.http000066400000000000000000000000651514360242400237500ustar00rootroot00000000000000PROXY TCP4 192.168.0.1 192.168.0.11 65iii 100000\r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/pp_02.py000066400000000000000000000004131514360242400234160ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.config import Config from gunicorn.http.errors import InvalidProxyLine cfg = Config() cfg.set('proxy_protocol', True) request = InvalidProxyLine benoitc-gunicorn-f5fb19e/tests/requests/invalid/prefix_01.http000066400000000000000000000000421514360242400246200ustar00rootroot00000000000000GET\0PROXY /foo HTTP/1.1\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/prefix_01.py000066400000000000000000000002761514360242400243020ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import InvalidRequestMethod request = InvalidRequestMethodbenoitc-gunicorn-f5fb19e/tests/requests/invalid/prefix_02.http000066400000000000000000000000351514360242400246230ustar00rootroot00000000000000GET\0 /foo HTTP/1.1\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/prefix_02.py000066400000000000000000000002761514360242400243030ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import InvalidRequestMethod request = InvalidRequestMethodbenoitc-gunicorn-f5fb19e/tests/requests/invalid/prefix_03.http000066400000000000000000000001041514360242400246210ustar00rootroot00000000000000GET /stuff/here?foo=bar HTTP/1.1\r\n Content-Length: 0 1\r\n \r\n x benoitc-gunicorn-f5fb19e/tests/requests/invalid/prefix_03.py000066400000000000000000000003441514360242400243000ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.config import Config from gunicorn.http.errors import InvalidHeader cfg = Config() request = InvalidHeader benoitc-gunicorn-f5fb19e/tests/requests/invalid/prefix_04.http000066400000000000000000000001151514360242400246240ustar00rootroot00000000000000GET /stuff/here?foo=bar HTTP/1.1\r\n Content-Length: 3 1\r\n \r\n xyz abc123 benoitc-gunicorn-f5fb19e/tests/requests/invalid/prefix_04.py000066400000000000000000000003441514360242400243010ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.config import Config from gunicorn.http.errors import InvalidHeader cfg = Config() request = InvalidHeader benoitc-gunicorn-f5fb19e/tests/requests/invalid/prefix_05.http000066400000000000000000000001051514360242400246240ustar00rootroot00000000000000GET: /stuff/here?foo=bar HTTP/1.1\r\n Content-Length: 3\r\n \r\n xyz benoitc-gunicorn-f5fb19e/tests/requests/invalid/prefix_05.py000066400000000000000000000003621514360242400243020ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.config import Config from gunicorn.http.errors import InvalidRequestMethod cfg = Config() request = InvalidRequestMethod benoitc-gunicorn-f5fb19e/tests/requests/invalid/prefix_06.http000066400000000000000000000002271514360242400246320ustar00rootroot00000000000000GET /the/future HTTP/1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111.1\r\n Content-Length: 7\r\n \r\n Old Man benoitc-gunicorn-f5fb19e/tests/requests/invalid/prefix_06.py000066400000000000000000000003561514360242400243060ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.config import Config from gunicorn.http.errors import InvalidHTTPVersion cfg = Config() request = InvalidHTTPVersion benoitc-gunicorn-f5fb19e/tests/requests/invalid/version_01.http000066400000000000000000000000341514360242400250110ustar00rootroot00000000000000GET /foo HTTP/0.99\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/version_01.py000066400000000000000000000002731514360242400244670ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import InvalidHTTPVersion request = InvalidHTTPVersion benoitc-gunicorn-f5fb19e/tests/requests/invalid/version_02.http000066400000000000000000000000331514360242400250110ustar00rootroot00000000000000GET /foo HTTP/2.0\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/invalid/version_02.py000066400000000000000000000002731514360242400244700ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import InvalidHTTPVersion request = InvalidHTTPVersion benoitc-gunicorn-f5fb19e/tests/requests/valid/000077500000000000000000000000001514360242400215775ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/tests/requests/valid/001.http000066400000000000000000000002251514360242400227770ustar00rootroot00000000000000PUT /stuff/here?foo=bar HTTP/1.0\r\n Server: http://127.0.0.1:5984\r\n Content-Type: application/json\r\n Content-Length: 14\r\n \r\n {"nom": "nom"} benoitc-gunicorn-f5fb19e/tests/requests/valid/001.py000066400000000000000000000005751514360242400224600ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
request = { "method": "PUT", "uri": uri("/stuff/here?foo=bar"), "version": (1, 0), "headers": [ ("SERVER", "http://127.0.0.1:5984"), ("CONTENT-TYPE", "application/json"), ("CONTENT-LENGTH", "14") ], "body": b'{"nom": "nom"}' } benoitc-gunicorn-f5fb19e/tests/requests/valid/002.http000066400000000000000000000002501514360242400227760ustar00rootroot00000000000000GET /test HTTP/1.1\r\n User-Agent: curl/7.18.0 (i486-pc-linux-gnu) libcurl/7.18.0 OpenSSL/0.9.8g zlib/1.2.3.3 libidn/1.1\r\n Host: 0.0.0.0=5000\r\n Accept: */*\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/valid/002.py000066400000000000000000000006221514360242400224520ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. request = { "method": "GET", "uri": uri("/test"), "version": (1, 1), "headers": [ ("USER-AGENT", "curl/7.18.0 (i486-pc-linux-gnu) libcurl/7.18.0 OpenSSL/0.9.8g zlib/1.2.3.3 libidn/1.1"), ("HOST", "0.0.0.0=5000"), ("ACCEPT", "*/*") ], "body": b"" } benoitc-gunicorn-f5fb19e/tests/requests/valid/003.http000066400000000000000000000006121514360242400230010ustar00rootroot00000000000000GET /favicon.ico HTTP/1.1\r\n Host: 0.0.0.0=5000\r\n User-Agent: Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9) Gecko/2008061015 Firefox/3.0\r\n Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n Accept-Language: en-us,en;q=0.5\r\n Accept-Encoding: gzip,deflate\r\n Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n Keep-Alive: 300\r\n Connection: keep-alive\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/valid/003.py000066400000000000000000000012531514360242400224540ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
request = { "method": "GET", "uri": uri("/favicon.ico"), "version": (1, 1), "headers": [ ("HOST", "0.0.0.0=5000"), ("USER-AGENT", "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9) Gecko/2008061015 Firefox/3.0"), ("ACCEPT", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"), ("ACCEPT-LANGUAGE", "en-us,en;q=0.5"), ("ACCEPT-ENCODING", "gzip,deflate"), ("ACCEPT-CHARSET", "ISO-8859-1,utf-8;q=0.7,*;q=0.7"), ("KEEP-ALIVE", "300"), ("CONNECTION", "keep-alive") ], "body": b"" } benoitc-gunicorn-f5fb19e/tests/requests/valid/004.http000066400000000000000000000000711514360242400230010ustar00rootroot00000000000000GET /silly HTTP/1.1\r\n aaaaaaaaaaaaa:++++++++++\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/valid/004.py000066400000000000000000000004161514360242400224550ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. request = { "method": "GET", "uri": uri("/silly"), "version": (1, 1), "headers": [ ("AAAAAAAAAAAAA", "++++++++++") ], "body": b"" } benoitc-gunicorn-f5fb19e/tests/requests/valid/005.http000066400000000000000000000000761514360242400230070ustar00rootroot00000000000000GET /forums/1/topics/2375?page=1#posts-17408 HTTP/1.1\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/valid/005.py000066400000000000000000000004031514360242400224520ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. request = { "method": "GET", "uri": uri("/forums/1/topics/2375?page=1#posts-17408"), "version": (1, 1), "headers": [], "body": b"" } benoitc-gunicorn-f5fb19e/tests/requests/valid/006.http000066400000000000000000000000631514360242400230040ustar00rootroot00000000000000GET /get_no_headers_no_body/world HTTP/1.1\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/valid/006.py000066400000000000000000000003701514360242400224560ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. 
# See the NOTICE for more information. request = { "method": "GET", "uri": uri("/get_no_headers_no_body/world"), "version": (1, 1), "headers": [], "body": b"" } benoitc-gunicorn-f5fb19e/tests/requests/valid/007.http000066400000000000000000000000751514360242400230100ustar00rootroot00000000000000GET /get_one_header_no_body HTTP/1.1\r\n Accept: */*\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/valid/007.py000066400000000000000000000004211514360242400224540ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. request = { "method": "GET", "uri": uri("/get_one_header_no_body"), "version": (1, 1), "headers": [ ("ACCEPT", "*/*") ], "body": b"" } benoitc-gunicorn-f5fb19e/tests/requests/valid/008.http000066400000000000000000000001111514360242400230000ustar00rootroot00000000000000GET /unusual_content_length HTTP/1.0\r\n conTENT-Length: 5\r\n \r\n HELLObenoitc-gunicorn-f5fb19e/tests/requests/valid/008.py000066400000000000000000000004341514360242400224610ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. request = { "method": "GET", "uri": uri("/unusual_content_length"), "version": (1, 0), "headers": [ ("CONTENT-LENGTH", "5") ], "body": b"HELLO" } benoitc-gunicorn-f5fb19e/tests/requests/valid/009.http000066400000000000000000000002111514360242400230020ustar00rootroot00000000000000POST /post_identity_body_world?q=search#hey HTTP/1.1\r\n Accept: */*\r\n Transfer-Encoding: identity\r\n Content-Length: 5\r\n \r\n Worldbenoitc-gunicorn-f5fb19e/tests/requests/valid/009.py000066400000000000000000000005621514360242400224640ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
request = { "method": "POST", "uri": uri("/post_identity_body_world?q=search#hey"), "version": (1, 1), "headers": [ ("ACCEPT", "*/*"), ("TRANSFER-ENCODING", "identity"), ("CONTENT-LENGTH", "5") ], "body": b"World" } benoitc-gunicorn-f5fb19e/tests/requests/valid/010.http000066400000000000000000000002061514360242400227760ustar00rootroot00000000000000POST /post_chunked_all_your_base HTTP/1.1\r\n Transfer-Encoding: chunked\r\n \r\n 1e\r\n all your base are belong to us\r\n 0\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/valid/010.py000066400000000000000000000005041514360242400224500ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. request = { "method": "POST", "uri": uri("/post_chunked_all_your_base"), "version": (1, 1), "headers": [ ("TRANSFER-ENCODING", "chunked"), ], "body": b"all your base are belong to us" } benoitc-gunicorn-f5fb19e/tests/requests/valid/011.http000066400000000000000000000001751514360242400230040ustar00rootroot00000000000000POST /two_chunks_mult_zero_end HTTP/1.1\r\n Transfer-Encoding: chunked\r\n \r\n 5\r\n hello\r\n 6\r\n world\r\n 000\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/valid/011.py000066400000000000000000000004561514360242400224570ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
request = { "method": "POST", "uri": uri("/two_chunks_mult_zero_end"), "version": (1, 1), "headers": [ ("TRANSFER-ENCODING", "chunked") ], "body": b"hello world" } benoitc-gunicorn-f5fb19e/tests/requests/valid/012.http000066400000000000000000000002461514360242400230040ustar00rootroot00000000000000POST /chunked_w_trailing_headers HTTP/1.1\r\n Transfer-Encoding: chunked\r\n \r\n 5\r\n hello\r\n 6\r\n world\r\n 0\r\n Vary: *\r\n Content-Type: text/plain\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/valid/012.py000066400000000000000000000006071514360242400224560ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. request = { "method": "POST", "uri": uri("/chunked_w_trailing_headers"), "version": (1, 1), "headers": [ ("TRANSFER-ENCODING", "chunked") ], "body": b"hello world", "trailers": [ ("VARY", "*"), ("CONTENT-TYPE", "text/plain") ] } benoitc-gunicorn-f5fb19e/tests/requests/valid/013.http000066400000000000000000000002371514360242400230050ustar00rootroot00000000000000POST /chunked_w_extensions HTTP/1.1\r\n Transfer-Encoding: chunked\r\n \r\n 5; some; parameters=stuff\r\n hello\r\n 6; blahblah; blah\r\n world\r\n 0\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/valid/013.py000066400000000000000000000004521514360242400224550ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. request = { "method": "POST", "uri": uri("/chunked_w_extensions"), "version": (1, 1), "headers": [ ("TRANSFER-ENCODING", "chunked") ], "body": b"hello world" } benoitc-gunicorn-f5fb19e/tests/requests/valid/014.http000066400000000000000000000000561514360242400230050ustar00rootroot00000000000000GET /with_"quotes"?foo="bar" HTTP/1.1\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/valid/014.py000066400000000000000000000003631514360242400224570ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. 
# See the NOTICE for more information. request = { "method": "GET", "uri": uri('/with_"quotes"?foo="bar"'), "version": (1, 1), "headers": [], "body": b"" } benoitc-gunicorn-f5fb19e/tests/requests/valid/015.http000066400000000000000000000001421514360242400230020ustar00rootroot00000000000000GET /test HTTP/1.0\r\n Host: 0.0.0.0:5000\r\n User-Agent: ApacheBench/2.3\r\n Accept: */*\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/valid/015.py000066400000000000000000000005141514360242400224560ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. request = { "method": "GET", "uri": uri("/test"), "version": (1, 0), "headers": [ ("HOST", "0.0.0.0:5000"), ("USER-AGENT", "ApacheBench/2.3"), ("ACCEPT", "*/*") ], "body": b"" } benoitc-gunicorn-f5fb19e/tests/requests/valid/017.http000066400000000000000000000001331514360242400230040ustar00rootroot00000000000000GET /stuff/here?foo=bar HTTP/1.0\r\n If-Match: bazinga!\r\n If-Match: large-sound\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/valid/017.py000066400000000000000000000004711514360242400224620ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. request = { "method": "GET", "uri": uri("/stuff/here?foo=bar"), "version": (1, 0), "headers": [ ("IF-MATCH", "bazinga!"), ("IF-MATCH", "large-sound") ], "body": b"" } benoitc-gunicorn-f5fb19e/tests/requests/valid/018.http000066400000000000000000000000721514360242400230070ustar00rootroot00000000000000GET /first HTTP/1.1\r\n \r\n GET /second HTTP/1.1\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/valid/018.py000066400000000000000000000005541514360242400224650ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
req1 = { "method": "GET", "uri": uri("/first"), "version": (1, 1), "headers": [], "body": b"" } req2 = { "method": "GET", "uri": uri("/second"), "version": (1, 1), "headers": [], "body": b"" } request = [req1, req2] benoitc-gunicorn-f5fb19e/tests/requests/valid/019.http000066400000000000000000000000721514360242400230100ustar00rootroot00000000000000GET /first HTTP/1.0\r\n \r\n GET /second HTTP/1.1\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/valid/019.py000066400000000000000000000003411514360242400224600ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. request = { "method": "GET", "uri": uri("/first"), "version": (1, 0), "headers": [], "body": b"" } benoitc-gunicorn-f5fb19e/tests/requests/valid/020.http000066400000000000000000000001211514360242400227730ustar00rootroot00000000000000GET /first HTTP/1.0\r\n Content-Length: 24\r\n \r\n GET /second HTTP/1.1\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/valid/020.py000066400000000000000000000004251514360242400224530ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. request = { "method": "GET", "uri": uri("/first"), "version": (1, 0), "headers": [('CONTENT-LENGTH', '24')], "body": b"GET /second HTTP/1.1\r\n\r\n" } benoitc-gunicorn-f5fb19e/tests/requests/valid/021.http000066400000000000000000000001201514360242400227730ustar00rootroot00000000000000GET /first HTTP/1.1\r\n Connection: Close\r\n \r\n GET /second HTTP/1.1\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/valid/021.py000066400000000000000000000003701514360242400224530ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
request = { "method": "GET", "uri": uri("/first"), "version": (1, 1), "headers": [("CONNECTION", "Close")], "body": b"" } benoitc-gunicorn-f5fb19e/tests/requests/valid/022.http000066400000000000000000000001251514360242400230010ustar00rootroot00000000000000GET /first HTTP/1.0\r\n Connection: Keep-Alive\r\n \r\n GET /second HTTP/1.1\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/valid/022.py000066400000000000000000000006101514360242400224510ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. req1 = { "method": "GET", "uri": uri("/first"), "version": (1, 0), "headers": [("CONNECTION", "Keep-Alive")], "body": b"" } req2 = { "method": "GET", "uri": uri("/second"), "version": (1, 1), "headers": [], "body": b"" } request = [req1, req2] benoitc-gunicorn-f5fb19e/tests/requests/valid/023.http000066400000000000000000000002331514360242400230020ustar00rootroot00000000000000POST /two_chunks_mult_zero_end HTTP/1.1\r\n Transfer-Encoding: chunked\r\n \r\n 5\r\n hello\r\n 6\r\n world\r\n 000\r\n \r\n GET /second HTTP/1.1\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/valid/023.py000066400000000000000000000006711514360242400224610ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
req1 = { "method": "POST", "uri": uri("/two_chunks_mult_zero_end"), "version": (1, 1), "headers": [ ("TRANSFER-ENCODING", "chunked") ], "body": b"hello world" } req2 = { "method": "GET", "uri": uri("/second"), "version": (1, 1), "headers": [], "body": b"" } request = [req1, req2] benoitc-gunicorn-f5fb19e/tests/requests/valid/024.http000066400000000000000000000400301514360242400230020ustar00rootroot00000000000000PUT /q=08aP8931Ltyl9nqyJvjMaRCOgDV3uONtAdHABjoZUG6KAP6h3Vh97O3GJjjovXYgNdrhxc7TriXoAmeehZMJx88EyhcPXO0f09Nvd128SZnxZ2r5jFDELkn26reKRysODSLBZLfjU3vxLzLXKWeFOFJKcZYRH9V7hC98DDS4ZsS7weUksBuK6m86aLNHHHB0Xbyxv1TiDbOWYIzKxV0eZKyk0CaDLDiR0CRuMOf4rwBeuHoMrumzafrFI5iL72ANQZmOvKdk1qQeXkRqEG11YU0kF7f1hSlmgiIgg5maWiBsA9sAg36IIXZMWwJF63zpMgAyjTT8l4pQhSBfhY2xbGAWmLGpyd1rlBm0O5LCoKpnQuTACm2azi0x6a1Qbry9flQBO4jHge2dXiD1si6Gh5q8fZu8ZQ7LLWii2u4rGB7E4XlhnClrCHg5vJmjYf2AItYPA0ogsiIdEEQGpzMJPqrp8Icn5kAAimWF1aCYaDjcdSgWI48PnoxlzIHX50EPFcPOSLecjkstD9z66H554sUXfWn3Mk9lnOUlse6nx0u1YClFK4UFXp98ru9eBBr7pkAsfZ34yPskayGyXPPyzWyBfVd28UuvdEG47SMdyqEpX0rFdk67fAYij0PWMK79mDmGAS37O821o18XUbu0GQjsqAGVMN9LDIAliD9QqtlwdEnplKkUyyZ7GAFJCFffgzppU9CjA2FbPX6ZjTOi4sPoYEyhyeQKVqAe9keYeDpU2qDwq83XEDQUKvP0w48GyavSmdBcrMXjUsu0PfdYpSaKwarrUB3i93HgoQB3ZJIR4lW6iPRTmm28OEKq2MIJGAoTXxCZYM5UacRldlqQOj6JkYz6y7ppWOjJ9yiCUEenuvfcItgmw9HIgGA59JxO8NDLEZLSONfuIgiV7wjsJnxuTOlU4vkjV7fTuOeU91xez7UKhaTqqEW3XBUSLjhKi3IkZg7ukrGZTWPhijFv2EZwEWDAyLlHvZB4X738zGJUlEX1k52EHwrKVKdLfePcaOjAGKsongHBFYxYC8vBBLuKm9RWexKCT14M25pCGloJXZ4OpBRfDQA2kobLUcEXEpzqRBPGN2JdNSBOFlUtUxWKnnPBM6r9S356l3k1o9zTIPeoIitWRjASs4A0iwYc8p5vv5Kt8KtsmW7Xv8dlU8HbZHsy3LI7O9BpUH8cJubqdEhooKABkx71pdcsZGhZb6epyTiPyvOhdJ7tNtFy3KQOameqTgGyd53Z42eZ0AjaOEvnzermi2E0xo3MMHFhB74TFtNAI3ppxxyqknc1mzUqZ49Wi8YPBg9ids6IgZvddBQYvwEozkmyGAkatQtt9TD4LjU3TyyUlhNG21q7CzEEl8NNsVrV6QyHsfw7E5w7XcoT7OQkBYoZwHIAjfekehnpc2llRtRY5m43fPVasmsVazOR36DRSLZJPHAqUDO0LInu9mgP57Mnz9CgylEmdE2aaYs426rnTFR3G3CfjLofHfjaLOkAegr4W3jx6MNMMOMZw2u46YTCnlfbBK6ZA1UYeAH1DIQJykcSQESinC8HpYIJt9A8g7UT0awzRP
1F9nHa3wDnaAHndQYKMrjzlWo8ejQ0XHWgHhqnWHgW4h9sOnJckH00CYK1fHUKASJ3D8kOKax6uplexfz6BCvAoL9zm5TjeB1yxrpLp9NjjTWSKG2HOZhPkGpdEqU4mjnN2AkUVACPGos5YLBmTnSrdOEGZJDlAvJOUt800Mu3BYc1MiDIB6LMSSV5RsIUDFOzNletGQoq4G3yHZmx78uEse5vUTPFF3KT8LCrssqdIU9H97Npgf6N5j8arQ7ykLzN459jJaUzpGIo6uowPnUSatDf9GAvAmWNvsVTz6bYiAV71C7QF0C7UolYIQY6DHJEHejgX2YMEovWNLPL50eeC51h4DdPNv5G4ZdNtQTRVybYBZMpetGDiFmXN0JKa1sKHOSZxdrhKjxDIhrYVyCcRUMQ0sjGGHFuOcRszr6E5igEMtsebHQ3KYiGd5B27LikpUHhk61rgZlulHdMoS6YgQs6SV6UMVNku6sCw529xhUciDwRMhsbAjDlahYbrGa3NryxyV5LrXONGGKCchCqv7vDMdAtPrVr8M2vL5MySQAC3g90iugGQcLH3hCf9f1Kn5X0hM4KZTfwOPJhlfJsMRNhssiDoXaycUvOUS58266yPDlitPIAzO03XClm4EDPXGIwcwiFr7FcDo3tQIMZVy87i48Zb80s3zAYRiBIS0vO3RKGx3OGN5zid2B7MfnfLzvpvgZoirHhAqXffnym5abpZNzGuo5GowTRA2Ptk4Ve2JFoHACWpD6HiGnRZ9QVOmPICoQrSUQw45Jlk9onKJz5Erhnx0943Uno6tMJ5jbrWBNiIO7i04xzRBgujeiAJvuQkVDX2QLKRxZ7s6rhdfOaq6R6uL108gEzzlXOLqTTJXgM63rcUWNbE7wsIXcCFSF59LLJ7G5Qea33suxdDX6DcK4a0VMZoxmWPtCi1dAT9ggJqc2Sh7mkAqizaB16RXZvSydchpdVj6s4qn4ivr0HKHdAstX0XZ0FFU6lOiNmU3vasMg2uaVG8tyuG8N8VsuXIOQs7xtFxDhilYb8MQ9vES9pWfWPSXFlJAq4XKPY8a0JOIx57EQuWHo3uWgRTIRThvZP9YYzSnjGIHwjS8JeppICHofADXZhJ0uDQaQs7MiXEALpGmT3W6w0G3tBdZcuTDkWx1HsT5jd9jQeJpgD2VxdKh8U4Q3vANTAuwBXLJ2P0stS8Q72JWgNPwKYTY9cPoaGZlUFGgVsq8CdEFH9yW0c27G5s5sfHsyep6t4VxIHHMOX2GmMRyGxDI33am1J7ZmJ1NyXiwkHxtPH5QBpU2PMu2Guf3xIxlk3snMkMAsGO0vYfqO9tdIgdxMYO3HZTYv99OXaHcNQ5u0pRZZyVrNOIPurkEOdJy0nowPemIgUuHWh8vQCuDZav1m35AOl6ftSFuChSm5KstEWnC7q8mJ0juJEBkCRmQphP3V1pqiDjz6YA90qEe7MA3nzT0nHG8A1hWlqcPVPNz4qWNF6Fq1ub4075aXO0H7Krb6rhWGb3ZRPjpb4BKN8jGFQrBUMZprtjAJ67BnfmYgE0mmGLV2QP10gYS1T06kBRyrtp7he6wsPiBPJ7wxPLHNUN2SGQHBTSKagndM99fuaga5Sw9OT8Fzdo7xUJXfhJ97gUnNDrknal0B00NMNvajZeQQTJyBsVSwBZtZ45ZCcq1idc7GWC0MITSk58cIVkSPXbrERUaygyY13dPeEVzjVi9aVJwUF6eJu1s8u3FCJqp2GoWIItwvZO69asX75fekFkmFpNavxM0X0dZC01TTPpV6E6PJoIfW8C06CKNHV7Gk2mkTWGSwUG4xD2L3G3XarodHDcmumFJX9Xviv0rvm38SCtin6OpjH8MHYDrj1OxTJbC2VclJxv73z2BDBquosKOik0fmgbPZN0FUTmjBEwHTvqd5QHTwb3nOpEz3X6YCF0lrcrQc0uhyr7gBGBs86nUBWFRp1LKjIRVTVXDipajqNDTQGNZtzvR
9MUf1yJJV07inbrlPOENd7rHpKCrJtoZXOkDqInaIqoMCG3DVd353BGmZNJEKOa3DnL7fb9zwuHlvHAfCco7ZS4wAV87trWkp6skXux9v5WhkumbUyGq4ia6DM1PuqqnFfBTAWDzJsnggAJrzr8O7JbDtaXwcW9sqaOb0S6NvnUDZqiNdDQPMDOKvXRJJJQdf1FSrPCCSPEEWO1SeVwictj7rTbpWGRoukwhgJALys95pGGOQxCPzRGrtVFnGcsLN1CwI3wLbmDnNKUv3KpOLEOPRxQXeXuJRIiYCFum44c0wNr731DvHn3YEJMH4iwFONl1rolEL4w6KFUOCq7ekrE5iyUt1V32PNtuUshXRjOYjBval29JMH5GoqZlGhCczzHMA61cmuzqdFwiPCB9yzqvJTg8TqMNvwKJztFIQK4mc5Ev5rRVSozD796AVRKT8rZF39IA1kmCLdXqz7CCC8x4QjjDpxjKCXP5HkWf9mp2FNBjE3pAeaEc6Vk2ENLlW8WVCe08aP8931Ltyl9nqyJvjMaRCOgDV3uONtAdHABjoZUG6KAP6h3Vh97O3GJjjovXYgNdrhxc7TriXoAmeehZMJx88EyhcPXO0f09Nvd128SZnxZ2r5jFDELkn26reKRysODSLBZLfjU3vxLzLXKWeFOFJKcZYRH9V7hC98DDS4ZsS7weUksBuK6m86aLNHHHB0Xbyxv1TiDbOWYIzKxV0eZKyk0CaDLDiR0CRuMOf4rwBeuHoMrumzafrFI5iL72ANQZmOvKdk1qQeXkRqEG11YU0kF7f1hSlmgiIgg5maWiBsA9sAg36IIXZMWwJF63zpMgAyjTT8l4pQhSBfhY2xbGAWmLGpyd1rlBm0O5LCoKpnQuTACm2azi0x6a1Qbry9flQBO4jHge2dXiD1si6Gh5q8fZu8ZQ7LLWii2u4rGB7E4XlhnClrCHg5vJmjYf2AItYPA0ogsiIdEEQGpzMJPqrp8Icn5kAAimWF1aCYaDjcdSgWI48PnoxlzIHX50EPFcPOSLecjkstD9z66H554sUXfWn3Mk9lnOUlse6nx0u1YClFK4UFXp98ru9eBBr7pkAsfZ34yPskayGyXPPyzWyBfVd28UuvdEG47SMdyqEpX0rFdk67fAYij0PWMK79mDmGAS37O821o18XUbu0GQjsqAGVMN9LDIAliD9QqtlwdEnplKkUyyZ7GAFJCFffgzppU9CjA2FbPX6ZjTOi4sPoYEyhyeQKVqAe9keYeDpU2qDwq83XEDQUKvP0w48GyavSmdBcrMXjUsu0PfdYpSaKwarrUB3i93HgoQB3ZJIR4lW6iPRTmm28OEKq2MIJGAoTXxCZYM5UacRldlqQOj6JkYz6y7ppWOjJ9yiCUEenuvfcItgmw9HIgGA59JxO8NDLEZLSONfuIgiV7wjsJnxuTOlU4vkjV7fTuOeU91xez7UKhaTqqEW3XBUSLjhKi3IkZg7ukrGZTWPhijFv2EZwEWDAyLlHvZB4X738zGJUlEX1k52EHwrKVKdLfePcaOjAGKsongHBFYxYC8vBBLuKm9RWexKCT14M25pCGloJXZ4OpBRfDQA2kobLUcEXEpzqRBPGN2JdNSBOFlUtUxWKnnPBM6r9S356l3k1o9zTIPeoIitWRjASs4A0iwYc8p5vv5Kt8KtsmW7Xv8dlU8HbZHsy3LI7O9BpUH8cJubqdEhooKABkx71pdcsZGhZb6epyTiPyvOhdJ7tNtFy3KQOameqTgGyd53Z42eZ0AjaOEvnzermi2E0xo3MMHFhB74TFtNAI3ppxxyqknc1mzUqZ49Wi8YPBg9ids6IgZvddBQYvwEozkmyGAkatQtt9TD4LjU3TyyUlhNG21q7CzEEl8NNsVrV6QyHsfw7E5w7XcoT7OQkBYoZwHIAjfekehnpc2llRtRY5m43fPVasmsVazOR36DRSLZJPHAqUDO0LInu9mgP57Mnz9CgylEmdE2aaYs426
rnTFR3G3CfjLofHfjaLOkAegr4W3jx6MNMMOMZw2u46YTCnlfbBK6ZA1UYeAH1DIQJykcSQESinC8HpYIJt9A8g7UT0awzRP1F9nHa3wDnaAHndQYKMrjzlWo8ejQ0XHWgHhqnWHgW4h9sOnJckH00CYK1fHUKASJ3D8kOKax6uplexfz6BCvAoL9zm5TjeB1yxrpLp9NjjTWSKG2HOZhPkGpdEqU4mjnN2AkUVACPGos5YLBmTnSrdOEGZJDlAvJOUt800Mu3BYc1MiDIB6LMSSV5RsIUDFOzNletGQoq4G3yHZmx78uEse5vUTPFF3KT8LCrssqdIU9H97Npgf6N5j8arQ7ykLzN459jJaUzpGIo6uowPnUSatDf9GAvAmWNvsVTz6bYiAV71C7QF0C7UolYIQY6DHJEHejgX2YMEovWNLPL50eeC51h4DdPNv5G4ZdNtQTRVybYBZMpetGDiFmXN0JKa1sKHOSZxdrhKjxDIhrYVyCcRUMQ0sjGGHFuOcRszr6E5igEMtsebHQ3KYiGd5B27LikpUHhk61rgZlulHdMoS6YgQs6SV6UMVNku6sCw529xhUciDwRMhsbAjDlahYbrGa3NryxyV5LrXONGGKCchCqv7vDMdAtPrVr8M2vL5MySQAC3g90iugGQcLH3hCf9f1Kn5X0hM4KZTfwOPJhlfJsMRNhssiDoXaycUvOUS58266yPDlitPIAzO03XClm4EDPXGIwcwiFr7FcDo3tQIMZVy87i48Zb80s3zAYRiBIS0vO3RKGx3OGN5zid2B7MfnfLzvpvgZoirHhAqXffnym5abpZNzGuo5GowTRA2Ptk4Ve2JFoHACWpD6HiGnRZ9QVOmPICoQrSUQw45Jlk9onKJz5Erhnx0943Uno6tMJ5jbrWBNiIO7i04xzRBgujeiAJvuQkVDX2QLKRxZ7s6rhdfOaq6R6uL108gEzzlXOLqTTJXgM63rcUWNbE7wsIXcCFSF59LLJ7G5Qea33suxdDX6DcK4a0VMZoxmWPtCi1dAT9ggJqc2Sh7mkAqizaB16RXZvSydchpdVj6s4qn4ivr0HKHdAstX0XZ0FFU6lOiNmU3vasMg2uaVG8tyuG8N8VsuXIOQs7xtFxDhilYb8MQ9vES9pWfWPSXFlJAq4XKPY8a0JOIx57EQuWHo3uWgRTIRThvZP9YYzSnjGIHwjS8JeppICHofADXZhJ0uDQaQs7MiXEALpGmT3W6w0G3tBdZcuTDkWx1HsT5jd9jQeJpgD2VxdKh8U4Q3vANTAuwBXLJ2P0stS8Q72JWgNPwKYTY9cPoaGZlUFGgVsq8CdEFH9yW0c27G5s5sfHsyep6t4VxIHHMOX2GmMRyGxDI33am1J7ZmJ1NyXiwkHxtPH5QBpU2PMu2Guf3xIxlk3snMkMAsGO0vYfqO9tdIgdxMYO3HZTYv99OXaHcNQ5u0pRZZyVrNOIPurkEOdJy0nowPemIgUuHWh8vQCuDZav1m35AOl6ftSFuChSm5KstEWnC7q8mJ0juJEBkCRmQphP3V1pqiDjz6YA90qEe7MA3nzT0nHG8A1hWlqcPVPNz4qWNF6Fq1ub4075aXO0H7Krb6rhWGb3ZRPjpb4BKN8jGFQrBUMZprtjAJ67BnfmYgE0mmGLV2QP10gYS1T06kBRyrtp7he6wsPiBPJ7wxPLHNUN2SGQHBTSKagndM99fuaga5Sw9OT8Fzdo7xUJXfhJ97gUnNDrknal0B00NMNvajZeQQTJyBsVSwBZtZ45ZCcq1idc7GWC0MITSk58cIVkSPXbrERUaygyY13dPeEVzjVi9aVJwUF6eJu1s8u3FCJqp2GoWIItwvZO69asX75fekFkmFpNavxM0X0dZC01TTPpV6E6PJoIfW8C06CKNHV7Gk2mkTWGSwUG4xD2L3G3XarodHDcmumFJX9Xviv0rvm38SCtin6OpjH8MHYDrj1OxTJbC2VclJxv73z2BDBquosKO
ik0fmgbPZN0FUTmjBEwHTvqd5QHTwb3nOpEz3X6YCF0lrcrQc0uhyr7gBGBs86nUBWFRp1LKjIRVTVXDipajqNDTQGNZtzvR9MUf1yJJV07inbrlPOENd7rHpKCrJtoZXOkDqInaIqoMCG3DVd353BGmZNJEKOa3DnL7fb9zwuHlvHAfCco7ZS4wAV87trWkp6skXux9v5WhkumbUyGq4ia6DM1PuqqnFfBTAWDzJsnggAJrzr8O7JbDtaXwcW9sqaOb0S6NvnUDZqiNdDQPMDOKvXRJJJQdf1FSrPCCSPEEWO1SeVwictj7rTbpWGRoukwhgJALys95pGGOQxCPzRGrtVFnGcsLN1CwI3wLbmDnNKUv3KpOLEOPRxQXeXuJRIiYCFum44c0wNr731DvHn3YEJMH4iwFONl1rolEL4w6KFUOCq7ekrE5iyUt1V32PNtuUshXRjOYjBval29JMH5GoqZlGhCczzHMA61cmuzqdFwiPCB9yzqvJTg8TqMNvwKJztFIQK4mc5Ev5rRVSozD796AVRKT8rZF39IA1kmCLdXqz7CCC8x4QjjDpxjKCXP5HkWf9mp2FNjE62a HTTP/1.0\r\n Someheader: 0X0VfvRJPKiUBYDUS0Vbdm9Rv6pQ1giLdvXeG1SbOwwEjzKceTxd5RKlt9KHVdQkZPqnZ3jLsuj67otzLqX0Q1dY1EsBI1InsyGc2Dxdr5o7W5DsBGYV0SDMyta3V9bmBJXJQ6g8R9qPtNrED4eIPvVmFY7aokhFb4TILl5UnL8qI6qqiyniYDaPVMxDlZaoCNkDbukO34fOUJD6ZN541qmjWEq1rvtAYDI77mkzWSx5zOkYd62RFmY7YKrQC5gtIVq8SBLp09Ao53S3895ABRcxjrg99lfbgLQFYwbM4FQ6ab1Ll2uybZyEU8MHPt5Czst0cRsoG819SBphxygWcCNwB93KGLi1K9eiCuAgx6Ove165KObLrvfA1rDI5hiv83Gql0UohgKtHeRmtqM0McnCO1VWAnFxpi1hxIAlBrR4w35EcaryGEKKcL34QyzD1zlF4mkQkr1EAOTgIMKoLipGUgykz7UFN1cCuWyo3CkdZvukBS3IGtEfxFuFCcnp70WTIjZxXxU4owMbWW1ER5Gsx0ilET0mzekZL0ngCikNP2BRQikRdlVBQ3eiLzDjq27UAm7ufQ9MJla8Yxd6Ea37un9DMltQwGmnmeG5pET54STq72qfY4HCerWHbCX1qwHTErMfEfIWcYldDfytUTOj7NcWRga3xW7JYpPZHdlkb24evup3lI4arY6j5a12ZcX9zVI02IJG0QD9T4zSHEV0pdVFZ8xwOlSWKuZ9VZMmRyOwmfhIPA7fDV5SP8weRlSnSCSN4YBAfzFVNfPTyeoSfVpXsxIABhXEQTg12YvAAn9390wFhEhMsT9FWIiIs7oH63tQyjdEAZSJcZ0nSQfapvi4BDsQSMv3W2DofSzxwOPrVQWRMyvP0UV0J660Gc4iZ2Tixe3DSeqg9VuNvij09aCbkBdwJh9r4UWmM1Hp1ZDF5Rr14nKtFAgjVlGlfZi4bWQKTzOlqaVbWBvxdKsJ27eelyDnasIPqo17yY5lg10Lb8nyu60Wn7l7Xb0Ndp334B5am4Vh1foctvkkhNFeIejtnjPYmWjS77rJ1aL0zJka4Xog5Oparvc93Pddf9CzCxgle00BTKNj0syVo5uqvX5PVzdhAnigU4jdPbJbcPpbpJRU4UDqIswRNJOlGfpdLnCvnPIRB2a7btjFTaE0tne0TjedGbePje1Li21rPXPX7t5LICWl1SRyqQ9x9woGEv1sI5VgpRoKtS6oxWgMERjP3LcEez3XqLiSwv0rWMlDiJhxEopz8Mklx8ZygQLiwIYx2pNq0JhKB8K1lZ8dYE5d3nRWhXwG4gFTUg2JYjnjL81WGRmjXnZEVLwYfYBUkRlqWAYHi1E6wF85BfcwvkgnEeBTiQS
lfu6xwCYaW2OEogq7tbdinvlpeEPij1qQivpcs573HPHpkXrEeXC9P2gZhmV1Rvn69NAN2lOXSVe8XotSyCG5fHFsTDYlOvYW8EBrAdWuZrwU753xwjk3QCp2ODetYze98voig4lfYHrrWT43VXcHt8J5z7U3kt5O460buwESBhgkALZdrFYyy4YQcmnAeSCw5OoLArDEmzaI4JkFBCDqQxTE9BTYA112r9ymuOo5MGkTDYZlvtvopG4ekorfLoIa13Z9L6ZilXT1cg55dvNlOrbTSHpQTYRJfJ6x71IpDFyvdbZbOHQYMm98fcN9CLqFErkpcN4JO26GIhSodGGTSnzyUxBYueawFNlGxCMTa6JseX9c7Xlo8NRaZHBPvG7Z4gUCkOdUSEW0RRTs3TSSdjEKnJ6u9RdDqqyvN8cJ7gliTd04mSyVnkmxdqVU8DrdIrkSCfVQNoFgdydDHS3wMLU6QGTGBzK5pd9EfsDEeYXtIb3CkRupM4SERGMTN8TyIxqqIyWmgjBmSGLTFOB5tsPhkVydVQNf7jBkDy6THfBy0uALVUkm2jLeTFXjajyeL4ms5Lgx0eLoz0XWN6WulXSA20zV3ObSCHbBeVUgKmPxHq5qPmAi04VFIvCOJ0rBQJh9ZHJMwvhI3VEBF6EmXOiRCn0XOhm3pfHlmaCAWrOSGuQs3NCNlFRjwmVRPY5FJrKYjH3FrLrLdU07zdViAix8C4LxVrRrMB6ligZC3CoDhFA4vMjiPU5SBRqRW4lwVnvMZEZbf0AYbBc2ymnKAOWbQwt2ldiI2qL0aLoL6YtSFUhpwMOR3LP1feUq6XRO5xc9V02nEt9MRQsl5MgmKMcXap4HqAN0yATpjAGRnWqEnE7E1XZg95cEl2gO4HXejKzR0kiTUudcw6P4t1RYLRx7isZNJxiq1JZz6FpEe7QhwGbhPySNMbXJtmYuhAaTpfGdGKMxvHHB9LmELOChdyfjHMwMZ2B0xgU2eJgJimCwLH3UEmExgAwJDD4GSCqevYAMK4P9FKPl0dku0KZ7uOJ8oNloEsrbvMuhuKFDuO1PNvxtdCcgASzNVzdueOtUm1giZIDqbb6j11nqi9NoFeck1zZi2kfGF7OeUp4vYszuhQNi4vd03QeVAduM9h9v36Nz1YobRxB2CjTp6qdKdW9IYBp8aExZpipnJIbfD2hTWE44kIu7Q17f4C9kycGjsLwAWkVbfTRmBMU8SbVKV1EJTrN1gGqGX7quSwg1Vp4qslKAk6EIkoReIl5DuzuH8Rbvrkp5LFFAhNhb1hvXvVWcibtDjQSradNtuYzGf2AAduhxOTnZjzbsceGYhQA5a5NtqxE2GBlW8CPoPzIyfMfPjdAIUmAcns7Fkp44nju2htwhryUyidEzDVyTwevquARjt5a7eu8qIKfPrYgbOAlPgA1JHNi55ivTNpDuQ8drNiafZIntA43HI447WtITYYvLxFRG8OWvJRwI0N7dvHYO8H8lYI1OwatfvLKlJqjtdJBBvMWXdT4SbxHUdNTDUQmqFGZaLx1AvYPnJTYRzrqn5ZnXyWQ1ZCwtvZK209TxoezJ2sGorE46C7Zyki6EcXlX2A8upUUh9IhqLYTzidIRrAPE5mZmosyDyShjnRiN5CLXZAI21eV4v3a6WXI8TKkUk3fhhajOgPXshlyCEfDAyESpz1J8RECu6vQs81E1ZNE5ha5UGw2wk3Ea8oSTfqTiu0OeisV2a6bfldvW4x0OL8PS57uuY0v0OZPSUPWmPQgnmJRVw8vmh62bpFekMnUH7y31fXU6MIyZaiBs1FEu7qF6irBszHt2ARy50SjgGwQZWcecgvB8gB874g3ES9mZer3diYGF3Wssmsm6XRdsNcuNn3yzuoi52cRrBYUOISegTBVApn4zfuCC9Y4AAfe6wmmiuN8hL6KJeOjrdK5EFQHGyrzeuIMaT3B2nKz1PNONVQ0udbqCQebz3cq7NPe6kGKFLiE6euW
jdoMuAbuu8rTkAa42ensXz4a1Yo450ZVgYypaDtepDQWFkJyTHDW1HTVZfCok0tp7STRiQ8n3NKxOUSL9veuTsDs1FaV2rbzR3DvkEJrhJ10Rm0pvLgui5GUDKyWLnrqcNVtOIzFaj9K5pwMfnREm1VIs84ePX0GsMjirfOfubzDoYjavbiCtTB86nKx0tfCKtl0yUQ5PWSBqdGASY3mr5hZcFZ9bA6uXXGTNqMpUH3gqxCoF6t2yAim93t77jYkiFt3OBlBRVQzRsPbgEKRXbX3bWQj6NpDzNCQPYTs45HsQB967f4yByzLH8X289YAZJhJJyFTMCLbpdKFuMBX5Msyr4d15sBa1h5bI13dqU14WBnMKD12LkHMjHiyde6xf5EELf082sUfiAZaROFuDCDnA89p6y6oYEUgF1L9yQElZO4R6IrkJsEFN9hvARf3CH4ENqbYxtUN9gsB9CLCGKMy2R4wGKU3Dkyea27YCR4QHCdqX3HqOpy12uxBANvbrfEro9q5NJrGK7WVq3nNabN05x4TmIZk3asc8ehvDyhSgQLY0wwyvrkcYqNiETybJ57RjwVg1YE0IZEBfyAUNXE4goc2jtbZbHfcpTzt08pSJQZTAzuxrdQLS4EnaFHPpMdPh1YXUdclj6g2sjYbhoTYcV97bVDAUztMZ4EarUcv6tgQOvK66RmJCF2zVEpFDBS6AVZJWzrVlnuiweXpH0L9eY2Wy2EuAHi7gL4o0i0AkOapqY1TPUWUwBaVrKQzkL8QQbczgc97pMvSnGYMlcSdzlamFtUmRoOPmhBGMpVqmcxnstnqJ0TXMV65zbRN2hk3YVF5HwPjuWJmfkVYnyazuqKuaaohrQIe7YOOSAmD7C2vDnI50y1oScQqIPb87QAmguFz7jfNBSPymjPJ7UrToaJen7LEQr8S2b69ayZYNIyWbcpaW5ACUqdyT5AeHYhdENORnWS2B17qnBPtyvb4WujJCafLmsMFhQbcGonDZkHEOAnOcwRwJ4KIPr4MlQLRKsdnurPDDEmpCtCnFg8vPObOPHoHgICb9j35pG1YNhAAGIGTZ4g3JTJzFvTcW7GDRxREPZffKOuQTJoMYYaaPwnE0SainEpCFAukJbDy1ss5cZt60nqTw1asLzwMKJu5PHpU9sB9YN7J2cPhIbfb4387zSmSvqbt3I8NFjDbuYEhe6nZ7gRT5Th0W0MoyzHlmy4MSXbaAfUJNsLQJmdhdVKDsqMz0aXKIVNsXtn88owrhw0yqxU0K3IfTothafhpQ8daRUnbjzULViWRvUz7dI1N3GgylRzaEXQPgbj0DQ7RujNTcJoSp7I1ELjFFSBZDm4Jx5eXq0aS2SKJPFX7XmFfkkR99wRiHx4ByVTL5umojRhY5j8vg3l3yfliJbeOTXckaYiezrucuHaiVFWR2kjk9PUm57bDpvtSFMic652iDufj4hqpy5MH5r2lg67T6Bbb3fcq49cVJ3hkN2GfRqVhoPxmHyvotu5koheVh7oHDaLaf4VvcQMd5MF8sicaX3GXfoLjlfFZwfJBpXNbbVemD7XghpIEwuFjA1USU8yJnTdvCJ2bFmPNWFeWsBVDyl7XUsbgB3K2zz806xODZT639dqiqhGXQNbgYtShikQhiHhZF4wf4IY588LE4EO2bdXBb2Wezm8Gl2J5GAfqnx5Z6NF7h1gGkM27hpnmKNylKZjqTNANj0CRU4awpdVrYGX7hT0u452Y5bXpVl7cLuK7j2k7VG93NXPsXADhQA8R9WDcpU0PLzFWFq1omoQ9ZRSlvh8R4pRp4vHIYf4A5uQEmv5Owr4pFQcWdp5GAdkpBaSHvUhvMxOSpsqVB2LHvvs1RiOUHHhHdZEKpX25mK9moud8pKT4efru1SlRRSsxdz87hTJMUrueydHDPXbo9AvExctdqxuCk03Fy8cB57qrkQQ50oGNuTNPColMrwVfmuTt81uSZremLbINILnCVXEnvTugRQfF
YMnprqMB4mVJfZfh6XVLdOyW4BPaFrBsZGFy7udoWJwE8ACx4UpJW6m1ltckofzA6AUxzXprXDCCL118m8bBB2hzDKmqeLk5ZYKsLROkTqRAxmJjBSZSo2XBroO5rVvkOZrOZRe8NgaHFMLPn0I6hsqwA7VdKlpbqknax84iWrtBe8ErxgPIQeYhELyK1deW1YWBagD21MBTc2h5LliIlglZg41H8Zl3GvUv0XNZegR5bx1kiM9WFGV9Yt37iQQGquWAMKCAb6AqpkCtKs7sXKaEAVsbh32tlkAg4ngspjwzYHTPYKUuigPX5K8siUfaAW9WJl7r8dc4ju97osWETOcBENLsfwB66TvsttORtOedylnErplZP3hjt7o39JllXDobj3l10bSr4B09eYVWi2DLGavYktKSKj1PrqzuGUaqcFxqoebpuDEAx5vl8ZmSYrmS2RBJ1n2s3lkKdaVWTmfIXlyMMT7Ac3lCXpGNnpf8ccTffv3E0fBrpCSpVc48dM5e5iTpRPrfWxAjrud9jSrqVBXsw3pqUvhuVmBpmwoKAfQGxHrauna3f48AFefGDozxXXjpdM9ZDWHsRUBTFNzDs8tUATtegSzZfNJCS9k0p5q2cueyU1mtwMJIdf0FrsVGiAyX7PFkWvLHi29fpprZQd0gbMMw2Bt10ZbZCsjPX261cXmVa6ZPnkVQm2w1ory3uWejuq20oQCyXTYyv1Ki4tbdPxoNn04Je7uS3QHDCsUl4i9zKNhBJ3g55bhIZWfwmLi3S7oY16gImdC6vvjsMKkCPzXv4pPaVhHH7o4f0mWEz30k4o7GQNOUy8LPM3NmlZF7QaIBdRfozG86jwQkC3jTNR357pdPjOqMERtIS4WEJBgbaeUCu5MOhsNdaD91iCeghIpOECFyTdEkUCGPPCIAtuAOKBdhPu40UxHx30dELMTK3azHOuOnLTsdiM4KJ9yF4Ab2eiz5j2T95sDx3aiEJDVDPCa55hO0XTBM9OSNtdzjdTdZT19XrwD0wPWZcBhfJ66X1uNM2eud1btzglqZP52qqYU7BK2M3BBZKKjy7P6YzmgaPHWnFGHZdwdz3Yq6e3N76Cjkfl8Sy0mkwd6pt0geDM1jNNZrcT8dUfLLaiUqcZm1KRVdpZaBrboDSuCxfWYlxqgsldwlGL4C06ceFUDXX8PzxzWEgOd8OU4F22pcNJOnwJGo6rYA3tvhAuq2WKVg6tgFCb1p7dzF4Ke3J0dv3IneMSNnHG4hkvxW6VzIykDUtYEjMQO35tdnEA0vMVLXIahpJpz4HGs5wwRgoZx1e1zD1pXi7KmEVTlfattgcGFlKjZJ60fEdloZEmiXodxT63CzuJHnjHDOL8qcMzTxHb8OCainga4w1fk4uILLAWqmTFpDcFGSF5lbOFUwhvtMK6knIWZ8ZApZvTGBt1qv3xKUJqPcWiweI4kk57zgyTPZku2mg4fJWDKSfiRSi7LvtpKkdqjein9lP7LMv5lKutprVzjmvHBPjunXGqakWx39xYH8RD6qF3Fw2BnIIesiicZsDv69Ggbu9Y334UeFPNIJ3LGp2I8xcUxlP5dJAh4V05p1HvIZ5Fhk0oCWlvNXdLqzbVsbfW9jWyQTaZXzw7WT3rqFQc7wvw4ayp5eKmUclqB1yOvrI14XGhmH7QMaAYNTIE2RHjYXVgvbmFRi0oB1v4nDEeSTn3KHBRQD8TilCagKg0XYPj2eAgWs12ZRYzlGyCvYZ1pol5wAwc9AFFGwsTJ9UYkbxlZv7wKDx7nFzlUSMC1kMvS2ECwvHzSycqHPRwCGipvG6kWz0mGvASXeKjm47iMROoY0MRK0uvgNdTTOTdxkMgOuCDIlxfit5QKjyzaVAg2kDwENfSd6XPMgSprTSLuNDXdg5NHCwUvDbEHVxpMgOItZymPZtPweOrnPdlEB4UwLZ8jqtShi5oDYvhkh85FwwT25OHFvDUWTTCV5n73pQ8kLo8zsB3mbWfGw
g62guj3C50Dh42fAZEPBRSHDRTg3r0z39Vyj490lk2UpZeNyylwuEKmuIqEkbE3BRT2YEjTM8a2PU5grCuzculibcoRUpb1sIQiMRTf4wrtT1CnKcoUJ1T28DC04dTJVRcm3w3WzNLdrnovkX6NahblTzDvq5eXkoEaZv6HClmGuho4FH6s6i0OdmmW8qkNOnk7BhexiyAd3UYERlFwvZ6LP55tFOc3vnlhyylx1rTTgu1NFljRNs7rGiT7SnGFaFK7GITEZFEYI7DmOEUZXxDSHjYuOVN0YAJP2cZFgagyMwGJdrpH8S7cewYPMKz2Go2GBKl1OA6pJ8T91tUdEcGVg9JCMQUA4sBtlIuRTVV3cduIhsLCTi2ewItkh9MRP1kevVa9WcXejQQKreZmq5EZtzThW71r7E2tcvwFeqiwv3JZnV16bZ7NwZT6uvSrOnIFUyMsxhh8xCkVY82VLTAZhPXB8t6CbyjZ5stos6WmNZgoEsD8GU8pmzSTubAqQXkTbiODF2pePe6S9uQ9HngGGBnOjY4QUcAcScDsfflyXVqyxgTelGD4vXoba6qRWCqc9LKpyk4jCKYvLX9tzXusO7bhT2KRvF4MObDqdE4KnCCIF3zeVD0vImR20MmRTBHRCNm3s6GfyeTYEAlW3L2igZJ7Myj5zGLccMt2EohGc38HfWZ4mlvXRLHKB233PyKALYifqlAxTXaWUk13o6nACQDvN7DxSCA0daJeuznK1Dr52bC4IXCTahK1An6LkQMfsXb7Qus6ey241Vb4wTgFHqsdCx7qPxeAghmsTOHRVl\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/valid/024.py000066400000000000000000000405511514360242400224630ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.config import Config cfg = Config() cfg.set('limit_request_line', 0) cfg.set('limit_request_field_size', 0) request = { "method": "PUT", "uri": uri("/q=08aP8931Ltyl9nqyJvjMaRCOgDV3uONtAdHABjoZUG6KAP6h3Vh97O3GJjjovXYgNdrhxc7TriXoAmeehZMJx88EyhcPXO0f09Nvd128SZnxZ2r5jFDELkn26reKRysODSLBZLfjU3vxLzLXKWeFOFJKcZYRH9V7hC98DDS4ZsS7weUksBuK6m86aLNHHHB0Xbyxv1TiDbOWYIzKxV0eZKyk0CaDLDiR0CRuMOf4rwBeuHoMrumzafrFI5iL72ANQZmOvKdk1qQeXkRqEG11YU0kF7f1hSlmgiIgg5maWiBsA9sAg36IIXZMWwJF63zpMgAyjTT8l4pQhSBfhY2xbGAWmLGpyd1rlBm0O5LCoKpnQuTACm2azi0x6a1Qbry9flQBO4jHge2dXiD1si6Gh5q8fZu8ZQ7LLWii2u4rGB7E4XlhnClrCHg5vJmjYf2AItYPA0ogsiIdEEQGpzMJPqrp8Icn5kAAimWF1aCYaDjcdSgWI48PnoxlzIHX50EPFcPOSLecjkstD9z66H554sUXfWn3Mk9lnOUlse6nx0u1YClFK4UFXp98ru9eBBr7pkAsfZ34yPskayGyXPPyzWyBfVd28UuvdEG47SMdyqEpX0rFdk67fAYij0PWMK79mDmGAS37O821o18XUbu0GQjsqAGVMN9LDIAliD9QqtlwdEnplKkUyyZ7GAFJCFffgzppU9CjA2FbPX6ZjTOi4sPoYEyhyeQKVqAe9keYeDpU2qDwq83XEDQUKvP0w48GyavSmdBcrMXjUsu0PfdYpSaKwarrUB3i93HgoQB3ZJIR4lW6iPRTmm28OEKq2MIJGAoTXxCZYM5UacRldlqQOj6JkYz6y7ppWOjJ9yiCUEenuvfcItgmw9HIgGA59JxO8NDLEZLSONfuIgiV7wjsJnxuTOlU4vkjV7fTuOeU91xez7UKhaTqqEW3XBUSLjhKi3IkZg7ukrGZTWPhijFv2EZwEWDAyLlHvZB4X738zGJUlEX1k52EHwrKVKdLfePcaOjAGKsongHBFYxYC8vBBLuKm9RWexKCT14M25pCGloJXZ4OpBRfDQA2kobLUcEXEpzqRBPGN2JdNSBOFlUtUxWKnnPBM6r9S356l3k1o9zTIPeoIitWRjASs4A0iwYc8p5vv5Kt8KtsmW7Xv8dlU8HbZHsy3LI7O9BpUH8cJubqdEhooKABkx71pdcsZGhZb6epyTiPyvOhdJ7tNtFy3KQOameqTgGyd53Z42eZ0AjaOEvnzermi2E0xo3MMHFhB74TFtNAI3ppxxyqknc1mzUqZ49Wi8YPBg9ids6IgZvddBQYvwEozkmyGAkatQtt9TD4LjU3TyyUlhNG21q7CzEEl8NNsVrV6QyHsfw7E5w7XcoT7OQkBYoZwHIAjfekehnpc2llRtRY5m43fPVasmsVazOR36DRSLZJPHAqUDO0LInu9mgP57Mnz9CgylEmdE2aaYs426rnTFR3G3CfjLofHfjaLOkAegr4W3jx6MNMMOMZw2u46YTCnlfbBK6ZA1UYeAH1DIQJykcSQESinC8HpYIJt9A8g7UT0awzRP1F9nHa3wDnaAHndQYKMrjzlWo8ejQ0XHWgHhqnWHgW4h9sOnJckH00CYK1fHUKASJ3D8kOKax6uplexfz6BCvAoL9zm5TjeB1yxrpLp9NjjTWSKG2HOZhPkGpdEqU4mjnN2AkUVACPGos5YLBmTnSrdOEGZJDlAvJOUt800Mu3BYc1MiDIB6LMSSV5RsIUDFOzNletGQoq4G3yHZmx78uEse5vUTPFF3KT8LCrssqdIU9H97Npgf6N5j8arQ7ykL
zN459jJaUzpGIo6uowPnUSatDf9GAvAmWNvsVTz6bYiAV71C7QF0C7UolYIQY6DHJEHejgX2YMEovWNLPL50eeC51h4DdPNv5G4ZdNtQTRVybYBZMpetGDiFmXN0JKa1sKHOSZxdrhKjxDIhrYVyCcRUMQ0sjGGHFuOcRszr6E5igEMtsebHQ3KYiGd5B27LikpUHhk61rgZlulHdMoS6YgQs6SV6UMVNku6sCw529xhUciDwRMhsbAjDlahYbrGa3NryxyV5LrXONGGKCchCqv7vDMdAtPrVr8M2vL5MySQAC3g90iugGQcLH3hCf9f1Kn5X0hM4KZTfwOPJhlfJsMRNhssiDoXaycUvOUS58266yPDlitPIAzO03XClm4EDPXGIwcwiFr7FcDo3tQIMZVy87i48Zb80s3zAYRiBIS0vO3RKGx3OGN5zid2B7MfnfLzvpvgZoirHhAqXffnym5abpZNzGuo5GowTRA2Ptk4Ve2JFoHACWpD6HiGnRZ9QVOmPICoQrSUQw45Jlk9onKJz5Erhnx0943Uno6tMJ5jbrWBNiIO7i04xzRBgujeiAJvuQkVDX2QLKRxZ7s6rhdfOaq6R6uL108gEzzlXOLqTTJXgM63rcUWNbE7wsIXcCFSF59LLJ7G5Qea33suxdDX6DcK4a0VMZoxmWPtCi1dAT9ggJqc2Sh7mkAqizaB16RXZvSydchpdVj6s4qn4ivr0HKHdAstX0XZ0FFU6lOiNmU3vasMg2uaVG8tyuG8N8VsuXIOQs7xtFxDhilYb8MQ9vES9pWfWPSXFlJAq4XKPY8a0JOIx57EQuWHo3uWgRTIRThvZP9YYzSnjGIHwjS8JeppICHofADXZhJ0uDQaQs7MiXEALpGmT3W6w0G3tBdZcuTDkWx1HsT5jd9jQeJpgD2VxdKh8U4Q3vANTAuwBXLJ2P0stS8Q72JWgNPwKYTY9cPoaGZlUFGgVsq8CdEFH9yW0c27G5s5sfHsyep6t4VxIHHMOX2GmMRyGxDI33am1J7ZmJ1NyXiwkHxtPH5QBpU2PMu2Guf3xIxlk3snMkMAsGO0vYfqO9tdIgdxMYO3HZTYv99OXaHcNQ5u0pRZZyVrNOIPurkEOdJy0nowPemIgUuHWh8vQCuDZav1m35AOl6ftSFuChSm5KstEWnC7q8mJ0juJEBkCRmQphP3V1pqiDjz6YA90qEe7MA3nzT0nHG8A1hWlqcPVPNz4qWNF6Fq1ub4075aXO0H7Krb6rhWGb3ZRPjpb4BKN8jGFQrBUMZprtjAJ67BnfmYgE0mmGLV2QP10gYS1T06kBRyrtp7he6wsPiBPJ7wxPLHNUN2SGQHBTSKagndM99fuaga5Sw9OT8Fzdo7xUJXfhJ97gUnNDrknal0B00NMNvajZeQQTJyBsVSwBZtZ45ZCcq1idc7GWC0MITSk58cIVkSPXbrERUaygyY13dPeEVzjVi9aVJwUF6eJu1s8u3FCJqp2GoWIItwvZO69asX75fekFkmFpNavxM0X0dZC01TTPpV6E6PJoIfW8C06CKNHV7Gk2mkTWGSwUG4xD2L3G3XarodHDcmumFJX9Xviv0rvm38SCtin6OpjH8MHYDrj1OxTJbC2VclJxv73z2BDBquosKOik0fmgbPZN0FUTmjBEwHTvqd5QHTwb3nOpEz3X6YCF0lrcrQc0uhyr7gBGBs86nUBWFRp1LKjIRVTVXDipajqNDTQGNZtzvR9MUf1yJJV07inbrlPOENd7rHpKCrJtoZXOkDqInaIqoMCG3DVd353BGmZNJEKOa3DnL7fb9zwuHlvHAfCco7ZS4wAV87trWkp6skXux9v5WhkumbUyGq4ia6DM1PuqqnFfBTAWDzJsnggAJrzr8O7JbDtaXwcW9sqaOb0S6NvnUDZqiNdDQPMDOKvXRJJJQdf1FSrPCCSPEEWO1SeVwictj7rTbpWGRoukwhgJALys95pGGOQxCPzRGrtVFnGcsL
N1CwI3wLbmDnNKUv3KpOLEOPRxQXeXuJRIiYCFum44c0wNr731DvHn3YEJMH4iwFONl1rolEL4w6KFUOCq7ekrE5iyUt1V32PNtuUshXRjOYjBval29JMH5GoqZlGhCczzHMA61cmuzqdFwiPCB9yzqvJTg8TqMNvwKJztFIQK4mc5Ev5rRVSozD796AVRKT8rZF39IA1kmCLdXqz7CCC8x4QjjDpxjKCXP5HkWf9mp2FNBjE3pAeaEc6Vk2ENLlW8WVCe08aP8931Ltyl9nqyJvjMaRCOgDV3uONtAdHABjoZUG6KAP6h3Vh97O3GJjjovXYgNdrhxc7TriXoAmeehZMJx88EyhcPXO0f09Nvd128SZnxZ2r5jFDELkn26reKRysODSLBZLfjU3vxLzLXKWeFOFJKcZYRH9V7hC98DDS4ZsS7weUksBuK6m86aLNHHHB0Xbyxv1TiDbOWYIzKxV0eZKyk0CaDLDiR0CRuMOf4rwBeuHoMrumzafrFI5iL72ANQZmOvKdk1qQeXkRqEG11YU0kF7f1hSlmgiIgg5maWiBsA9sAg36IIXZMWwJF63zpMgAyjTT8l4pQhSBfhY2xbGAWmLGpyd1rlBm0O5LCoKpnQuTACm2azi0x6a1Qbry9flQBO4jHge2dXiD1si6Gh5q8fZu8ZQ7LLWii2u4rGB7E4XlhnClrCHg5vJmjYf2AItYPA0ogsiIdEEQGpzMJPqrp8Icn5kAAimWF1aCYaDjcdSgWI48PnoxlzIHX50EPFcPOSLecjkstD9z66H554sUXfWn3Mk9lnOUlse6nx0u1YClFK4UFXp98ru9eBBr7pkAsfZ34yPskayGyXPPyzWyBfVd28UuvdEG47SMdyqEpX0rFdk67fAYij0PWMK79mDmGAS37O821o18XUbu0GQjsqAGVMN9LDIAliD9QqtlwdEnplKkUyyZ7GAFJCFffgzppU9CjA2FbPX6ZjTOi4sPoYEyhyeQKVqAe9keYeDpU2qDwq83XEDQUKvP0w48GyavSmdBcrMXjUsu0PfdYpSaKwarrUB3i93HgoQB3ZJIR4lW6iPRTmm28OEKq2MIJGAoTXxCZYM5UacRldlqQOj6JkYz6y7ppWOjJ9yiCUEenuvfcItgmw9HIgGA59JxO8NDLEZLSONfuIgiV7wjsJnxuTOlU4vkjV7fTuOeU91xez7UKhaTqqEW3XBUSLjhKi3IkZg7ukrGZTWPhijFv2EZwEWDAyLlHvZB4X738zGJUlEX1k52EHwrKVKdLfePcaOjAGKsongHBFYxYC8vBBLuKm9RWexKCT14M25pCGloJXZ4OpBRfDQA2kobLUcEXEpzqRBPGN2JdNSBOFlUtUxWKnnPBM6r9S356l3k1o9zTIPeoIitWRjASs4A0iwYc8p5vv5Kt8KtsmW7Xv8dlU8HbZHsy3LI7O9BpUH8cJubqdEhooKABkx71pdcsZGhZb6epyTiPyvOhdJ7tNtFy3KQOameqTgGyd53Z42eZ0AjaOEvnzermi2E0xo3MMHFhB74TFtNAI3ppxxyqknc1mzUqZ49Wi8YPBg9ids6IgZvddBQYvwEozkmyGAkatQtt9TD4LjU3TyyUlhNG21q7CzEEl8NNsVrV6QyHsfw7E5w7XcoT7OQkBYoZwHIAjfekehnpc2llRtRY5m43fPVasmsVazOR36DRSLZJPHAqUDO0LInu9mgP57Mnz9CgylEmdE2aaYs426rnTFR3G3CfjLofHfjaLOkAegr4W3jx6MNMMOMZw2u46YTCnlfbBK6ZA1UYeAH1DIQJykcSQESinC8HpYIJt9A8g7UT0awzRP1F9nHa3wDnaAHndQYKMrjzlWo8ejQ0XHWgHhqnWHgW4h9sOnJckH00CYK1fHUKASJ3D8kOKax6uplexfz6BCvAoL9zm5TjeB1yxrpLp9NjjTWSKG2HOZhPkGpdEqU4mjnN2AkUVACPGos5YLBmTnSrdOEGZJDlAv
JOUt800Mu3BYc1MiDIB6LMSSV5RsIUDFOzNletGQoq4G3yHZmx78uEse5vUTPFF3KT8LCrssqdIU9H97Npgf6N5j8arQ7ykLzN459jJaUzpGIo6uowPnUSatDf9GAvAmWNvsVTz6bYiAV71C7QF0C7UolYIQY6DHJEHejgX2YMEovWNLPL50eeC51h4DdPNv5G4ZdNtQTRVybYBZMpetGDiFmXN0JKa1sKHOSZxdrhKjxDIhrYVyCcRUMQ0sjGGHFuOcRszr6E5igEMtsebHQ3KYiGd5B27LikpUHhk61rgZlulHdMoS6YgQs6SV6UMVNku6sCw529xhUciDwRMhsbAjDlahYbrGa3NryxyV5LrXONGGKCchCqv7vDMdAtPrVr8M2vL5MySQAC3g90iugGQcLH3hCf9f1Kn5X0hM4KZTfwOPJhlfJsMRNhssiDoXaycUvOUS58266yPDlitPIAzO03XClm4EDPXGIwcwiFr7FcDo3tQIMZVy87i48Zb80s3zAYRiBIS0vO3RKGx3OGN5zid2B7MfnfLzvpvgZoirHhAqXffnym5abpZNzGuo5GowTRA2Ptk4Ve2JFoHACWpD6HiGnRZ9QVOmPICoQrSUQw45Jlk9onKJz5Erhnx0943Uno6tMJ5jbrWBNiIO7i04xzRBgujeiAJvuQkVDX2QLKRxZ7s6rhdfOaq6R6uL108gEzzlXOLqTTJXgM63rcUWNbE7wsIXcCFSF59LLJ7G5Qea33suxdDX6DcK4a0VMZoxmWPtCi1dAT9ggJqc2Sh7mkAqizaB16RXZvSydchpdVj6s4qn4ivr0HKHdAstX0XZ0FFU6lOiNmU3vasMg2uaVG8tyuG8N8VsuXIOQs7xtFxDhilYb8MQ9vES9pWfWPSXFlJAq4XKPY8a0JOIx57EQuWHo3uWgRTIRThvZP9YYzSnjGIHwjS8JeppICHofADXZhJ0uDQaQs7MiXEALpGmT3W6w0G3tBdZcuTDkWx1HsT5jd9jQeJpgD2VxdKh8U4Q3vANTAuwBXLJ2P0stS8Q72JWgNPwKYTY9cPoaGZlUFGgVsq8CdEFH9yW0c27G5s5sfHsyep6t4VxIHHMOX2GmMRyGxDI33am1J7ZmJ1NyXiwkHxtPH5QBpU2PMu2Guf3xIxlk3snMkMAsGO0vYfqO9tdIgdxMYO3HZTYv99OXaHcNQ5u0pRZZyVrNOIPurkEOdJy0nowPemIgUuHWh8vQCuDZav1m35AOl6ftSFuChSm5KstEWnC7q8mJ0juJEBkCRmQphP3V1pqiDjz6YA90qEe7MA3nzT0nHG8A1hWlqcPVPNz4qWNF6Fq1ub4075aXO0H7Krb6rhWGb3ZRPjpb4BKN8jGFQrBUMZprtjAJ67BnfmYgE0mmGLV2QP10gYS1T06kBRyrtp7he6wsPiBPJ7wxPLHNUN2SGQHBTSKagndM99fuaga5Sw9OT8Fzdo7xUJXfhJ97gUnNDrknal0B00NMNvajZeQQTJyBsVSwBZtZ45ZCcq1idc7GWC0MITSk58cIVkSPXbrERUaygyY13dPeEVzjVi9aVJwUF6eJu1s8u3FCJqp2GoWIItwvZO69asX75fekFkmFpNavxM0X0dZC01TTPpV6E6PJoIfW8C06CKNHV7Gk2mkTWGSwUG4xD2L3G3XarodHDcmumFJX9Xviv0rvm38SCtin6OpjH8MHYDrj1OxTJbC2VclJxv73z2BDBquosKOik0fmgbPZN0FUTmjBEwHTvqd5QHTwb3nOpEz3X6YCF0lrcrQc0uhyr7gBGBs86nUBWFRp1LKjIRVTVXDipajqNDTQGNZtzvR9MUf1yJJV07inbrlPOENd7rHpKCrJtoZXOkDqInaIqoMCG3DVd353BGmZNJEKOa3DnL7fb9zwuHlvHAfCco7ZS4wAV87trWkp6skXux9v5WhkumbUyGq4ia6DM1PuqqnFfBTAWDzJsnggAJrzr8O7JbDtaXwcW9s
qaOb0S6NvnUDZqiNdDQPMDOKvXRJJJQdf1FSrPCCSPEEWO1SeVwictj7rTbpWGRoukwhgJALys95pGGOQxCPzRGrtVFnGcsLN1CwI3wLbmDnNKUv3KpOLEOPRxQXeXuJRIiYCFum44c0wNr731DvHn3YEJMH4iwFONl1rolEL4w6KFUOCq7ekrE5iyUt1V32PNtuUshXRjOYjBval29JMH5GoqZlGhCczzHMA61cmuzqdFwiPCB9yzqvJTg8TqMNvwKJztFIQK4mc5Ev5rRVSozD796AVRKT8rZF39IA1kmCLdXqz7CCC8x4QjjDpxjKCXP5HkWf9mp2FNjE62a"), "version": (1, 0), "headers": [ ("SOMEHEADER", "0X0VfvRJPKiUBYDUS0Vbdm9Rv6pQ1giLdvXeG1SbOwwEjzKceTxd5RKlt9KHVdQkZPqnZ3jLsuj67otzLqX0Q1dY1EsBI1InsyGc2Dxdr5o7W5DsBGYV0SDMyta3V9bmBJXJQ6g8R9qPtNrED4eIPvVmFY7aokhFb4TILl5UnL8qI6qqiyniYDaPVMxDlZaoCNkDbukO34fOUJD6ZN541qmjWEq1rvtAYDI77mkzWSx5zOkYd62RFmY7YKrQC5gtIVq8SBLp09Ao53S3895ABRcxjrg99lfbgLQFYwbM4FQ6ab1Ll2uybZyEU8MHPt5Czst0cRsoG819SBphxygWcCNwB93KGLi1K9eiCuAgx6Ove165KObLrvfA1rDI5hiv83Gql0UohgKtHeRmtqM0McnCO1VWAnFxpi1hxIAlBrR4w35EcaryGEKKcL34QyzD1zlF4mkQkr1EAOTgIMKoLipGUgykz7UFN1cCuWyo3CkdZvukBS3IGtEfxFuFCcnp70WTIjZxXxU4owMbWW1ER5Gsx0ilET0mzekZL0ngCikNP2BRQikRdlVBQ3eiLzDjq27UAm7ufQ9MJla8Yxd6Ea37un9DMltQwGmnmeG5pET54STq72qfY4HCerWHbCX1qwHTErMfEfIWcYldDfytUTOj7NcWRga3xW7JYpPZHdlkb24evup3lI4arY6j5a12ZcX9zVI02IJG0QD9T4zSHEV0pdVFZ8xwOlSWKuZ9VZMmRyOwmfhIPA7fDV5SP8weRlSnSCSN4YBAfzFVNfPTyeoSfVpXsxIABhXEQTg12YvAAn9390wFhEhMsT9FWIiIs7oH63tQyjdEAZSJcZ0nSQfapvi4BDsQSMv3W2DofSzxwOPrVQWRMyvP0UV0J660Gc4iZ2Tixe3DSeqg9VuNvij09aCbkBdwJh9r4UWmM1Hp1ZDF5Rr14nKtFAgjVlGlfZi4bWQKTzOlqaVbWBvxdKsJ27eelyDnasIPqo17yY5lg10Lb8nyu60Wn7l7Xb0Ndp334B5am4Vh1foctvkkhNFeIejtnjPYmWjS77rJ1aL0zJka4Xog5Oparvc93Pddf9CzCxgle00BTKNj0syVo5uqvX5PVzdhAnigU4jdPbJbcPpbpJRU4UDqIswRNJOlGfpdLnCvnPIRB2a7btjFTaE0tne0TjedGbePje1Li21rPXPX7t5LICWl1SRyqQ9x9woGEv1sI5VgpRoKtS6oxWgMERjP3LcEez3XqLiSwv0rWMlDiJhxEopz8Mklx8ZygQLiwIYx2pNq0JhKB8K1lZ8dYE5d3nRWhXwG4gFTUg2JYjnjL81WGRmjXnZEVLwYfYBUkRlqWAYHi1E6wF85BfcwvkgnEeBTiQSlfu6xwCYaW2OEogq7tbdinvlpeEPij1qQivpcs573HPHpkXrEeXC9P2gZhmV1Rvn69NAN2lOXSVe8XotSyCG5fHFsTDYlOvYW8EBrAdWuZrwU753xwjk3QCp2ODetYze98voig4lfYHrrWT43VXcHt8J5z7U3kt5O460buwESBhgkALZdrFYyy4YQcmnAeSCw5OoLArDEmzaI4JkFBCDqQxTE9BTYA112r9ymu
Oo5MGkTDYZlvtvopG4ekorfLoIa13Z9L6ZilXT1cg55dvNlOrbTSHpQTYRJfJ6x71IpDFyvdbZbOHQYMm98fcN9CLqFErkpcN4JO26GIhSodGGTSnzyUxBYueawFNlGxCMTa6JseX9c7Xlo8NRaZHBPvG7Z4gUCkOdUSEW0RRTs3TSSdjEKnJ6u9RdDqqyvN8cJ7gliTd04mSyVnkmxdqVU8DrdIrkSCfVQNoFgdydDHS3wMLU6QGTGBzK5pd9EfsDEeYXtIb3CkRupM4SERGMTN8TyIxqqIyWmgjBmSGLTFOB5tsPhkVydVQNf7jBkDy6THfBy0uALVUkm2jLeTFXjajyeL4ms5Lgx0eLoz0XWN6WulXSA20zV3ObSCHbBeVUgKmPxHq5qPmAi04VFIvCOJ0rBQJh9ZHJMwvhI3VEBF6EmXOiRCn0XOhm3pfHlmaCAWrOSGuQs3NCNlFRjwmVRPY5FJrKYjH3FrLrLdU07zdViAix8C4LxVrRrMB6ligZC3CoDhFA4vMjiPU5SBRqRW4lwVnvMZEZbf0AYbBc2ymnKAOWbQwt2ldiI2qL0aLoL6YtSFUhpwMOR3LP1feUq6XRO5xc9V02nEt9MRQsl5MgmKMcXap4HqAN0yATpjAGRnWqEnE7E1XZg95cEl2gO4HXejKzR0kiTUudcw6P4t1RYLRx7isZNJxiq1JZz6FpEe7QhwGbhPySNMbXJtmYuhAaTpfGdGKMxvHHB9LmELOChdyfjHMwMZ2B0xgU2eJgJimCwLH3UEmExgAwJDD4GSCqevYAMK4P9FKPl0dku0KZ7uOJ8oNloEsrbvMuhuKFDuO1PNvxtdCcgASzNVzdueOtUm1giZIDqbb6j11nqi9NoFeck1zZi2kfGF7OeUp4vYszuhQNi4vd03QeVAduM9h9v36Nz1YobRxB2CjTp6qdKdW9IYBp8aExZpipnJIbfD2hTWE44kIu7Q17f4C9kycGjsLwAWkVbfTRmBMU8SbVKV1EJTrN1gGqGX7quSwg1Vp4qslKAk6EIkoReIl5DuzuH8Rbvrkp5LFFAhNhb1hvXvVWcibtDjQSradNtuYzGf2AAduhxOTnZjzbsceGYhQA5a5NtqxE2GBlW8CPoPzIyfMfPjdAIUmAcns7Fkp44nju2htwhryUyidEzDVyTwevquARjt5a7eu8qIKfPrYgbOAlPgA1JHNi55ivTNpDuQ8drNiafZIntA43HI447WtITYYvLxFRG8OWvJRwI0N7dvHYO8H8lYI1OwatfvLKlJqjtdJBBvMWXdT4SbxHUdNTDUQmqFGZaLx1AvYPnJTYRzrqn5ZnXyWQ1ZCwtvZK209TxoezJ2sGorE46C7Zyki6EcXlX2A8upUUh9IhqLYTzidIRrAPE5mZmosyDyShjnRiN5CLXZAI21eV4v3a6WXI8TKkUk3fhhajOgPXshlyCEfDAyESpz1J8RECu6vQs81E1ZNE5ha5UGw2wk3Ea8oSTfqTiu0OeisV2a6bfldvW4x0OL8PS57uuY0v0OZPSUPWmPQgnmJRVw8vmh62bpFekMnUH7y31fXU6MIyZaiBs1FEu7qF6irBszHt2ARy50SjgGwQZWcecgvB8gB874g3ES9mZer3diYGF3Wssmsm6XRdsNcuNn3yzuoi52cRrBYUOISegTBVApn4zfuCC9Y4AAfe6wmmiuN8hL6KJeOjrdK5EFQHGyrzeuIMaT3B2nKz1PNONVQ0udbqCQebz3cq7NPe6kGKFLiE6euWjdoMuAbuu8rTkAa42ensXz4a1Yo450ZVgYypaDtepDQWFkJyTHDW1HTVZfCok0tp7STRiQ8n3NKxOUSL9veuTsDs1FaV2rbzR3DvkEJrhJ10Rm0pvLgui5GUDKyWLnrqcNVtOIzFaj9K5pwMfnREm1VIs84ePX0GsMjirfOfubzDoYjavbiCtTB86nKx0tfCKtl0yUQ5PWSBqdGASY3mr5hZcFZ9bA6uXXGTNq
MpUH3gqxCoF6t2yAim93t77jYkiFt3OBlBRVQzRsPbgEKRXbX3bWQj6NpDzNCQPYTs45HsQB967f4yByzLH8X289YAZJhJJyFTMCLbpdKFuMBX5Msyr4d15sBa1h5bI13dqU14WBnMKD12LkHMjHiyde6xf5EELf082sUfiAZaROFuDCDnA89p6y6oYEUgF1L9yQElZO4R6IrkJsEFN9hvARf3CH4ENqbYxtUN9gsB9CLCGKMy2R4wGKU3Dkyea27YCR4QHCdqX3HqOpy12uxBANvbrfEro9q5NJrGK7WVq3nNabN05x4TmIZk3asc8ehvDyhSgQLY0wwyvrkcYqNiETybJ57RjwVg1YE0IZEBfyAUNXE4goc2jtbZbHfcpTzt08pSJQZTAzuxrdQLS4EnaFHPpMdPh1YXUdclj6g2sjYbhoTYcV97bVDAUztMZ4EarUcv6tgQOvK66RmJCF2zVEpFDBS6AVZJWzrVlnuiweXpH0L9eY2Wy2EuAHi7gL4o0i0AkOapqY1TPUWUwBaVrKQzkL8QQbczgc97pMvSnGYMlcSdzlamFtUmRoOPmhBGMpVqmcxnstnqJ0TXMV65zbRN2hk3YVF5HwPjuWJmfkVYnyazuqKuaaohrQIe7YOOSAmD7C2vDnI50y1oScQqIPb87QAmguFz7jfNBSPymjPJ7UrToaJen7LEQr8S2b69ayZYNIyWbcpaW5ACUqdyT5AeHYhdENORnWS2B17qnBPtyvb4WujJCafLmsMFhQbcGonDZkHEOAnOcwRwJ4KIPr4MlQLRKsdnurPDDEmpCtCnFg8vPObOPHoHgICb9j35pG1YNhAAGIGTZ4g3JTJzFvTcW7GDRxREPZffKOuQTJoMYYaaPwnE0SainEpCFAukJbDy1ss5cZt60nqTw1asLzwMKJu5PHpU9sB9YN7J2cPhIbfb4387zSmSvqbt3I8NFjDbuYEhe6nZ7gRT5Th0W0MoyzHlmy4MSXbaAfUJNsLQJmdhdVKDsqMz0aXKIVNsXtn88owrhw0yqxU0K3IfTothafhpQ8daRUnbjzULViWRvUz7dI1N3GgylRzaEXQPgbj0DQ7RujNTcJoSp7I1ELjFFSBZDm4Jx5eXq0aS2SKJPFX7XmFfkkR99wRiHx4ByVTL5umojRhY5j8vg3l3yfliJbeOTXckaYiezrucuHaiVFWR2kjk9PUm57bDpvtSFMic652iDufj4hqpy5MH5r2lg67T6Bbb3fcq49cVJ3hkN2GfRqVhoPxmHyvotu5koheVh7oHDaLaf4VvcQMd5MF8sicaX3GXfoLjlfFZwfJBpXNbbVemD7XghpIEwuFjA1USU8yJnTdvCJ2bFmPNWFeWsBVDyl7XUsbgB3K2zz806xODZT639dqiqhGXQNbgYtShikQhiHhZF4wf4IY588LE4EO2bdXBb2Wezm8Gl2J5GAfqnx5Z6NF7h1gGkM27hpnmKNylKZjqTNANj0CRU4awpdVrYGX7hT0u452Y5bXpVl7cLuK7j2k7VG93NXPsXADhQA8R9WDcpU0PLzFWFq1omoQ9ZRSlvh8R4pRp4vHIYf4A5uQEmv5Owr4pFQcWdp5GAdkpBaSHvUhvMxOSpsqVB2LHvvs1RiOUHHhHdZEKpX25mK9moud8pKT4efru1SlRRSsxdz87hTJMUrueydHDPXbo9AvExctdqxuCk03Fy8cB57qrkQQ50oGNuTNPColMrwVfmuTt81uSZremLbINILnCVXEnvTugRQfFYMnprqMB4mVJfZfh6XVLdOyW4BPaFrBsZGFy7udoWJwE8ACx4UpJW6m1ltckofzA6AUxzXprXDCCL118m8bBB2hzDKmqeLk5ZYKsLROkTqRAxmJjBSZSo2XBroO5rVvkOZrOZRe8NgaHFMLPn0I6hsqwA7VdKlpbqknax84iWrtBe8ErxgPIQeYhELyK1deW1YWBagD21MBTc2h5LliIlglZg41H8Zl3GvUv0X
NZegR5bx1kiM9WFGV9Yt37iQQGquWAMKCAb6AqpkCtKs7sXKaEAVsbh32tlkAg4ngspjwzYHTPYKUuigPX5K8siUfaAW9WJl7r8dc4ju97osWETOcBENLsfwB66TvsttORtOedylnErplZP3hjt7o39JllXDobj3l10bSr4B09eYVWi2DLGavYktKSKj1PrqzuGUaqcFxqoebpuDEAx5vl8ZmSYrmS2RBJ1n2s3lkKdaVWTmfIXlyMMT7Ac3lCXpGNnpf8ccTffv3E0fBrpCSpVc48dM5e5iTpRPrfWxAjrud9jSrqVBXsw3pqUvhuVmBpmwoKAfQGxHrauna3f48AFefGDozxXXjpdM9ZDWHsRUBTFNzDs8tUATtegSzZfNJCS9k0p5q2cueyU1mtwMJIdf0FrsVGiAyX7PFkWvLHi29fpprZQd0gbMMw2Bt10ZbZCsjPX261cXmVa6ZPnkVQm2w1ory3uWejuq20oQCyXTYyv1Ki4tbdPxoNn04Je7uS3QHDCsUl4i9zKNhBJ3g55bhIZWfwmLi3S7oY16gImdC6vvjsMKkCPzXv4pPaVhHH7o4f0mWEz30k4o7GQNOUy8LPM3NmlZF7QaIBdRfozG86jwQkC3jTNR357pdPjOqMERtIS4WEJBgbaeUCu5MOhsNdaD91iCeghIpOECFyTdEkUCGPPCIAtuAOKBdhPu40UxHx30dELMTK3azHOuOnLTsdiM4KJ9yF4Ab2eiz5j2T95sDx3aiEJDVDPCa55hO0XTBM9OSNtdzjdTdZT19XrwD0wPWZcBhfJ66X1uNM2eud1btzglqZP52qqYU7BK2M3BBZKKjy7P6YzmgaPHWnFGHZdwdz3Yq6e3N76Cjkfl8Sy0mkwd6pt0geDM1jNNZrcT8dUfLLaiUqcZm1KRVdpZaBrboDSuCxfWYlxqgsldwlGL4C06ceFUDXX8PzxzWEgOd8OU4F22pcNJOnwJGo6rYA3tvhAuq2WKVg6tgFCb1p7dzF4Ke3J0dv3IneMSNnHG4hkvxW6VzIykDUtYEjMQO35tdnEA0vMVLXIahpJpz4HGs5wwRgoZx1e1zD1pXi7KmEVTlfattgcGFlKjZJ60fEdloZEmiXodxT63CzuJHnjHDOL8qcMzTxHb8OCainga4w1fk4uILLAWqmTFpDcFGSF5lbOFUwhvtMK6knIWZ8ZApZvTGBt1qv3xKUJqPcWiweI4kk57zgyTPZku2mg4fJWDKSfiRSi7LvtpKkdqjein9lP7LMv5lKutprVzjmvHBPjunXGqakWx39xYH8RD6qF3Fw2BnIIesiicZsDv69Ggbu9Y334UeFPNIJ3LGp2I8xcUxlP5dJAh4V05p1HvIZ5Fhk0oCWlvNXdLqzbVsbfW9jWyQTaZXzw7WT3rqFQc7wvw4ayp5eKmUclqB1yOvrI14XGhmH7QMaAYNTIE2RHjYXVgvbmFRi0oB1v4nDEeSTn3KHBRQD8TilCagKg0XYPj2eAgWs12ZRYzlGyCvYZ1pol5wAwc9AFFGwsTJ9UYkbxlZv7wKDx7nFzlUSMC1kMvS2ECwvHzSycqHPRwCGipvG6kWz0mGvASXeKjm47iMROoY0MRK0uvgNdTTOTdxkMgOuCDIlxfit5QKjyzaVAg2kDwENfSd6XPMgSprTSLuNDXdg5NHCwUvDbEHVxpMgOItZymPZtPweOrnPdlEB4UwLZ8jqtShi5oDYvhkh85FwwT25OHFvDUWTTCV5n73pQ8kLo8zsB3mbWfGwg62guj3C50Dh42fAZEPBRSHDRTg3r0z39Vyj490lk2UpZeNyylwuEKmuIqEkbE3BRT2YEjTM8a2PU5grCuzculibcoRUpb1sIQiMRTf4wrtT1CnKcoUJ1T28DC04dTJVRcm3w3WzNLdrnovkX6NahblTzDvq5eXkoEaZv6HClmGuho4FH6s6i0OdmmW8qkNOnk7BhexiyAd3UYERlFwvZ6LP55tFOc3vnlhyyl
x1rTTgu1NFljRNs7rGiT7SnGFaFK7GITEZFEYI7DmOEUZXxDSHjYuOVN0YAJP2cZFgagyMwGJdrpH8S7cewYPMKz2Go2GBKl1OA6pJ8T91tUdEcGVg9JCMQUA4sBtlIuRTVV3cduIhsLCTi2ewItkh9MRP1kevVa9WcXejQQKreZmq5EZtzThW71r7E2tcvwFeqiwv3JZnV16bZ7NwZT6uvSrOnIFUyMsxhh8xCkVY82VLTAZhPXB8t6CbyjZ5stos6WmNZgoEsD8GU8pmzSTubAqQXkTbiODF2pePe6S9uQ9HngGGBnOjY4QUcAcScDsfflyXVqyxgTelGD4vXoba6qRWCqc9LKpyk4jCKYvLX9tzXusO7bhT2KRvF4MObDqdE4KnCCIF3zeVD0vImR20MmRTBHRCNm3s6GfyeTYEAlW3L2igZJ7Myj5zGLccMt2EohGc38HfWZ4mlvXRLHKB233PyKALYifqlAxTXaWUk13o6nACQDvN7DxSCA0daJeuznK1Dr52bC4IXCTahK1An6LkQMfsXb7Qus6ey241Vb4wTgFHqsdCx7qPxeAghmsTOHRVl") ], "body": '' } benoitc-gunicorn-f5fb19e/tests/requests/valid/025.http000066400000000000000000000002071514360242400230050ustar00rootroot00000000000000POST /chunked HTTP/1.1\r\n Transfer-Encoding: gzip\r\n Transfer-Encoding: chunked\r\n \r\n 5\r\n hello\r\n 6\r\n world\r\n 0\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/valid/025.py000066400000000000000000000005041514360242400224560ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. request = { "method": "POST", "uri": uri("/chunked"), "version": (1, 1), "headers": [ ('TRANSFER-ENCODING', 'gzip'), ('TRANSFER-ENCODING', 'chunked') ], "body": b"hello world" } benoitc-gunicorn-f5fb19e/tests/requests/valid/025_line.http000066400000000000000000000001601514360242400240120ustar00rootroot00000000000000POST /chunked HTTP/1.1\r\n Transfer-Encoding: gzip,chunked\r\n \r\n 5\r\n hello\r\n 6\r\n world\r\n 0\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/valid/025_line.py000066400000000000000000000004431514360242400234670ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
request = { "method": "POST", "uri": uri("/chunked"), "version": (1, 1), "headers": [ ('TRANSFER-ENCODING', 'gzip,chunked') ], "body": b"hello world" } benoitc-gunicorn-f5fb19e/tests/requests/valid/026.http000066400000000000000000000200511514360242400230050ustar00rootroot00000000000000GET / HTTP/1.0\r\n Someheader: 0X0VfvRJPKiUBYDUS0Vbdm9Rv6pQ1giLdvXeG1SbOwwEjzKceTxd5RKlt9KHVdQkZPqnZ3jLsuj67otzLqX0Q1dY1EsBI1InsyGc2Dxdr5o7W5DsBGYV0SDMyta3V9bmBJXJQ6g8R9qPtNrED4eIPvVmFY7aokhFb4TILl5UnL8qI6qqiyniYDaPVMxDlZaoCNkDbukO34fOUJD6ZN541qmjWEq1rvtAYDI77mkzWSx5zOkYd62RFmY7YKrQC5gtIVq8SBLp09Ao53S3895ABRcxjrg99lfbgLQFYwbM4FQ6ab1Ll2uybZyEU8MHPt5Czst0cRsoG819SBphxygWcCNwB93KGLi1K9eiCuAgx6Ove165KObLrvfA1rDI5hiv83Gql0UohgKtHeRmtqM0McnCO1VWAnFxpi1hxIAlBrR4w35EcaryGEKKcL34QyzD1zlF4mkQkr1EAOTgIMKoLipGUgykz7UFN1cCuWyo3CkdZvukBS3IGtEfxFuFCcnp70WTIjZxXxU4owMbWW1ER5Gsx0ilET0mzekZL0ngCikNP2BRQikRdlVBQ3eiLzDjq27UAm7ufQ9MJla8Yxd6Ea37un9DMltQwGmnmeG5pET54STq72qfY4HCerWHbCX1qwHTErMfEfIWcYldDfytUTOj7NcWRga3xW7JYpPZHdlkb24evup3lI4arY6j5a12ZcX9zVI02IJG0QD9T4zSHEV0pdVFZ8xwOlSWKuZ9VZMmRyOwmfhIPA7fDV5SP8weRlSnSCSN4YBAfzFVNfPTyeoSfVpXsxIABhXEQTg12YvAAn9390wFhEhMsT9FWIiIs7oH63tQyjdEAZSJcZ0nSQfapvi4BDsQSMv3W2DofSzxwOPrVQWRMyvP0UV0J660Gc4iZ2Tixe3DSeqg9VuNvij09aCbkBdwJh9r4UWmM1Hp1ZDF5Rr14nKtFAgjVlGlfZi4bWQKTzOlqaVbWBvxdKsJ27eelyDnasIPqo17yY5lg10Lb8nyu60Wn7l7Xb0Ndp334B5am4Vh1foctvkkhNFeIejtnjPYmWjS77rJ1aL0zJka4Xog5Oparvc93Pddf9CzCxgle00BTKNj0syVo5uqvX5PVzdhAnigU4jdPbJbcPpbpJRU4UDqIswRNJOlGfpdLnCvnPIRB2a7btjFTaE0tne0TjedGbePje1Li21rPXPX7t5LICWl1SRyqQ9x9woGEv1sI5VgpRoKtS6oxWgMERjP3LcEez3XqLiSwv0rWMlDiJhxEopz8Mklx8ZygQLiwIYx2pNq0JhKB8K1lZ8dYE5d3nRWhXwG4gFTUg2JYjnjL81WGRmjXnZEVLwYfYBUkRlqWAYHi1E6wF85BfcwvkgnEeBTiQSlfu6xwCYaW2OEogq7tbdinvlpeEPij1qQivpcs573HPHpkXrEeXC9P2gZhmV1Rvn69NAN2lOXSVe8XotSyCG5fHFsTDYlOvYW8EBrAdWuZrwU753xwjk3QCp2ODetYze98voig4lfYHrrWT43VXcHt8J5z7U3kt5O460buwESBhgkALZdrFYyy4YQcmnAeSCw5OoLArDEmzaI4JkFBCDqQxTE9BTYA112r9ymuOo5MGkTDYZlvtvopG4ekorfLoIa13Z9L6ZilXT1cg55dvNlOrbTSHpQTYRJfJ6x71IpDFyvdbZ
bOHQYMm98fcN9CLqFErkpcN4JO26GIhSodGGTSnzyUxBYueawFNlGxCMTa6JseX9c7Xlo8NRaZHBPvG7Z4gUCkOdUSEW0RRTs3TSSdjEKnJ6u9RdDqqyvN8cJ7gliTd04mSyVnkmxdqVU8DrdIrkSCfVQNoFgdydDHS3wMLU6QGTGBzK5pd9EfsDEeYXtIb3CkRupM4SERGMTN8TyIxqqIyWmgjBmSGLTFOB5tsPhkVydVQNf7jBkDy6THfBy0uALVUkm2jLeTFXjajyeL4ms5Lgx0eLoz0XWN6WulXSA20zV3ObSCHbBeVUgKmPxHq5qPmAi04VFIvCOJ0rBQJh9ZHJMwvhI3VEBF6EmXOiRCn0XOhm3pfHlmaCAWrOSGuQs3NCNlFRjwmVRPY5FJrKYjH3FrLrLdU07zdViAix8C4LxVrRrMB6ligZC3CoDhFA4vMjiPU5SBRqRW4lwVnvMZEZbf0AYbBc2ymnKAOWbQwt2ldiI2qL0aLoL6YtSFUhpwMOR3LP1feUq6XRO5xc9V02nEt9MRQsl5MgmKMcXap4HqAN0yATpjAGRnWqEnE7E1XZg95cEl2gO4HXejKzR0kiTUudcw6P4t1RYLRx7isZNJxiq1JZz6FpEe7QhwGbhPySNMbXJtmYuhAaTpfGdGKMxvHHB9LmELOChdyfjHMwMZ2B0xgU2eJgJimCwLH3UEmExgAwJDD4GSCqevYAMK4P9FKPl0dku0KZ7uOJ8oNloEsrbvMuhuKFDuO1PNvxtdCcgASzNVzdueOtUm1giZIDqbb6j11nqi9NoFeck1zZi2kfGF7OeUp4vYszuhQNi4vd03QeVAduM9h9v36Nz1YobRxB2CjTp6qdKdW9IYBp8aExZpipnJIbfD2hTWE44kIu7Q17f4C9kycGjsLwAWkVbfTRmBMU8SbVKV1EJTrN1gGqGX7quSwg1Vp4qslKAk6EIkoReIl5DuzuH8Rbvrkp5LFFAhNhb1hvXvVWcibtDjQSradNtuYzGf2AAduhxOTnZjzbsceGYhQA5a5NtqxE2GBlW8CPoPzIyfMfPjdAIUmAcns7Fkp44nju2htwhryUyidEzDVyTwevquARjt5a7eu8qIKfPrYgbOAlPgA1JHNi55ivTNpDuQ8drNiafZIntA43HI447WtITYYvLxFRG8OWvJRwI0N7dvHYO8H8lYI1OwatfvLKlJqjtdJBBvMWXdT4SbxHUdNTDUQmqFGZaLx1AvYPnJTYRzrqn5ZnXyWQ1ZCwtvZK209TxoezJ2sGorE46C7Zyki6EcXlX2A8upUUh9IhqLYTzidIRrAPE5mZmosyDyShjnRiN5CLXZAI21eV4v3a6WXI8TKkUk3fhhajOgPXshlyCEfDAyESpz1J8RECu6vQs81E1ZNE5ha5UGw2wk3Ea8oSTfqTiu0OeisV2a6bfldvW4x0OL8PS57uuY0v0OZPSUPWmPQgnmJRVw8vmh62bpFekMnUH7y31fXU6MIyZaiBs1FEu7qF6irBszHt2ARy50SjgGwQZWcecgvB8gB874g3ES9mZer3diYGF3Wssmsm6XRdsNcuNn3yzuoi52cRrBYUOISegTBVApn4zfuCC9Y4AAfe6wmmiuN8hL6KJeOjrdK5EFQHGyrzeuIMaT3B2nKz1PNONVQ0udbqCQebz3cq7NPe6kGKFLiE6euWjdoMuAbuu8rTkAa42ensXz4a1Yo450ZVgYypaDtepDQWFkJyTHDW1HTVZfCok0tp7STRiQ8n3NKxOUSL9veuTsDs1FaV2rbzR3DvkEJrhJ10Rm0pvLgui5GUDKyWLnrqcNVtOIzFaj9K5pwMfnREm1VIs84ePX0GsMjirfOfubzDoYjavbiCtTB86nKx0tfCKtl0yUQ5PWSBqdGASY3mr5hZcFZ9bA6uXXGTNqMpUH3gqxCoF6t2yAim93t77jYkiFt3OBlBRVQzRsPbgEKRXbX3bWQj6NpDzNCQPYTs45HsQB96
7f4yByzLH8X289YAZJhJJyFTMCLbpdKFuMBX5Msyr4d15sBa1h5bI13dqU14WBnMKD12LkHMjHiyde6xf5EELf082sUfiAZaROFuDCDnA89p6y6oYEUgF1L9yQElZO4R6IrkJsEFN9hvARf3CH4ENqbYxtUN9gsB9CLCGKMy2R4wGKU3Dkyea27YCR4QHCdqX3HqOpy12uxBANvbrfEro9q5NJrGK7WVq3nNabN05x4TmIZk3asc8ehvDyhSgQLY0wwyvrkcYqNiETybJ57RjwVg1YE0IZEBfyAUNXE4goc2jtbZbHfcpTzt08pSJQZTAzuxrdQLS4EnaFHPpMdPh1YXUdclj6g2sjYbhoTYcV97bVDAUztMZ4EarUcv6tgQOvK66RmJCF2zVEpFDBS6AVZJWzrVlnuiweXpH0L9eY2Wy2EuAHi7gL4o0i0AkOapqY1TPUWUwBaVrKQzkL8QQbczgc97pMvSnGYMlcSdzlamFtUmRoOPmhBGMpVqmcxnstnqJ0TXMV65zbRN2hk3YVF5HwPjuWJmfkVYnyazuqKuaaohrQIe7YOOSAmD7C2vDnI50y1oScQqIPb87QAmguFz7jfNBSPymjPJ7UrToaJen7LEQr8S2b69ayZYNIyWbcpaW5ACUqdyT5AeHYhdENORnWS2B17qnBPtyvb4WujJCafLmsMFhQbcGonDZkHEOAnOcwRwJ4KIPr4MlQLRKsdnurPDDEmpCtCnFg8vPObOPHoHgICb9j35pG1YNhAAGIGTZ4g3JTJzFvTcW7GDRxREPZffKOuQTJoMYYaaPwnE0SainEpCFAukJbDy1ss5cZt60nqTw1asLzwMKJu5PHpU9sB9YN7J2cPhIbfb4387zSmSvqbt3I8NFjDbuYEhe6nZ7gRT5Th0W0MoyzHlmy4MSXbaAfUJNsLQJmdhdVKDsqMz0aXKIVNsXtn88owrhw0yqxU0K3IfTothafhpQ8daRUnbjzULViWRvUz7dI1N3GgylRzaEXQPgbj0DQ7RujNTcJoSp7I1ELjFFSBZDm4Jx5eXq0aS2SKJPFX7XmFfkkR99wRiHx4ByVTL5umojRhY5j8vg3l3yfliJbeOTXckaYiezrucuHaiVFWR2kjk9PUm57bDpvtSFMic652iDufj4hqpy5MH5r2lg67T6Bbb3fcq49cVJ3hkN2GfRqVhoPxmHyvotu5koheVh7oHDaLaf4VvcQMd5MF8sicaX3GXfoLjlfFZwfJBpXNbbVemD7XghpIEwuFjA1USU8yJnTdvCJ2bFmPNWFeWsBVDyl7XUsbgB3K2zz806xODZT639dqiqhGXQNbgYtShikQhiHhZF4wf4IY588LE4EO2bdXBb2Wezm8Gl2J5GAfqnx5Z6NF7h1gGkM27hpnmKNylKZjqTNANj0CRU4awpdVrYGX7hT0u452Y5bXpVl7cLuK7j2k7VG93NXPsXADhQA8R9WDcpU0PLzFWFq1omoQ9ZRSlvh8R4pRp4vHIYf4A5uQEmv5Owr4pFQcWdp5GAdkpBaSHvUhvMxOSpsqVB2LHvvs1RiOUHHhHdZEKpX25mK9moud8pKT4efru1SlRRSsxdz87hTJMUrueydHDPXbo9AvExctdqxuCk03Fy8cB57qrkQQ50oGNuTNPColMrwVfmuTt81uSZremLbINILnCVXEnvTugRQfFYMnprqMB4mVJfZfh6XVLdOyW4BPaFrBsZGFy7udoWJwE8ACx4UpJW6m1ltckofzA6AUxzXprXDCCL118m8bBB2hzDKmqeLk5ZYKsLROkTqRAxmJjBSZSo2XBroO5rVvkOZrOZRe8NgaHFMLPn0I6hsqwA7VdKlpbqknax84iWrtBe8ErxgPIQeYhELyK1deW1YWBagD21MBTc2h5LliIlglZg41H8Zl3GvUv0XNZegR5bx1kiM9WFGV9Yt37iQQGquWAMKCAb6AqpkCtKs7sXKaEAVsbh32tlkAg4ngspjwzYHTP
YKUuigPX5K8siUfaAW9WJl7r8dc4ju97osWETOcBENLsfwB66TvsttORtOedylnErplZP3hjt7o39JllXDobj3l10bSr4B09eYVWi2DLGavYktKSKj1PrqzuGUaqcFxqoebpuDEAx5vl8ZmSYrmS2RBJ1n2s3lkKdaVWTmfIXlyMMT7Ac3lCXpGNnpf8ccTffv3E0fBrpCSpVc48dM5e5iTpRPrfWxAjrud9jSrqVBXsw3pqUvhuVmBpmwoKAfQGxHrauna3f48AFefGDozxXXjpdM9ZDWHsRUBTFNzDs8tUATtegSzZfNJCS9k0p5q2cueyU1mtwMJIdf0FrsVGiAyX7PFkWvLHi29fpprZQd0gbMMw2Bt10ZbZCsjPX261cXmVa6ZPnkVQm2w1ory3uWejuq20oQCyXTYyv1Ki4tbdPxoNn04Je7uS3QHDCsUl4i9zKNhBJ3g55bhIZWfwmLi3S7oY16gImdC6vvjsMKkCPzXv4pPaVhHH7o4f0mWEz30k4o7GQNOUy8LPM3NmlZF7QaIBdRfozG86jwQkC3jTNR357pdPjOqMERtIS4WEJBgbaeUCu5MOhsNdaD91iCeghIpOECFyTdEkUCGPPCIAtuAOKBdhPu40UxHx30dELMTK3azHOuOnLTsdiM4KJ9yF4Ab2eiz5j2T95sDx3aiEJDVDPCa55hO0XTBM9OSNtdzjdTdZT19XrwD0wPWZcBhfJ66X1uNM2eud1btzglqZP52qqYU7BK2M3BBZKKjy7P6YzmgaPHWnFGHZdwdz3Yq6e3N76Cjkfl8Sy0mkwd6pt0geDM1jNNZrcT8dUfLLaiUqcZm1KRVdpZaBrboDSuCxfWYlxqgsldwlGL4C06ceFUDXX8PzxzWEgOd8OU4F22pcNJOnwJGo6rYA3tvhAuq2WKVg6tgFCb1p7dzF4Ke3J0dv3IneMSNnHG4hkvxW6VzIykDUtYEjMQO35tdnEA0vMVLXIahpJpz4HGs5wwRgoZx1e1zD1pXi7KmEVTlfattgcGFlKjZJ60fEdloZEmiXodxT63CzuJHnjHDOL8qcMzTxHb8OCainga4w1fk4uILLAWqmTFpDcFGSF5lbOFUwhvtMK6knIWZ8ZApZvTGBt1qv3xKUJqPcWiweI4kk57zgyTPZku2mg4fJWDKSfiRSi7LvtpKkdqjein9lP7LMv5lKutprVzjmvHBPjunXGqakWx39xYH8RD6qF3Fw2BnIIesiicZsDv69Ggbu9Y334UeFPNIJ3LGp2I8xcUxlP5dJAh4V05p1HvIZ5Fhk0oCWlvNXdLqzbVsbfW9jWyQTaZXzw7WT3rqFQc7wvw4ayp5eKmUclqB1yOvrI14XGhmH7QMaAYNTIE2RHjYXVgvbmFRi0oB1v4nDEeSTn3KHBRQD8TilCagKg0XYPj2eAgWs12ZRYzlGyCvYZ1pol5wAwc9AFFGwsTJ9UYkbxlZv7wKDx7nFzlUSMC1kMvS2ECwvHzSycqHPRwCGipvG6kWz0mGvASXeKjm47iMROoY0MRK0uvgNdTTOTdxkMgOuCDIlxfit5QKjyzaVAg2kDwENfSd6XPMgSprTSLuNDXdg5NHCwUvDbEHVxpMgOItZymPZtPweOrnPdlEB4UwLZ8jqtShi5oDYvhkh85FwwT25OHFvDUWTTCV5n73pQ8kLo8zsB3mbWfGwg62guj3C50Dh42fAZEPBRSHDRTg3r0z39Vyj490lk2UpZeNyylwuEKmuIqEkbE3BRT2YEjTM8a2PU5grCuzculibcoRUpb1sIQiMRTf4wrtT1CnKcoUJ1T28DC04dTJVRcm3w3WzNLdrnovkX6NahblTzDvq5eXkoEaZv6HClmGuho4FH6s6i0OdmmW8qkNOnk7BhexiyAd3UYERlFwvZ6LP55tFOc3vnlhyylx1rTTgu1NFljRNs7rGiT7SnGFaFK7GITEZFEYI7DmOEUZXxDSHjYuOVN0YAJP2cZFgagyMwGJd
rpH8S7cewYPMKz2Go2GBKl1OA6pJ8T91tUdEcGVg9JCMQUA4sBtlIuRTVV3cduIhsLCTi2ewItkh9MRP1kevVa9WcXejQQKreZmq5EZtzThW71r7E2tcvwFeqiwv3JZnV16bZ7NwZT6uvSrOnIFUyMsxhh8xCkVY82VLTAZhPXB8t6CbyjZ5stos6WmNZgoEsD8GU8pmzSTubAqQXkTbiODF2pePe6S9uQ9HngGGBnOjY4QUcAcScDsfflyXVqyxgTelGD4vXoba6qRWCqc9LKpyk4jCKYvLX9tzXusO7bhT2KRvF4MObDqdE4KnCCIF3zeVD0vImR20MmRTBHRCNm3s6GfyeTYEAlW3L2igZJ7Myj5zGLccMt2EohGc38HfWZ4mlvXRLHKB233PyKALYifqlAxTXaWUk13o6nACQDvN7DxSCA0daJeuznK1Dr52bC4IXCTahK1An6LkQMfsXb7Qus6ey241Vb4wTgFHqsdCx7qPxeAghmsTOHRVl\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/valid/026.py000066400000000000000000000205711514360242400224650ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.config import Config cfg = Config() cfg.set('limit_request_line', 0) cfg.set('limit_request_field_size', 8210) request = { "method": "GET", "uri": uri("/"), "version": (1, 0), "headers": [ ("SOMEHEADER", "0X0VfvRJPKiUBYDUS0Vbdm9Rv6pQ1giLdvXeG1SbOwwEjzKceTxd5RKlt9KHVdQkZPqnZ3jLsuj67otzLqX0Q1dY1EsBI1InsyGc2Dxdr5o7W5DsBGYV0SDMyta3V9bmBJXJQ6g8R9qPtNrED4eIPvVmFY7aokhFb4TILl5UnL8qI6qqiyniYDaPVMxDlZaoCNkDbukO34fOUJD6ZN541qmjWEq1rvtAYDI77mkzWSx5zOkYd62RFmY7YKrQC5gtIVq8SBLp09Ao53S3895ABRcxjrg99lfbgLQFYwbM4FQ6ab1Ll2uybZyEU8MHPt5Czst0cRsoG819SBphxygWcCNwB93KGLi1K9eiCuAgx6Ove165KObLrvfA1rDI5hiv83Gql0UohgKtHeRmtqM0McnCO1VWAnFxpi1hxIAlBrR4w35EcaryGEKKcL34QyzD1zlF4mkQkr1EAOTgIMKoLipGUgykz7UFN1cCuWyo3CkdZvukBS3IGtEfxFuFCcnp70WTIjZxXxU4owMbWW1ER5Gsx0ilET0mzekZL0ngCikNP2BRQikRdlVBQ3eiLzDjq27UAm7ufQ9MJla8Yxd6Ea37un9DMltQwGmnmeG5pET54STq72qfY4HCerWHbCX1qwHTErMfEfIWcYldDfytUTOj7NcWRga3xW7JYpPZHdlkb24evup3lI4arY6j5a12ZcX9zVI02IJG0QD9T4zSHEV0pdVFZ8xwOlSWKuZ9VZMmRyOwmfhIPA7fDV5SP8weRlSnSCSN4YBAfzFVNfPTyeoSfVpXsxIABhXEQTg12YvAAn9390wFhEhMsT9FWIiIs7oH63tQyjdEAZSJcZ0nSQfapvi4BDsQSMv3W2DofSzxwOPrVQWRMyvP0UV0J660Gc4iZ2Tixe3DSeqg9VuNvij09aCbkBdwJh9r4UWmM1Hp1ZDF5Rr14nKtFAgjVlGlfZi4bWQKTzOlqaVbWBvxdKsJ27eelyDnasIPqo17yY5lg10Lb8nyu60Wn7l7Xb0Nd
p334B5am4Vh1foctvkkhNFeIejtnjPYmWjS77rJ1aL0zJka4Xog5Oparvc93Pddf9CzCxgle00BTKNj0syVo5uqvX5PVzdhAnigU4jdPbJbcPpbpJRU4UDqIswRNJOlGfpdLnCvnPIRB2a7btjFTaE0tne0TjedGbePje1Li21rPXPX7t5LICWl1SRyqQ9x9woGEv1sI5VgpRoKtS6oxWgMERjP3LcEez3XqLiSwv0rWMlDiJhxEopz8Mklx8ZygQLiwIYx2pNq0JhKB8K1lZ8dYE5d3nRWhXwG4gFTUg2JYjnjL81WGRmjXnZEVLwYfYBUkRlqWAYHi1E6wF85BfcwvkgnEeBTiQSlfu6xwCYaW2OEogq7tbdinvlpeEPij1qQivpcs573HPHpkXrEeXC9P2gZhmV1Rvn69NAN2lOXSVe8XotSyCG5fHFsTDYlOvYW8EBrAdWuZrwU753xwjk3QCp2ODetYze98voig4lfYHrrWT43VXcHt8J5z7U3kt5O460buwESBhgkALZdrFYyy4YQcmnAeSCw5OoLArDEmzaI4JkFBCDqQxTE9BTYA112r9ymuOo5MGkTDYZlvtvopG4ekorfLoIa13Z9L6ZilXT1cg55dvNlOrbTSHpQTYRJfJ6x71IpDFyvdbZbOHQYMm98fcN9CLqFErkpcN4JO26GIhSodGGTSnzyUxBYueawFNlGxCMTa6JseX9c7Xlo8NRaZHBPvG7Z4gUCkOdUSEW0RRTs3TSSdjEKnJ6u9RdDqqyvN8cJ7gliTd04mSyVnkmxdqVU8DrdIrkSCfVQNoFgdydDHS3wMLU6QGTGBzK5pd9EfsDEeYXtIb3CkRupM4SERGMTN8TyIxqqIyWmgjBmSGLTFOB5tsPhkVydVQNf7jBkDy6THfBy0uALVUkm2jLeTFXjajyeL4ms5Lgx0eLoz0XWN6WulXSA20zV3ObSCHbBeVUgKmPxHq5qPmAi04VFIvCOJ0rBQJh9ZHJMwvhI3VEBF6EmXOiRCn0XOhm3pfHlmaCAWrOSGuQs3NCNlFRjwmVRPY5FJrKYjH3FrLrLdU07zdViAix8C4LxVrRrMB6ligZC3CoDhFA4vMjiPU5SBRqRW4lwVnvMZEZbf0AYbBc2ymnKAOWbQwt2ldiI2qL0aLoL6YtSFUhpwMOR3LP1feUq6XRO5xc9V02nEt9MRQsl5MgmKMcXap4HqAN0yATpjAGRnWqEnE7E1XZg95cEl2gO4HXejKzR0kiTUudcw6P4t1RYLRx7isZNJxiq1JZz6FpEe7QhwGbhPySNMbXJtmYuhAaTpfGdGKMxvHHB9LmELOChdyfjHMwMZ2B0xgU2eJgJimCwLH3UEmExgAwJDD4GSCqevYAMK4P9FKPl0dku0KZ7uOJ8oNloEsrbvMuhuKFDuO1PNvxtdCcgASzNVzdueOtUm1giZIDqbb6j11nqi9NoFeck1zZi2kfGF7OeUp4vYszuhQNi4vd03QeVAduM9h9v36Nz1YobRxB2CjTp6qdKdW9IYBp8aExZpipnJIbfD2hTWE44kIu7Q17f4C9kycGjsLwAWkVbfTRmBMU8SbVKV1EJTrN1gGqGX7quSwg1Vp4qslKAk6EIkoReIl5DuzuH8Rbvrkp5LFFAhNhb1hvXvVWcibtDjQSradNtuYzGf2AAduhxOTnZjzbsceGYhQA5a5NtqxE2GBlW8CPoPzIyfMfPjdAIUmAcns7Fkp44nju2htwhryUyidEzDVyTwevquARjt5a7eu8qIKfPrYgbOAlPgA1JHNi55ivTNpDuQ8drNiafZIntA43HI447WtITYYvLxFRG8OWvJRwI0N7dvHYO8H8lYI1OwatfvLKlJqjtdJBBvMWXdT4SbxHUdNTDUQmqFGZaLx1AvYPnJTYRzrqn5ZnXyWQ1ZCwtvZK209TxoezJ2sGorE46C7Zyki6EcXlX2A8upUUh9IhqLYTzidIRrAPE5mZmosyDyShjnRiN5
CLXZAI21eV4v3a6WXI8TKkUk3fhhajOgPXshlyCEfDAyESpz1J8RECu6vQs81E1ZNE5ha5UGw2wk3Ea8oSTfqTiu0OeisV2a6bfldvW4x0OL8PS57uuY0v0OZPSUPWmPQgnmJRVw8vmh62bpFekMnUH7y31fXU6MIyZaiBs1FEu7qF6irBszHt2ARy50SjgGwQZWcecgvB8gB874g3ES9mZer3diYGF3Wssmsm6XRdsNcuNn3yzuoi52cRrBYUOISegTBVApn4zfuCC9Y4AAfe6wmmiuN8hL6KJeOjrdK5EFQHGyrzeuIMaT3B2nKz1PNONVQ0udbqCQebz3cq7NPe6kGKFLiE6euWjdoMuAbuu8rTkAa42ensXz4a1Yo450ZVgYypaDtepDQWFkJyTHDW1HTVZfCok0tp7STRiQ8n3NKxOUSL9veuTsDs1FaV2rbzR3DvkEJrhJ10Rm0pvLgui5GUDKyWLnrqcNVtOIzFaj9K5pwMfnREm1VIs84ePX0GsMjirfOfubzDoYjavbiCtTB86nKx0tfCKtl0yUQ5PWSBqdGASY3mr5hZcFZ9bA6uXXGTNqMpUH3gqxCoF6t2yAim93t77jYkiFt3OBlBRVQzRsPbgEKRXbX3bWQj6NpDzNCQPYTs45HsQB967f4yByzLH8X289YAZJhJJyFTMCLbpdKFuMBX5Msyr4d15sBa1h5bI13dqU14WBnMKD12LkHMjHiyde6xf5EELf082sUfiAZaROFuDCDnA89p6y6oYEUgF1L9yQElZO4R6IrkJsEFN9hvARf3CH4ENqbYxtUN9gsB9CLCGKMy2R4wGKU3Dkyea27YCR4QHCdqX3HqOpy12uxBANvbrfEro9q5NJrGK7WVq3nNabN05x4TmIZk3asc8ehvDyhSgQLY0wwyvrkcYqNiETybJ57RjwVg1YE0IZEBfyAUNXE4goc2jtbZbHfcpTzt08pSJQZTAzuxrdQLS4EnaFHPpMdPh1YXUdclj6g2sjYbhoTYcV97bVDAUztMZ4EarUcv6tgQOvK66RmJCF2zVEpFDBS6AVZJWzrVlnuiweXpH0L9eY2Wy2EuAHi7gL4o0i0AkOapqY1TPUWUwBaVrKQzkL8QQbczgc97pMvSnGYMlcSdzlamFtUmRoOPmhBGMpVqmcxnstnqJ0TXMV65zbRN2hk3YVF5HwPjuWJmfkVYnyazuqKuaaohrQIe7YOOSAmD7C2vDnI50y1oScQqIPb87QAmguFz7jfNBSPymjPJ7UrToaJen7LEQr8S2b69ayZYNIyWbcpaW5ACUqdyT5AeHYhdENORnWS2B17qnBPtyvb4WujJCafLmsMFhQbcGonDZkHEOAnOcwRwJ4KIPr4MlQLRKsdnurPDDEmpCtCnFg8vPObOPHoHgICb9j35pG1YNhAAGIGTZ4g3JTJzFvTcW7GDRxREPZffKOuQTJoMYYaaPwnE0SainEpCFAukJbDy1ss5cZt60nqTw1asLzwMKJu5PHpU9sB9YN7J2cPhIbfb4387zSmSvqbt3I8NFjDbuYEhe6nZ7gRT5Th0W0MoyzHlmy4MSXbaAfUJNsLQJmdhdVKDsqMz0aXKIVNsXtn88owrhw0yqxU0K3IfTothafhpQ8daRUnbjzULViWRvUz7dI1N3GgylRzaEXQPgbj0DQ7RujNTcJoSp7I1ELjFFSBZDm4Jx5eXq0aS2SKJPFX7XmFfkkR99wRiHx4ByVTL5umojRhY5j8vg3l3yfliJbeOTXckaYiezrucuHaiVFWR2kjk9PUm57bDpvtSFMic652iDufj4hqpy5MH5r2lg67T6Bbb3fcq49cVJ3hkN2GfRqVhoPxmHyvotu5koheVh7oHDaLaf4VvcQMd5MF8sicaX3GXfoLjlfFZwfJBpXNbbVemD7XghpIEwuFjA1USU8yJnTdvCJ2bFmPNWFeWsBVDyl7XUsbgB3K2zz806xODZT639dqiqhGXQNbgYtShik
QhiHhZF4wf4IY588LE4EO2bdXBb2Wezm8Gl2J5GAfqnx5Z6NF7h1gGkM27hpnmKNylKZjqTNANj0CRU4awpdVrYGX7hT0u452Y5bXpVl7cLuK7j2k7VG93NXPsXADhQA8R9WDcpU0PLzFWFq1omoQ9ZRSlvh8R4pRp4vHIYf4A5uQEmv5Owr4pFQcWdp5GAdkpBaSHvUhvMxOSpsqVB2LHvvs1RiOUHHhHdZEKpX25mK9moud8pKT4efru1SlRRSsxdz87hTJMUrueydHDPXbo9AvExctdqxuCk03Fy8cB57qrkQQ50oGNuTNPColMrwVfmuTt81uSZremLbINILnCVXEnvTugRQfFYMnprqMB4mVJfZfh6XVLdOyW4BPaFrBsZGFy7udoWJwE8ACx4UpJW6m1ltckofzA6AUxzXprXDCCL118m8bBB2hzDKmqeLk5ZYKsLROkTqRAxmJjBSZSo2XBroO5rVvkOZrOZRe8NgaHFMLPn0I6hsqwA7VdKlpbqknax84iWrtBe8ErxgPIQeYhELyK1deW1YWBagD21MBTc2h5LliIlglZg41H8Zl3GvUv0XNZegR5bx1kiM9WFGV9Yt37iQQGquWAMKCAb6AqpkCtKs7sXKaEAVsbh32tlkAg4ngspjwzYHTPYKUuigPX5K8siUfaAW9WJl7r8dc4ju97osWETOcBENLsfwB66TvsttORtOedylnErplZP3hjt7o39JllXDobj3l10bSr4B09eYVWi2DLGavYktKSKj1PrqzuGUaqcFxqoebpuDEAx5vl8ZmSYrmS2RBJ1n2s3lkKdaVWTmfIXlyMMT7Ac3lCXpGNnpf8ccTffv3E0fBrpCSpVc48dM5e5iTpRPrfWxAjrud9jSrqVBXsw3pqUvhuVmBpmwoKAfQGxHrauna3f48AFefGDozxXXjpdM9ZDWHsRUBTFNzDs8tUATtegSzZfNJCS9k0p5q2cueyU1mtwMJIdf0FrsVGiAyX7PFkWvLHi29fpprZQd0gbMMw2Bt10ZbZCsjPX261cXmVa6ZPnkVQm2w1ory3uWejuq20oQCyXTYyv1Ki4tbdPxoNn04Je7uS3QHDCsUl4i9zKNhBJ3g55bhIZWfwmLi3S7oY16gImdC6vvjsMKkCPzXv4pPaVhHH7o4f0mWEz30k4o7GQNOUy8LPM3NmlZF7QaIBdRfozG86jwQkC3jTNR357pdPjOqMERtIS4WEJBgbaeUCu5MOhsNdaD91iCeghIpOECFyTdEkUCGPPCIAtuAOKBdhPu40UxHx30dELMTK3azHOuOnLTsdiM4KJ9yF4Ab2eiz5j2T95sDx3aiEJDVDPCa55hO0XTBM9OSNtdzjdTdZT19XrwD0wPWZcBhfJ66X1uNM2eud1btzglqZP52qqYU7BK2M3BBZKKjy7P6YzmgaPHWnFGHZdwdz3Yq6e3N76Cjkfl8Sy0mkwd6pt0geDM1jNNZrcT8dUfLLaiUqcZm1KRVdpZaBrboDSuCxfWYlxqgsldwlGL4C06ceFUDXX8PzxzWEgOd8OU4F22pcNJOnwJGo6rYA3tvhAuq2WKVg6tgFCb1p7dzF4Ke3J0dv3IneMSNnHG4hkvxW6VzIykDUtYEjMQO35tdnEA0vMVLXIahpJpz4HGs5wwRgoZx1e1zD1pXi7KmEVTlfattgcGFlKjZJ60fEdloZEmiXodxT63CzuJHnjHDOL8qcMzTxHb8OCainga4w1fk4uILLAWqmTFpDcFGSF5lbOFUwhvtMK6knIWZ8ZApZvTGBt1qv3xKUJqPcWiweI4kk57zgyTPZku2mg4fJWDKSfiRSi7LvtpKkdqjein9lP7LMv5lKutprVzjmvHBPjunXGqakWx39xYH8RD6qF3Fw2BnIIesiicZsDv69Ggbu9Y334UeFPNIJ3LGp2I8xcUxlP5dJAh4V05p1HvIZ5Fhk0oCWlvNXdLqzbVsbfW9jWyQTaZXzw7WT3rqFQc7wvw
4ayp5eKmUclqB1yOvrI14XGhmH7QMaAYNTIE2RHjYXVgvbmFRi0oB1v4nDEeSTn3KHBRQD8TilCagKg0XYPj2eAgWs12ZRYzlGyCvYZ1pol5wAwc9AFFGwsTJ9UYkbxlZv7wKDx7nFzlUSMC1kMvS2ECwvHzSycqHPRwCGipvG6kWz0mGvASXeKjm47iMROoY0MRK0uvgNdTTOTdxkMgOuCDIlxfit5QKjyzaVAg2kDwENfSd6XPMgSprTSLuNDXdg5NHCwUvDbEHVxpMgOItZymPZtPweOrnPdlEB4UwLZ8jqtShi5oDYvhkh85FwwT25OHFvDUWTTCV5n73pQ8kLo8zsB3mbWfGwg62guj3C50Dh42fAZEPBRSHDRTg3r0z39Vyj490lk2UpZeNyylwuEKmuIqEkbE3BRT2YEjTM8a2PU5grCuzculibcoRUpb1sIQiMRTf4wrtT1CnKcoUJ1T28DC04dTJVRcm3w3WzNLdrnovkX6NahblTzDvq5eXkoEaZv6HClmGuho4FH6s6i0OdmmW8qkNOnk7BhexiyAd3UYERlFwvZ6LP55tFOc3vnlhyylx1rTTgu1NFljRNs7rGiT7SnGFaFK7GITEZFEYI7DmOEUZXxDSHjYuOVN0YAJP2cZFgagyMwGJdrpH8S7cewYPMKz2Go2GBKl1OA6pJ8T91tUdEcGVg9JCMQUA4sBtlIuRTVV3cduIhsLCTi2ewItkh9MRP1kevVa9WcXejQQKreZmq5EZtzThW71r7E2tcvwFeqiwv3JZnV16bZ7NwZT6uvSrOnIFUyMsxhh8xCkVY82VLTAZhPXB8t6CbyjZ5stos6WmNZgoEsD8GU8pmzSTubAqQXkTbiODF2pePe6S9uQ9HngGGBnOjY4QUcAcScDsfflyXVqyxgTelGD4vXoba6qRWCqc9LKpyk4jCKYvLX9tzXusO7bhT2KRvF4MObDqdE4KnCCIF3zeVD0vImR20MmRTBHRCNm3s6GfyeTYEAlW3L2igZJ7Myj5zGLccMt2EohGc38HfWZ4mlvXRLHKB233PyKALYifqlAxTXaWUk13o6nACQDvN7DxSCA0daJeuznK1Dr52bC4IXCTahK1An6LkQMfsXb7Qus6ey241Vb4wTgFHqsdCx7qPxeAghmsTOHRVl") ], "body": '' } benoitc-gunicorn-f5fb19e/tests/requests/valid/027.http000066400000000000000000000000361514360242400230070ustar00rootroot00000000000000GET /à%20k HTTP/1.0\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/valid/027.py000066400000000000000000000003541514360242400224630ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
request = { "method": "GET", "uri": uri("/\xc3\xa0%20k"), "version": (1, 0), "headers": [ ], "body": '' } benoitc-gunicorn-f5fb19e/tests/requests/valid/028.http000066400000000000000000000001041514360242400230040ustar00rootroot00000000000000GET /stuff/here?foo=bar HTTP/1.1\r\n Content-Length : 3\r\n \r\n xyzbenoitc-gunicorn-f5fb19e/tests/requests/valid/028.py000066400000000000000000000005571514360242400224710ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.config import Config cfg = Config() cfg.set("strip_header_spaces", True) request = { "method": "GET", "uri": uri("/stuff/here?foo=bar"), "version": (1, 1), "headers": [ ("CONTENT-LENGTH", "3"), ], "body": b"xyz" }benoitc-gunicorn-f5fb19e/tests/requests/valid/029.http000066400000000000000000000002011514360242400230030ustar00rootroot00000000000000GET /stuff/here?foo=bar HTTP/1.1\r\n Transfer-Encoding: identity\r\n Transfer-Encoding: chunked\r\n \r\n 5\r\n hello\r\n 000\r\n benoitc-gunicorn-f5fb19e/tests/requests/valid/029.py000066400000000000000000000006011514360242400224600ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.config import Config cfg = Config() request = { "method": "GET", "uri": uri("/stuff/here?foo=bar"), "version": (1, 1), "headers": [ ('TRANSFER-ENCODING', 'identity'), ('TRANSFER-ENCODING', 'chunked'), ], "body": b"hello" } benoitc-gunicorn-f5fb19e/tests/requests/valid/030.http000066400000000000000000000002011514360242400227730ustar00rootroot00000000000000GET /stuff/here?foo=bar HTTP/1.1\r\n Transfer-Encoding: identity\r\n Transfer-Encoding: chunked\r\n \r\n 5\r\n hello\r\n 000\r\n benoitc-gunicorn-f5fb19e/tests/requests/valid/030.py000066400000000000000000000006001514360242400224470ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. 
# See the NOTICE for more information. from gunicorn.config import Config cfg = Config() request = { "method": "GET", "uri": uri("/stuff/here?foo=bar"), "version": (1, 1), "headers": [ ('TRANSFER-ENCODING', 'identity'), ('TRANSFER-ENCODING', 'chunked') ], "body": b"hello" } benoitc-gunicorn-f5fb19e/tests/requests/valid/031.http000066400000000000000000000000371514360242400230030ustar00rootroot00000000000000-BLARGH /foo HTTP/1.1\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/valid/031.py000066400000000000000000000003431514360242400224540ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. request = { "method": "-BLARGH", "uri": uri("/foo"), "version": (1, 1), "headers": [], "body": b"" } benoitc-gunicorn-f5fb19e/tests/requests/valid/031compat.http000066400000000000000000000000361514360242400242060ustar00rootroot00000000000000-blargh /foo HTTP/1.1\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/valid/031compat.py000066400000000000000000000005601514360242400236610ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.config import Config cfg = Config() cfg.set("permit_unconventional_http_method", True) cfg.set("casefold_http_method", True) request = { "method": "-BLARGH", "uri": uri("/foo"), "version": (1, 1), "headers": [], "body": b"" } benoitc-gunicorn-f5fb19e/tests/requests/valid/031compat2.http000066400000000000000000000000371514360242400242710ustar00rootroot00000000000000-blargh /foo HTTP/1.1\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/valid/031compat2.py000066400000000000000000000005121514360242400237400ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.config import Config cfg = Config() cfg.set("permit_unconventional_http_method", True) request = { "method": "-blargh", "uri": uri("/foo"), "version": (1, 1), "headers": [], "body": b"" } benoitc-gunicorn-f5fb19e/tests/requests/valid/040.http000066400000000000000000000002131514360242400227770ustar00rootroot00000000000000GET /keep/same/as?invalid/040 HTTP/1.0\r\n Transfer_Encoding: tricked\r\n Content-Length: 7\r\n Content_Length: -1E23\r\n \r\n tricked\r\n benoitc-gunicorn-f5fb19e/tests/requests/valid/040.py000066400000000000000000000004401514360242400224520ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. request = { "method": "GET", "uri": uri("/keep/same/as?invalid/040"), "version": (1, 0), "headers": [ ("CONTENT-LENGTH", "7") ], "body": b'tricked' } benoitc-gunicorn-f5fb19e/tests/requests/valid/040_compat.http000066400000000000000000000002131514360242400243420ustar00rootroot00000000000000GET /keep/same/as?invalid/040 HTTP/1.0\r\n Transfer_Encoding: tricked\r\n Content-Length: 7\r\n Content_Length: -1E23\r\n \r\n tricked\r\n benoitc-gunicorn-f5fb19e/tests/requests/valid/040_compat.py000066400000000000000000000007071514360242400240230ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.config import Config cfg = Config() cfg.set("header_map", "dangerous") request = { "method": "GET", "uri": uri("/keep/same/as?invalid/040"), "version": (1, 0), "headers": [ ("TRANSFER_ENCODING", "tricked"), ("CONTENT-LENGTH", "7"), ("CONTENT_LENGTH", "-1E23"), ], "body": b'tricked' } benoitc-gunicorn-f5fb19e/tests/requests/valid/099.http000066400000000000000000000232651514360242400230310ustar00rootroot00000000000000POST /test-form HTTP/1.1\r\n Host: 0.0.0.0:5000\r\n User-Agent: Mozilla/5.0 (Windows NT 6.2; WOW64; rv:25.0) Gecko/20100101 Firefox/25.0\r\n Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n Accept-Language: en-us,en;q=0.7,el;q=0.3\r\n Accept-Encoding: gzip, deflate\r\n Cookie: csrftoken=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX; sessionid=YYYYYYYYYYYYYYYYYYYYYYYYYYYY\r\n Connection: keep-alive\r\n Content-Type: multipart/form-data; boundary=---------------------------320761477111544\r\n Content-Length: 17914\r\n \r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="csrfmiddlewaretoken"\r\n \r\n XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="_save"\r\n \r\n Save\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="name"\r\n \r\n test.example.org\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="type"\r\n \r\n NATIVE\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="master"\r\n \r\n \r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_dynamiczone_domain-TOTAL_FORMS"\r\n \r\n 1\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_dynamiczone_domain-INITIAL_FORMS"\r\n \r\n 1\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; 
name="foobar_manager_dynamiczone_domain-MAX_NUM_FORMS"\r\n \r\n 1\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_dynamiczone_domain-0-is_dynamic"\r\n \r\n on\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_dynamiczone_domain-0-id"\r\n \r\n 1\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_dynamiczone_domain-0-domain"\r\n \r\n 2\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_dynamiczone_domain-__prefix__-id"\r\n \r\n \r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_dynamiczone_domain-__prefix__-domain"\r\n \r\n 2\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-TOTAL_FORMS"\r\n \r\n 1\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-INITIAL_FORMS"\r\n \r\n 1\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-MAX_NUM_FORMS"\r\n \r\n 1\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-0-ttl"\r\n \r\n 3600\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-0-primary"\r\n \r\n ns.example.org\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-0-hostmaster"\r\n \r\n hostmaster.test.example.org\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-0-serial"\r\n \r\n 2013121701\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-0-refresh"\r\n 
\r\n 10800\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-0-retry"\r\n \r\n 3600\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-0-expire"\r\n \r\n 604800\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-0-default_ttl"\r\n \r\n 3600\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-0-id"\r\n \r\n 16\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-0-domain"\r\n \r\n 2\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-__prefix__-ttl"\r\n \r\n \r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-__prefix__-primary"\r\n \r\n \r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-__prefix__-hostmaster"\r\n \r\n \r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-__prefix__-serial"\r\n \r\n 1\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-__prefix__-refresh"\r\n \r\n 10800\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-__prefix__-retry"\r\n \r\n 3600\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-__prefix__-expire"\r\n \r\n 604800\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-__prefix__-default_ttl"\r\n \r\n 3600\r\n -----------------------------320761477111544\r\n 
Content-Disposition: form-data; name="foobar_manager_record_domain-__prefix__-id"\r\n \r\n \r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-__prefix__-domain"\r\n \r\n 2\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-2-TOTAL_FORMS"\r\n \r\n 0\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-2-INITIAL_FORMS"\r\n \r\n 0\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-2-MAX_NUM_FORMS"\r\n \r\n 1000\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-2-__prefix__-id"\r\n \r\n \r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-2-__prefix__-domain"\r\n \r\n 2\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-2-__prefix__-name"\r\n \r\n \r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-2-__prefix__-ttl"\r\n \r\n \r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-2-__prefix__-content"\r\n \r\n \r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-3-TOTAL_FORMS"\r\n \r\n 0\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-3-INITIAL_FORMS"\r\n \r\n 0\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-3-MAX_NUM_FORMS"\r\n \r\n 1000\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; 
name="foobar_manager_record_domain-3-__prefix__-id"\r\n \r\n \r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-3-__prefix__-domain"\r\n \r\n 2\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-3-__prefix__-name"\r\n \r\n \r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-3-__prefix__-ttl"\r\n \r\n \r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-3-__prefix__-prio"\r\n \r\n \r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-3-__prefix__-content"\r\n \r\n \r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-4-TOTAL_FORMS"\r\n \r\n 0\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-4-INITIAL_FORMS"\r\n \r\n 0\r\n ---------------------\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-5-TOTAL_FORMS"\r\n \r\n 0\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-5-INITIAL_FORMS"\r\n \r\n 0\r\n ---------------------\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-6-TOTAL_FORMS"\r\n \r\n 0\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-6-INITIAL_FORMS"\r\n \r\n 0\r\n ---------------------\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-7-TOTAL_FORMS"\r\n \r\n 0\r\n -----------------------------320761477111544\r\n Content-Disposition: 
form-data; name="foobar_manager_record_domain-7-INITIAL_FORMS"\r\n \r\n 0\r\n ---------------------\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-8-TOTAL_FORMS"\r\n \r\n 0\r\n -----------------------------320761477111544\r\n Content-Disposition: form-data; name="foobar_manager_record_domain-8-INITIAL_FORMS"\r\n \r\n 0\r\n ---------------------\r\nbenoitc-gunicorn-f5fb19e/tests/requests/valid/099.py000066400000000000000000000220311514360242400224700ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. request = { "method": "POST", "uri": uri("/test-form"), "version": (1, 1), "headers": [ ("HOST", "0.0.0.0:5000"), ("USER-AGENT", "Mozilla/5.0 (Windows NT 6.2; WOW64; rv:25.0) Gecko/20100101 Firefox/25.0"), ("ACCEPT", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"), ("ACCEPT-LANGUAGE", "en-us,en;q=0.7,el;q=0.3"), ("ACCEPT-ENCODING", "gzip, deflate"), ("COOKIE", "csrftoken=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX; sessionid=YYYYYYYYYYYYYYYYYYYYYYYYYYYY"), ("CONNECTION", "keep-alive"), ("CONTENT-TYPE", "multipart/form-data; boundary=---------------------------320761477111544"), ("CONTENT-LENGTH", "17914"), ], "body": b"""-----------------------------320761477111544 Content-Disposition: form-data; name="csrfmiddlewaretoken" XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -----------------------------320761477111544 Content-Disposition: form-data; name="_save" Save -----------------------------320761477111544 Content-Disposition: form-data; name="name" test.example.org -----------------------------320761477111544 Content-Disposition: form-data; name="type" NATIVE -----------------------------320761477111544 Content-Disposition: form-data; name="master" -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_dynamiczone_domain-TOTAL_FORMS" 1 -----------------------------320761477111544 
Content-Disposition: form-data; name="foobar_manager_dynamiczone_domain-INITIAL_FORMS" 1 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_dynamiczone_domain-MAX_NUM_FORMS" 1 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_dynamiczone_domain-0-is_dynamic" on -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_dynamiczone_domain-0-id" 1 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_dynamiczone_domain-0-domain" 2 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_dynamiczone_domain-__prefix__-id" -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_dynamiczone_domain-__prefix__-domain" 2 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-TOTAL_FORMS" 1 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-INITIAL_FORMS" 1 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-MAX_NUM_FORMS" 1 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-0-ttl" 3600 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-0-primary" ns.example.org -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-0-hostmaster" hostmaster.test.example.org -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-0-serial" 2013121701 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-0-refresh" 10800 -----------------------------320761477111544 
Content-Disposition: form-data; name="foobar_manager_record_domain-0-retry" 3600 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-0-expire" 604800 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-0-default_ttl" 3600 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-0-id" 16 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-0-domain" 2 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-__prefix__-ttl" -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-__prefix__-primary" -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-__prefix__-hostmaster" -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-__prefix__-serial" 1 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-__prefix__-refresh" 10800 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-__prefix__-retry" 3600 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-__prefix__-expire" 604800 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-__prefix__-default_ttl" 3600 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-__prefix__-id" -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-__prefix__-domain" 2 -----------------------------320761477111544 Content-Disposition: 
form-data; name="foobar_manager_record_domain-2-TOTAL_FORMS" 0 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-2-INITIAL_FORMS" 0 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-2-MAX_NUM_FORMS" 1000 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-2-__prefix__-id" -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-2-__prefix__-domain" 2 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-2-__prefix__-name" -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-2-__prefix__-ttl" -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-2-__prefix__-content" -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-3-TOTAL_FORMS" 0 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-3-INITIAL_FORMS" 0 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-3-MAX_NUM_FORMS" 1000 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-3-__prefix__-id" -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-3-__prefix__-domain" 2 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-3-__prefix__-name" -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-3-__prefix__-ttl" -----------------------------320761477111544 Content-Disposition: form-data; 
name="foobar_manager_record_domain-3-__prefix__-prio" -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-3-__prefix__-content" -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-4-TOTAL_FORMS" 0 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-4-INITIAL_FORMS" 0 --------------------- -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-5-TOTAL_FORMS" 0 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-5-INITIAL_FORMS" 0 --------------------- -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-6-TOTAL_FORMS" 0 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-6-INITIAL_FORMS" 0 --------------------- -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-7-TOTAL_FORMS" 0 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-7-INITIAL_FORMS" 0 --------------------- -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-8-TOTAL_FORMS" 0 -----------------------------320761477111544 Content-Disposition: form-data; name="foobar_manager_record_domain-8-INITIAL_FORMS" 0 --------------------- """.decode('utf-8').replace('\n', '\r\n').encode('utf-8'), } benoitc-gunicorn-f5fb19e/tests/requests/valid/100.http000066400000000000000000000000511514360242400227740ustar00rootroot00000000000000GET ///keeping_slashes HTTP/1.1\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/valid/100.py000066400000000000000000000003551514360242400224540ustar00rootroot00000000000000# # This file is part of gunicorn 
released under the MIT license. # See the NOTICE for more information. request = { "method": "GET", "uri": uri("///keeping_slashes"), "version": (1, 1), "headers": [], "body": b"" } benoitc-gunicorn-f5fb19e/tests/requests/valid/compat_obs_fold.http000066400000000000000000000001031514360242400256240ustar00rootroot00000000000000GET / HTTP/1.1\r\n Long: one\r\n two\r\n Host: localhost\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/valid/compat_obs_fold.py000066400000000000000000000006531514360242400253070ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.http.errors import ObsoleteFolding from gunicorn.config import Config cfg = Config() cfg.set('permit_obsolete_folding', True) request = { "method": "GET", "uri": uri("/"), "version": (1, 1), "headers": [ ("LONG", "one two"), ("HOST", "localhost"), ], "body": b"" } benoitc-gunicorn-f5fb19e/tests/requests/valid/compat_obs_fold_huge.http000066400000000000000000000042721514360242400266470ustar00rootroot00000000000000GET / HTTP/1.1\r\n X-SSL-Cert: -----BEGIN CERTIFICATE-----\r\n MIIFbTCCBFWgAwIBAgICH4cwDQYJKoZIhvcNAQEFBQAwcDELMAkGA1UEBhMCVUsx\r\n ETAPBgNVBAoTCGVTY2llbmNlMRIwEAYDVQQLEwlBdXRob3JpdHkxCzAJBgNVBAMT\r\n AkNBMS0wKwYJKoZIhvcNAQkBFh5jYS1vcGVyYXRvckBncmlkLXN1cHBvcnQuYWMu\r\n dWswHhcNMDYwNzI3MTQxMzI4WhcNMDcwNzI3MTQxMzI4WjBbMQswCQYDVQQGEwJV\r\n SzERMA8GA1UEChMIZVNjaWVuY2UxEzARBgNVBAsTCk1hbmNoZXN0ZXIxCzAJBgNV\r\n BAcTmrsogriqMWLAk1DMRcwFQYDVQQDEw5taWNoYWVsIHBhcmQYJKoZIhvcNAQEB\r\n BQADggEPADCCAQoCggEBANPEQBgl1IaKdSS1TbhF3hEXSl72G9J+WC/1R64fAcEF\r\n W51rEyFYiIeZGx/BVzwXbeBoNUK41OK65sxGuflMo5gLflbwJtHBRIEKAfVVp3YR\r\n gW7cMA/s/XKgL1GEC7rQw8lIZT8RApukCGqOVHSi/F1SiFlPDxuDfmdiNzL31+sL\r\n 0iwHDdNkGjy5pyBSB8Y79dsSJtCW/iaLB0/n8Sj7HgvvZJ7x0fr+RQjYOUUfrePP\r\n u2MSpFyf+9BbC/aXgaZuiCvSR+8Snv3xApQY+fULK/xY8h8Ua51iXoQ5jrgu2SqR\r\n wgA7BUi3G8LFzMBl8FRCDYGUDy7M6QaHXx1ZWIPWNKsCAwEAAaOCAiQwggIgMAwG\r\n 
1UdEwEB/wQCMAAwEQYJYIZIAYb4QgHTTPAQDAgWgMA4GA1UdDwEB/wQEAwID6DAs\r\n BglghkgBhvhCAQ0EHxYdVUsgZS1TY2llbmNlIFVzZXIgQ2VydGlmaWNhdGUwHQYD\r\n VR0OBBYEFDTt/sf9PeMaZDHkUIldrDYMNTBZMIGaBgNVHSMEgZIwgY+AFAI4qxGj\r\n loCLDdMVKwiljjDastqooXSkcjBwMQswCQYDVQQGEwJVSzERMA8GA1UEChMIZVNj\r\n aWVuY2UxEjAQBgNVBAsTCUF1dGhvcml0eTELMAkGA1UEAxMCQ0ExLTArBgkqhkiG\r\n 9w0BCQEWHmNhLW9wZXJhdG9yQGdyaWQtc3VwcG9ydC5hYy51a4IBADApBgNVHRIE\r\n IjAggR5jYS1vcGVyYXRvckBncmlkLXN1cHBvcnQuYWMudWswGQYDVR0gBBIwEDAO\r\n BgwrBgEEAdkvAQEBAQYwPQYJYIZIAYb4QgEEBDAWLmh0dHA6Ly9jYS5ncmlkLXN1\r\n cHBvcnQuYWMudmT4sopwqlBWsvcHViL2NybC9jYWNybC5jcmwwPQYJYIZIAYb4Qg\r\n EDBDAWLmh0dHA6Ly9jYS5ncmlkLXN1cHBvcnQuYWMudWsvcHViL2NybC9jYWNybC\r\n 5jcmwwPwYDVR0fBDgwNjA0oDKgMIYuaHR0cDovL2NhLmdyaWQt5hYy51ay9wdWIv\r\n Y3JsL2NhY3JsLmNybDANBgkqhkiG9w0BAQUFAAOCAQEAS/U4iiooBENGW/Hwmmd3\r\n XCy6Zrt08YjKCzGNjorT98g8uGsqYjSxv/hmi0qlnlHs+k/3Iobc3LjS5AMYr5L8\r\n UO7OSkgFFlLHQyC9JzPfmLCAugvzEbyv4Olnsr8hbxF1MbKZoQxUZtMVu29wjfXk\r\n hTeApBv7eaKCWpSp7MCbvgzm74izKhu3vlDk9w6qVrxePfGgpKPqfHiOoGhFnbTK\r\n wTC6o2xq5y0qZ03JonF7OJspEd3I5zKY3E+ov7/ZhW6DqT8UFvsAdjvQbXyhV8Eu\r\n Yhixw1aKEPzNjNowuIseVogKOLXxWI5vAi5HgXdS0/ES5gDGsABo4fqovUKlgop3\r\n RA==\r\n -----END CERTIFICATE-----\r\n \r\nbenoitc-gunicorn-f5fb19e/tests/requests/valid/compat_obs_fold_huge.py000066400000000000000000000044621514360242400263210ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.config import Config cfg = Config() cfg.set('permit_obsolete_folding', True) certificate = """-----BEGIN CERTIFICATE----- MIIFbTCCBFWgAwIBAgICH4cwDQYJKoZIhvcNAQEFBQAwcDELMAkGA1UEBhMCVUsx ETAPBgNVBAoTCGVTY2llbmNlMRIwEAYDVQQLEwlBdXRob3JpdHkxCzAJBgNVBAMT AkNBMS0wKwYJKoZIhvcNAQkBFh5jYS1vcGVyYXRvckBncmlkLXN1cHBvcnQuYWMu dWswHhcNMDYwNzI3MTQxMzI4WhcNMDcwNzI3MTQxMzI4WjBbMQswCQYDVQQGEwJV SzERMA8GA1UEChMIZVNjaWVuY2UxEzARBgNVBAsTCk1hbmNoZXN0ZXIxCzAJBgNV BAcTmrsogriqMWLAk1DMRcwFQYDVQQDEw5taWNoYWVsIHBhcmQYJKoZIhvcNAQEB BQADggEPADCCAQoCggEBANPEQBgl1IaKdSS1TbhF3hEXSl72G9J+WC/1R64fAcEF W51rEyFYiIeZGx/BVzwXbeBoNUK41OK65sxGuflMo5gLflbwJtHBRIEKAfVVp3YR gW7cMA/s/XKgL1GEC7rQw8lIZT8RApukCGqOVHSi/F1SiFlPDxuDfmdiNzL31+sL 0iwHDdNkGjy5pyBSB8Y79dsSJtCW/iaLB0/n8Sj7HgvvZJ7x0fr+RQjYOUUfrePP u2MSpFyf+9BbC/aXgaZuiCvSR+8Snv3xApQY+fULK/xY8h8Ua51iXoQ5jrgu2SqR wgA7BUi3G8LFzMBl8FRCDYGUDy7M6QaHXx1ZWIPWNKsCAwEAAaOCAiQwggIgMAwG 1UdEwEB/wQCMAAwEQYJYIZIAYb4QgHTTPAQDAgWgMA4GA1UdDwEB/wQEAwID6DAs BglghkgBhvhCAQ0EHxYdVUsgZS1TY2llbmNlIFVzZXIgQ2VydGlmaWNhdGUwHQYD VR0OBBYEFDTt/sf9PeMaZDHkUIldrDYMNTBZMIGaBgNVHSMEgZIwgY+AFAI4qxGj loCLDdMVKwiljjDastqooXSkcjBwMQswCQYDVQQGEwJVSzERMA8GA1UEChMIZVNj aWVuY2UxEjAQBgNVBAsTCUF1dGhvcml0eTELMAkGA1UEAxMCQ0ExLTArBgkqhkiG 9w0BCQEWHmNhLW9wZXJhdG9yQGdyaWQtc3VwcG9ydC5hYy51a4IBADApBgNVHRIE IjAggR5jYS1vcGVyYXRvckBncmlkLXN1cHBvcnQuYWMudWswGQYDVR0gBBIwEDAO BgwrBgEEAdkvAQEBAQYwPQYJYIZIAYb4QgEEBDAWLmh0dHA6Ly9jYS5ncmlkLXN1 cHBvcnQuYWMudmT4sopwqlBWsvcHViL2NybC9jYWNybC5jcmwwPQYJYIZIAYb4Qg EDBDAWLmh0dHA6Ly9jYS5ncmlkLXN1cHBvcnQuYWMudWsvcHViL2NybC9jYWNybC 5jcmwwPwYDVR0fBDgwNjA0oDKgMIYuaHR0cDovL2NhLmdyaWQt5hYy51ay9wdWIv Y3JsL2NhY3JsLmNybDANBgkqhkiG9w0BAQUFAAOCAQEAS/U4iiooBENGW/Hwmmd3 XCy6Zrt08YjKCzGNjorT98g8uGsqYjSxv/hmi0qlnlHs+k/3Iobc3LjS5AMYr5L8 UO7OSkgFFlLHQyC9JzPfmLCAugvzEbyv4Olnsr8hbxF1MbKZoQxUZtMVu29wjfXk hTeApBv7eaKCWpSp7MCbvgzm74izKhu3vlDk9w6qVrxePfGgpKPqfHiOoGhFnbTK wTC6o2xq5y0qZ03JonF7OJspEd3I5zKY3E+ov7/ZhW6DqT8UFvsAdjvQbXyhV8Eu 
Yhixw1aKEPzNjNowuIseVogKOLXxWI5vAi5HgXdS0/ES5gDGsABo4fqovUKlgop3 RA== -----END CERTIFICATE-----""".replace("\n", "") request = { "method": "GET", "uri": uri("/"), "version": (1, 1), "headers": [("X-SSL-CERT", certificate)], "body": b"" } benoitc-gunicorn-f5fb19e/tests/requests/valid/padding_01.http000066400000000000000000000001031514360242400244000ustar00rootroot00000000000000GET / HTTP/1.1\r\n Host: localhost\r\n Name: \t value \t \r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/valid/padding_01.py000066400000000000000000000004341514360242400240600ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. request = { "method": "GET", "uri": uri("/"), "version": (1, 1), "headers": [ ("HOST", "localhost"), ("NAME", "value") ], "body": b"", } benoitc-gunicorn-f5fb19e/tests/requests/valid/pp_01.http000066400000000000000000000003071514360242400234170ustar00rootroot00000000000000PROXY TCP4 192.168.0.1 192.168.0.11 56324 443\r\n GET /stuff/here?foo=bar HTTP/1.0\r\n Server: http://127.0.0.1:5984\r\n Content-Type: application/json\r\n Content-Length: 14\r\n \r\n {"nom": "nom"} benoitc-gunicorn-f5fb19e/tests/requests/valid/pp_01.py000066400000000000000000000007211514360242400230700ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.config import Config cfg = Config() cfg.set('proxy_protocol', True) request = { "method": "GET", "uri": uri("/stuff/here?foo=bar"), "version": (1, 0), "headers": [ ("SERVER", "http://127.0.0.1:5984"), ("CONTENT-TYPE", "application/json"), ("CONTENT-LENGTH", "14") ], "body": b'{"nom": "nom"}' } benoitc-gunicorn-f5fb19e/tests/requests/valid/pp_02.http000066400000000000000000000005511514360242400234210ustar00rootroot00000000000000PROXY TCP4 192.168.0.1 192.168.0.11 56324 443\r\n GET /stuff/here?foo=bar HTTP/1.1\r\n Server: http://127.0.0.1:5984\r\n Content-Type: application/json\r\n Content-Length: 14\r\n Connection: keep-alive\r\n \r\n {"nom": "nom"} POST /post_chunked_all_your_base HTTP/1.1\r\n Transfer-Encoding: chunked\r\n \r\n 1e\r\n all your base are belong to us\r\n 0\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/valid/pp_02.py000066400000000000000000000013511514360242400230710ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.config import Config cfg = Config() cfg.set("proxy_protocol", True) req1 = { "method": "GET", "uri": uri("/stuff/here?foo=bar"), "version": (1, 1), "headers": [ ("SERVER", "http://127.0.0.1:5984"), ("CONTENT-TYPE", "application/json"), ("CONTENT-LENGTH", "14"), ("CONNECTION", "keep-alive") ], "body": b'{"nom": "nom"}' } req2 = { "method": "POST", "uri": uri("/post_chunked_all_your_base"), "version": (1, 1), "headers": [ ("TRANSFER-ENCODING", "chunked"), ], "body": b"all your base are belong to us" } request = [req1, req2] benoitc-gunicorn-f5fb19e/tests/requests/valid/pp_03.http000066400000000000000000000001231514360242400234150ustar00rootroot00000000000000GET /no/proxy/header HTTP/1.1\r\n Host: example.com\r\n Content-Length: 0\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/valid/pp_03.py000066400000000000000000000006051514360242400230730ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.config import Config cfg = Config() cfg.set("proxy_protocol", True) request = { "method": "GET", "uri": uri("/no/proxy/header"), "version": (1, 1), "headers": [ ("HOST", "example.com"), ("CONTENT-LENGTH", "0") ], "body": b"" } benoitc-gunicorn-f5fb19e/tests/requests/valid/pp_04.http000066400000000000000000000003011514360242400234140ustar00rootroot00000000000000\x0D\x0A\x0D\x0A\x00\x0D\x0A\x51\x55\x49\x54\x0A\x21\x11\x00\x0C\xC0\xA8\x01\x0A\xC0\xA8\x01\x01\x30\x39\x01\xBBGET /proxy/v2/ipv4 HTTP/1.1\r\n Host: example.com\r\n Content-Length: 0\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/valid/pp_04.py000066400000000000000000000006031514360242400230720ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from gunicorn.config import Config cfg = Config() cfg.set("proxy_protocol", True) request = { "method": "GET", "uri": uri("/proxy/v2/ipv4"), "version": (1, 1), "headers": [ ("HOST", "example.com"), ("CONTENT-LENGTH", "0") ], "body": b"" } benoitc-gunicorn-f5fb19e/tests/requests/valid/pp_05.http000066400000000000000000000004411514360242400234220ustar00rootroot00000000000000\x0D\x0A\x0D\x0A\x00\x0D\x0A\x51\x55\x49\x54\x0A\x21\x21\x00\x24\x20\x01\x0D\xB8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x20\x01\x0D\xB8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xD4\x31\x00\x50GET /proxy/v2/ipv6 HTTP/1.1\r\n Host: example.com\r\n Content-Length: 0\r\n \r\n benoitc-gunicorn-f5fb19e/tests/requests/valid/pp_05.py000066400000000000000000000006031514360242400230730ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from gunicorn.config import Config cfg = Config() cfg.set("proxy_protocol", True) request = { "method": "GET", "uri": uri("/proxy/v2/ipv6"), "version": (1, 1), "headers": [ ("HOST", "example.com"), ("CONTENT-LENGTH", "0") ], "body": b"" } benoitc-gunicorn-f5fb19e/tests/support.py000066400000000000000000000034731514360242400207220ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
import functools import sys import unittest import platform from wsgiref.validate import validator HOST = "127.0.0.1" def create_app(name="World", count=1): message = (('Hello, %s!\n' % name) * count).encode("utf8") length = str(len(message)) @validator def app(environ, start_response): """Simplest possible application object""" status = '200 OK' response_headers = [ ('Content-type', 'text/plain'), ('Content-Length', length), ] start_response(status, response_headers) return iter([message]) return app app = application = create_app() none_app = None def error_factory(): raise TypeError("inner") def requires_mac_ver(*min_version): """Decorator raising SkipTest if the OS is Mac OS X and the OS X version if less than min_version. For example, @requires_mac_ver(10, 5) raises SkipTest if the OS X version is lesser than 10.5. """ def decorator(func): @functools.wraps(func) def wrapper(*args, **kw): if sys.platform == 'darwin': version_txt = platform.mac_ver()[0] try: version = tuple(map(int, version_txt.split('.'))) except ValueError: pass else: if version < min_version: min_version_txt = '.'.join(map(str, min_version)) raise unittest.SkipTest( "Mac OS X %s or higher required, not %s" % (min_version_txt, version_txt)) return func(*args, **kw) wrapper.min_version = min_version return wrapper return decorator benoitc-gunicorn-f5fb19e/tests/support_dirty_app.py000066400000000000000000000072701514360242400227740ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""Support module for dirty app tests.""" from gunicorn.dirty.app import DirtyApp class TestDirtyApp(DirtyApp): """A simple dirty app for testing.""" def __init__(self): self.initialized = False self.closed = False self.data = {} def init(self): self.initialized = True self.data['init_called'] = True def store(self, key, value): self.data[key] = value return {"stored": True, "key": key} def retrieve(self, key): return self.data.get(key) def compute(self, a, b, operation="add"): if operation == "add": return a + b elif operation == "multiply": return a * b else: raise ValueError(f"Unknown operation: {operation}") def close(self): self.closed = True self.data.clear() class BrokenInitApp(DirtyApp): """A dirty app that fails during init.""" def init(self): raise RuntimeError("Init failed!") class BrokenInstantiationApp(DirtyApp): """A dirty app that fails during instantiation.""" def __init__(self): raise RuntimeError("Cannot instantiate!") class NotAClass: """Not a class, just an instance for testing.""" pass not_a_class = NotAClass() class MissingCallApp: """An invalid dirty app missing __call__.""" def init(self): pass def close(self): pass class SlowDirtyApp(DirtyApp): """A dirty app with slow methods for timeout testing.""" def __init__(self): self.initialized = False self.closed = False def init(self): self.initialized = True def slow_action(self, delay=1.0): """An action that takes time to complete.""" import time time.sleep(delay) return {"delayed": True, "duration": delay} def fast_action(self): """A fast action for comparison.""" return {"fast": True} def close(self): self.closed = True class HeavyModelApp(DirtyApp): """A dirty app that simulates a heavy model requiring limited workers. Uses the workers class attribute to limit how many workers load this app. 
""" workers = 2 # Only 2 workers should load this app def __init__(self): self.initialized = False self.closed = False self.model_data = None self.worker_id = None def init(self): import os self.initialized = True # Store the worker PID to verify which worker handled the request self.worker_id = os.getpid() # Simulate loading a heavy model self.model_data = {"loaded": True, "worker": self.worker_id} def predict(self, data): """Simulate model prediction.""" return { "prediction": f"result_for_{data}", "worker_id": self.worker_id, } def get_worker_id(self): """Return the worker ID that loaded this app.""" return self.worker_id def close(self): self.closed = True self.model_data = None class LightweightApp(DirtyApp): """A lightweight app that should load on all workers.""" def __init__(self): self.initialized = False self.closed = False self.worker_id = None def init(self): import os self.initialized = True self.worker_id = os.getpid() def ping(self): """Simple ping action.""" return {"pong": True, "worker_id": self.worker_id} def get_worker_id(self): """Return the worker ID that loaded this app.""" return self.worker_id def close(self): self.closed = True benoitc-gunicorn-f5fb19e/tests/support_dirty_apps.py000066400000000000000000000061611514360242400231550ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Support module for multi-app dirty tests. Provides test applications with distinct behaviors for verifying that requests are correctly routed to the appropriate app. """ from gunicorn.dirty.app import DirtyApp class CounterApp(DirtyApp): """App that maintains a counter. This app demonstrates stateful behavior where instance variables persist across requests. 
""" def __init__(self): self.counter = 0 self.initialized = False self.closed = False def init(self): """Initialize the counter app.""" self.counter = 0 self.initialized = True def increment(self, amount=1): """Increment the counter by the given amount. Args: amount: Amount to increment by (default: 1) Returns: The new counter value """ self.counter += amount return self.counter def decrement(self, amount=1): """Decrement the counter by the given amount. Args: amount: Amount to decrement by (default: 1) Returns: The new counter value """ self.counter -= amount return self.counter def get_value(self): """Get the current counter value. Returns: The current counter value """ return self.counter def reset(self): """Reset the counter to zero. Returns: The counter value (0) """ self.counter = 0 return self.counter def close(self): """Clean up the counter app.""" self.closed = True self.counter = 0 class EchoApp(DirtyApp): """App that echoes input with a configurable prefix. This app demonstrates a different behavior pattern from CounterApp for verifying app routing. """ def __init__(self): self.prefix = "ECHO:" self.initialized = False self.closed = False self.echo_count = 0 def init(self): """Initialize the echo app.""" self.prefix = "ECHO:" self.echo_count = 0 self.initialized = True def echo(self, message): """Echo a message with the current prefix. Args: message: The message to echo Returns: The prefixed message """ self.echo_count += 1 return f"{self.prefix} {message}" def set_prefix(self, prefix): """Set a new prefix for echo messages. Args: prefix: The new prefix to use Returns: The new prefix """ self.prefix = prefix return prefix def get_prefix(self): """Get the current prefix. Returns: The current prefix """ return self.prefix def get_echo_count(self): """Get the number of echo calls made. 
Returns: The echo count """ return self.echo_count def close(self): """Clean up the echo app.""" self.closed = True self.echo_count = 0 benoitc-gunicorn-f5fb19e/tests/t.py000066400000000000000000000030501514360242400174400ustar00rootroot00000000000000# Copyright 2009 Paul J. Davis # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import io import os import tempfile dirname = os.path.dirname(__file__) from gunicorn.http.parser import RequestParser def data_source(fname): buf = io.BytesIO() with open(fname) as handle: for line in handle: line = line.rstrip("\n").replace("\\r\\n", "\r\n") buf.write(line.encode('latin1')) return buf class request: def __init__(self, name): self.fname = os.path.join(dirname, "requests", name) def __call__(self, func): def run(): src = data_source(self.fname) func(src, RequestParser(src, None, None)) run.func_name = func.func_name return run class FakeSocket: def __init__(self, data): self.tmp = tempfile.TemporaryFile() if data: self.tmp.write(data.getvalue()) self.tmp.flush() self.tmp.seek(0) def fileno(self): return self.tmp.fileno() def len(self): return self.tmp.len def recv(self, length=None): return self.tmp.read(length) def recv_into(self, buf, length): tmp_buffer = self.tmp.read(length) v = len(tmp_buffer) for i, c in enumerate(tmp_buffer): buf[i] = c return v def send(self, data): self.tmp.write(data) self.tmp.flush() def seek(self, offset, whence=0): self.tmp.seek(offset, whence) benoitc-gunicorn-f5fb19e/tests/test_arbiter.py000066400000000000000000000700471514360242400216760ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
import os import signal from unittest import mock import pytest import gunicorn.app.base import gunicorn.arbiter import gunicorn.errors from gunicorn.config import ReusePort class DummyApplication(gunicorn.app.base.BaseApplication): """ Dummy application that has a default configuration. """ def init(self, parser, opts, args): """No-op""" def load(self): """No-op""" def load_config(self): """No-op""" @mock.patch('gunicorn.sock.close_sockets') def test_arbiter_stop_closes_listeners(close_sockets): arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) listener1 = mock.Mock() listener2 = mock.Mock() listeners = [listener1, listener2] arbiter.LISTENERS = listeners arbiter.stop() close_sockets.assert_called_with(listeners, True) @mock.patch('gunicorn.sock.close_sockets') def test_arbiter_stop_child_does_not_unlink_listeners(close_sockets): arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.reexec_pid = os.getpid() arbiter.stop() close_sockets.assert_called_with([], False) @mock.patch('gunicorn.sock.close_sockets') def test_arbiter_stop_parent_does_not_unlink_listeners(close_sockets): arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.master_pid = os.getppid() arbiter.stop() close_sockets.assert_called_with([], False) @mock.patch('gunicorn.sock.close_sockets') def test_arbiter_stop_does_not_unlink_systemd_listeners(close_sockets): arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.systemd = True arbiter.stop() close_sockets.assert_called_with([], False) @mock.patch('gunicorn.sock.close_sockets') def test_arbiter_stop_does_not_unlink_when_using_reuse_port(close_sockets): arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.cfg.settings['reuse_port'] = ReusePort() arbiter.cfg.settings['reuse_port'].set(True) arbiter.stop() close_sockets.assert_called_with([], False) @mock.patch('os.getpid') @mock.patch('os.fork') @mock.patch('os.execvpe') def test_arbiter_reexec_passing_systemd_sockets(execvpe, fork, getpid): arbiter = 
gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.LISTENERS = [mock.Mock(), mock.Mock()] arbiter.systemd = True fork.return_value = 0 getpid.side_effect = [2, 3] arbiter.reexec() environ = execvpe.call_args[0][2] assert environ['GUNICORN_PID'] == '2' assert environ['LISTEN_FDS'] == '2' assert environ['LISTEN_PID'] == '3' @mock.patch('os.getpid') @mock.patch('os.fork') @mock.patch('os.execvpe') def test_arbiter_reexec_passing_gunicorn_sockets(execvpe, fork, getpid): arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) listener1 = mock.Mock() listener2 = mock.Mock() listener1.fileno.return_value = 4 listener2.fileno.return_value = 5 arbiter.LISTENERS = [listener1, listener2] fork.return_value = 0 getpid.side_effect = [2, 3] arbiter.reexec() environ = execvpe.call_args[0][2] assert environ['GUNICORN_FD'] == '4,5' assert environ['GUNICORN_PID'] == '2' @mock.patch('os.fork') def test_arbiter_reexec_limit_parent(fork): arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.reexec_pid = ~os.getpid() arbiter.reexec() assert fork.called is False, "should not fork when there is already a child" @mock.patch('os.fork') def test_arbiter_reexec_limit_child(fork): arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.master_pid = ~os.getpid() arbiter.reexec() assert fork.called is False, "should not fork when arbiter is a child" @mock.patch('os.fork') def test_arbiter_calls_worker_exit(mock_os_fork): mock_os_fork.return_value = 0 arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.cfg.settings['worker_exit'] = mock.Mock() arbiter.pid = None mock_worker = mock.Mock() arbiter.worker_class = mock.Mock(return_value=mock_worker) try: arbiter.spawn_worker() except SystemExit: pass arbiter.cfg.worker_exit.assert_called_with(arbiter, mock_worker) @mock.patch('os.waitpid') def test_arbiter_reap_workers(mock_os_waitpid): mock_os_waitpid.side_effect = [(42, 0), (0, 0)] arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.cfg.settings['child_exit'] 
= mock.Mock() mock_worker = mock.Mock() arbiter.WORKERS = {42: mock_worker} arbiter.reap_workers() mock_worker.tmp.close.assert_called_with() arbiter.cfg.child_exit.assert_called_with(arbiter, mock_worker) class PreloadedAppWithEnvSettings(DummyApplication): """ Simple application that makes use of the 'preload' feature to start the application before spawning worker processes and sets environmental variable configuration settings. """ def load_config(self): """Set the 'preload_app' and 'raw_env' settings in order to verify their interaction below. """ self.cfg.set('raw_env', [ 'SOME_PATH=/tmp/something', 'OTHER_PATH=/tmp/something/else']) self.cfg.set('preload_app', True) def wsgi(self): """Assert that the expected environmental variables are set when the main entry point of this application is called as part of a 'preloaded' application. """ verify_env_vars() return super().wsgi() def verify_env_vars(): assert os.getenv('SOME_PATH') == '/tmp/something' assert os.getenv('OTHER_PATH') == '/tmp/something/else' def test_env_vars_available_during_preload(): """Ensure that configured environmental variables are set during the initial set up of the application (called from the .setup() method of the Arbiter) such that they are available during the initial loading of the WSGI application. """ # Note that we aren't making any assertions here, they are made in the # dummy application object being loaded here instead. 
gunicorn.arbiter.Arbiter(PreloadedAppWithEnvSettings()) # ============================================================================ # Signal Handler Registration Tests # ============================================================================ class TestSignalHandlerRegistration: """Tests for signal handler registration during arbiter initialization.""" def test_init_signals_registers_all_signals(self): """Verify that init_signals registers handlers for all expected signals.""" arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) with mock.patch('signal.signal') as mock_signal: arbiter.init_signals() # Verify all expected signals are registered registered_signals = {call[0][0] for call in mock_signal.call_args_list} expected_signals = set(arbiter.SIGNALS) expected_signals.add(signal.SIGCHLD) assert expected_signals.issubset(registered_signals), \ f"Missing signals: {expected_signals - registered_signals}" def test_init_signals_creates_queue(self): """Verify that arbiter has a SimpleQueue for signals.""" arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) # Verify SimpleQueue was created import queue assert isinstance(arbiter.SIG_QUEUE, queue.SimpleQueue) def test_sigchld_has_separate_handler(self): """Verify that SIGCHLD uses a separate signal handler from other signals.""" arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) with mock.patch('signal.signal') as mock_signal: arbiter.init_signals() # Find the handler for SIGCHLD - uses signal_chld for async-signal-safety sigchld_calls = [c for c in mock_signal.call_args_list if c[0][0] == signal.SIGCHLD] assert len(sigchld_calls) == 1 assert sigchld_calls[0][0][1] == arbiter.signal_chld # Find handlers for other signals other_calls = [c for c in mock_signal.call_args_list if c[0][0] in arbiter.SIGNALS] for call in other_calls: assert call[0][1] == arbiter.signal def test_signals_list_contains_expected(self): """Verify that SIGNALS list contains all expected signal types.""" arbiter = 
gunicorn.arbiter.Arbiter(DummyApplication()) expected = ['HUP', 'QUIT', 'INT', 'TERM', 'TTIN', 'TTOU', 'USR1', 'USR2', 'WINCH'] for name in expected: sig = getattr(signal, f'SIG{name}') assert sig in arbiter.SIGNALS, f"SIG{name} not in SIGNALS list" # ============================================================================ # Signal Queue Tests # ============================================================================ class TestSignalQueue: """Tests for signal queueing and wakeup mechanism using SimpleQueue.""" def test_signal_queued_on_receipt(self): """Verify that signals are queued when the signal handler is called.""" arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.signal(signal.SIGHUP, None) # Get the signal from the queue sig = arbiter.SIG_QUEUE.get_nowait() assert sig == signal.SIGHUP def test_multiple_signals_queued(self): """Verify that multiple signals can be queued.""" arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) # Queue multiple signals arbiter.signal(signal.SIGHUP, None) arbiter.signal(signal.SIGTERM, None) arbiter.signal_chld(signal.SIGCHLD, None) signals = [] while True: try: signals.append(arbiter.SIG_QUEUE.get_nowait()) except Exception: break assert signal.SIGHUP in signals assert signal.SIGTERM in signals assert signal.SIGCHLD in signals def test_wakeup_puts_sentinel(self): """Verify that wakeup puts the WAKEUP_REQUEST sentinel to the queue.""" arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.wakeup() sig = arbiter.SIG_QUEUE.get_nowait() assert sig == arbiter.WAKEUP_REQUEST def test_wait_for_signals_returns_signals(self): """Verify that wait_for_signals returns queued signals.""" arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) # Queue some signals arbiter.SIG_QUEUE.put_nowait(signal.SIGHUP) arbiter.SIG_QUEUE.put_nowait(signal.SIGTERM) signals = arbiter.wait_for_signals(timeout=0.1) assert signal.SIGHUP in signals assert signal.SIGTERM in signals def 
test_wait_for_signals_filters_wakeup_request(self): """Verify that WAKEUP_REQUEST sentinel is filtered from results.""" arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) # Queue a wakeup request and a real signal arbiter.SIG_QUEUE.put_nowait(arbiter.WAKEUP_REQUEST) arbiter.SIG_QUEUE.put_nowait(signal.SIGHUP) signals = arbiter.wait_for_signals(timeout=0.1) assert arbiter.WAKEUP_REQUEST not in signals assert signal.SIGHUP in signals # ============================================================================ # Reap Workers Tests # ============================================================================ class TestReapWorkers: """Tests for worker reaping and exit status handling.""" @mock.patch('os.waitpid') def test_reap_normal_exit(self, mock_waitpid): """Verify that a worker with normal exit (code 0) is properly reaped.""" mock_waitpid.side_effect = [(42, 0), (0, 0)] arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.cfg.settings['child_exit'] = mock.Mock() mock_worker = mock.Mock() arbiter.WORKERS = {42: mock_worker} arbiter.reap_workers() mock_worker.tmp.close.assert_called_once() arbiter.cfg.child_exit.assert_called_once_with(arbiter, mock_worker) assert 42 not in arbiter.WORKERS @mock.patch('os.waitpid') def test_reap_exit_with_error_code(self, mock_waitpid): """Verify that a worker exiting with non-zero code is logged.""" # Exit code 1 (status = 1 << 8 = 256) mock_waitpid.side_effect = [(42, 256), (0, 0)] arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.cfg.settings['child_exit'] = mock.Mock() mock_worker = mock.Mock() arbiter.WORKERS = {42: mock_worker} with mock.patch.object(arbiter.log, 'error') as mock_log: arbiter.reap_workers() # Should log the error exit assert any('exited with code' in str(call) for call in mock_log.call_args_list) @mock.patch('os.waitpid') def test_reap_worker_boot_error(self, mock_waitpid): """Verify that WORKER_BOOT_ERROR causes HaltServer.""" # Exit code 3 (WORKER_BOOT_ERROR) = status 3 << 8 = 
768 mock_waitpid.side_effect = [(42, 768), (0, 0)] arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.cfg.settings['child_exit'] = mock.Mock() mock_worker = mock.Mock() arbiter.WORKERS = {42: mock_worker} with pytest.raises(gunicorn.errors.HaltServer) as exc_info: arbiter.reap_workers() assert exc_info.value.exit_status == gunicorn.arbiter.Arbiter.WORKER_BOOT_ERROR @mock.patch('os.waitpid') def test_reap_app_load_error(self, mock_waitpid): """Verify that APP_LOAD_ERROR causes HaltServer.""" # Exit code 4 (APP_LOAD_ERROR) = status 4 << 8 = 1024 mock_waitpid.side_effect = [(42, 1024), (0, 0)] arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.cfg.settings['child_exit'] = mock.Mock() mock_worker = mock.Mock() arbiter.WORKERS = {42: mock_worker} with pytest.raises(gunicorn.errors.HaltServer) as exc_info: arbiter.reap_workers() assert exc_info.value.exit_status == gunicorn.arbiter.Arbiter.APP_LOAD_ERROR @mock.patch('os.waitpid') def test_reap_killed_by_signal(self, mock_waitpid): """Verify that a worker killed by signal is properly identified.""" # Status for SIGTERM (15) killed process mock_waitpid.side_effect = [(42, signal.SIGTERM), (0, 0)] arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.cfg.settings['child_exit'] = mock.Mock() mock_worker = mock.Mock() arbiter.WORKERS = {42: mock_worker} # SIGTERM should be logged as info (expected during graceful shutdown) with mock.patch.object(arbiter.log, 'info') as mock_log: arbiter.reap_workers() # Should log the signal assert any('SIGTERM' in str(call) for call in mock_log.call_args_list) @mock.patch('os.waitpid') def test_reap_killed_by_sigkill_oom_hint(self, mock_waitpid): """Verify that SIGKILL adds OOM hint to log message.""" # Status for SIGKILL (9) killed process mock_waitpid.side_effect = [(42, signal.SIGKILL), (0, 0)] arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.cfg.settings['child_exit'] = mock.Mock() mock_worker = mock.Mock() arbiter.WORKERS = {42: 
mock_worker} with mock.patch.object(arbiter.log, 'error') as mock_log: arbiter.reap_workers() # Should include OOM hint log_messages = ' '.join(str(call) for call in mock_log.call_args_list) assert 'out of memory' in log_messages.lower() # ============================================================================ # SIGHUP Reload Tests # ============================================================================ class TestSighupReload: """Tests for SIGHUP (reload) handling.""" @mock.patch('gunicorn.arbiter.Arbiter.spawn_worker') @mock.patch('gunicorn.arbiter.Arbiter.manage_workers') def test_reload_spawns_new_workers(self, mock_manage, mock_spawn): """Verify that reload spawns the configured number of workers.""" arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.cfg.set('workers', 3) arbiter.LISTENERS = [mock.Mock()] arbiter.pidfile = None # Mock app.reload to prevent it from resetting config arbiter.app.reload = mock.Mock() # Mock setup to prevent it from resetting num_workers arbiter.setup = mock.Mock() arbiter.reload() assert mock_spawn.call_count == 3 @mock.patch('gunicorn.arbiter.Arbiter.spawn_worker') @mock.patch('gunicorn.arbiter.Arbiter.manage_workers') def test_reload_calls_manage_workers(self, mock_manage, mock_spawn): """Verify that reload calls manage_workers after spawning.""" arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.cfg.set('workers', 1) arbiter.LISTENERS = [mock.Mock()] arbiter.pidfile = None arbiter.reload() mock_manage.assert_called_once() @mock.patch('gunicorn.arbiter.Arbiter.spawn_worker') @mock.patch('gunicorn.arbiter.Arbiter.manage_workers') def test_reload_logs_hang_up(self, mock_manage, mock_spawn): """Verify that handle_hup logs the hang up message.""" arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.LISTENERS = [mock.Mock()] arbiter.pidfile = None with mock.patch.object(arbiter.log, 'info') as mock_log: arbiter.handle_hup() # Check that "Hang up" was logged assert any('Hang up' in str(call) 
for call in mock_log.call_args_list) # ============================================================================ # Worker Lifecycle Tests # ============================================================================ class TestWorkerLifecycle: """Tests for worker spawning, killing, and lifecycle management.""" @mock.patch('os.fork') def test_spawn_worker_adds_to_workers_dict(self, mock_fork): """Verify that spawn_worker adds the worker to WORKERS dict.""" mock_fork.return_value = 12345 # Non-zero = parent process arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.WORKERS = {} arbiter.pid = os.getpid() arbiter.LISTENERS = [] pid = arbiter.spawn_worker() assert pid == 12345 assert 12345 in arbiter.WORKERS assert arbiter.WORKERS[12345].age == arbiter.worker_age def test_kill_worker_sends_signal(self): """Verify that kill_worker sends the specified signal.""" arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) mock_worker = mock.Mock() arbiter.WORKERS = {42: mock_worker} with mock.patch('os.kill') as mock_kill: arbiter.kill_worker(42, signal.SIGTERM) mock_kill.assert_called_once_with(42, signal.SIGTERM) def test_murder_workers_sends_sigabrt_first(self): """Verify that murder_workers sends SIGABRT on first timeout.""" arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.timeout = 30 mock_worker = mock.Mock() mock_worker.aborted = False # Simulate timeout by returning a very old update time mock_worker.tmp.last_update.return_value = 0 arbiter.WORKERS = {42: mock_worker} with mock.patch('time.monotonic', return_value=100), \ mock.patch.object(arbiter, 'kill_worker') as mock_kill: arbiter.murder_workers() mock_kill.assert_called_once_with(42, signal.SIGABRT) assert mock_worker.aborted is True def test_murder_workers_sends_sigkill_second(self): """Verify that murder_workers sends SIGKILL on second timeout.""" arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.timeout = 30 mock_worker = mock.Mock() mock_worker.aborted = True # Already 
aborted once mock_worker.tmp.last_update.return_value = 0 arbiter.WORKERS = {42: mock_worker} with mock.patch('time.monotonic', return_value=100), \ mock.patch.object(arbiter, 'kill_worker') as mock_kill: arbiter.murder_workers() mock_kill.assert_called_once_with(42, signal.SIGKILL) # ============================================================================ # Dirty Arbiter Orphan Cleanup Tests # ============================================================================ class TestDirtyArbiterOrphanCleanup: """Tests for dirty arbiter orphan detection and cleanup.""" def test_get_dirty_pidfile_path(self): """Verify pidfile path is generated correctly.""" arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.proc_name = 'myapp' path = arbiter._get_dirty_pidfile_path() import tempfile expected = os.path.join(tempfile.gettempdir(), 'gunicorn-dirty-myapp.pid') assert path == expected def test_get_dirty_pidfile_path_sanitizes_name(self): """Verify special characters in proc_name are sanitized.""" arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.proc_name = 'my/app name' path = arbiter._get_dirty_pidfile_path() import tempfile expected = os.path.join(tempfile.gettempdir(), 'gunicorn-dirty-my_app_name.pid') assert path == expected def test_get_dirty_pidfile_path_uses_proc_name_not_cfg(self): """Verify pidfile path uses self.proc_name for USR2 compatibility. During USR2, self.proc_name becomes 'myapp.2' while self.cfg.proc_name stays 'myapp'. Using self.proc_name ensures new and old dirty arbiters have different PID file paths. 
""" arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.cfg.set('proc_name', 'myapp') arbiter.proc_name = 'myapp.2' # Simulates USR2 child path = arbiter._get_dirty_pidfile_path() import tempfile # Should use self.proc_name, not self.cfg.proc_name expected = os.path.join(tempfile.gettempdir(), 'gunicorn-dirty-myapp.2.pid') assert path == expected def test_cleanup_orphaned_skipped_during_usr2(self): """Verify cleanup is skipped during USR2 upgrade (master_pid != 0).""" arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.master_pid = 12345 # Indicates USR2 upgrade in progress with mock.patch.object(arbiter, '_get_dirty_pidfile_path') as mock_path: arbiter._cleanup_orphaned_dirty_arbiter() # Should not even check the pidfile path mock_path.assert_not_called() def test_cleanup_orphaned_no_pidfile(self): """Verify cleanup handles missing pidfile gracefully.""" arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.master_pid = 0 with mock.patch('os.path.exists', return_value=False): # Should not raise any exception arbiter._cleanup_orphaned_dirty_arbiter() @mock.patch('os.unlink') @mock.patch('os.kill') def test_cleanup_orphaned_kills_existing_process(self, mock_kill, mock_unlink): """Verify cleanup kills orphaned dirty arbiter process.""" arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.master_pid = 0 # First kill(pid, 0) succeeds (process exists), then SIGTERM causes exit mock_kill.side_effect = [None, None, OSError(3, "No such process")] import tempfile pidfile = os.path.join(tempfile.gettempdir(), 'gunicorn-dirty-test.pid') with mock.patch('os.path.exists', return_value=True), \ mock.patch('builtins.open', mock.mock_open(read_data='12345')), \ mock.patch.object(arbiter, '_get_dirty_pidfile_path', return_value=pidfile), \ mock.patch('time.sleep'): arbiter._cleanup_orphaned_dirty_arbiter() # Should have sent signal 0 (check), then SIGTERM assert mock_kill.call_args_list[0] == mock.call(12345, 0) assert 
mock_kill.call_args_list[1] == mock.call(12345, signal.SIGTERM) # Should unlink the stale pidfile mock_unlink.assert_called_with(pidfile) @mock.patch('os.unlink') @mock.patch('os.kill') def test_cleanup_orphaned_sigkill_if_sigterm_fails(self, mock_kill, mock_unlink): """Verify cleanup sends SIGKILL if SIGTERM doesn't work.""" arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.master_pid = 0 # Process exists on all checks until SIGKILL def kill_side_effect(pid, sig): if sig == signal.SIGKILL: return None return None # Process still running mock_kill.side_effect = kill_side_effect import tempfile pidfile = os.path.join(tempfile.gettempdir(), 'gunicorn-dirty-test.pid') with mock.patch('os.path.exists', return_value=True), \ mock.patch('builtins.open', mock.mock_open(read_data='12345')), \ mock.patch.object(arbiter, '_get_dirty_pidfile_path', return_value=pidfile), \ mock.patch('time.sleep'): arbiter._cleanup_orphaned_dirty_arbiter() # Should end with SIGKILL kill_calls = [c for c in mock_kill.call_args_list if c[0][1] == signal.SIGKILL] assert len(kill_calls) == 1 @mock.patch('os.unlink') def test_cleanup_orphaned_stale_pidfile_no_process(self, mock_unlink): """Verify cleanup removes stale pidfile when process doesn't exist.""" arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.master_pid = 0 import tempfile pidfile = os.path.join(tempfile.gettempdir(), 'gunicorn-dirty-test.pid') with mock.patch('os.path.exists', return_value=True), \ mock.patch('builtins.open', mock.mock_open(read_data='12345')), \ mock.patch.object(arbiter, '_get_dirty_pidfile_path', return_value=pidfile), \ mock.patch('os.kill', side_effect=OSError(3, "No such process")): arbiter._cleanup_orphaned_dirty_arbiter() # Should still unlink the stale pidfile mock_unlink.assert_called_with(pidfile) @mock.patch('gunicorn.dirty.DirtyArbiter') @mock.patch('os.fork') def test_spawn_dirty_arbiter_calls_cleanup(self, mock_fork, mock_dirty_arbiter): """Verify spawn_dirty_arbiter calls 
orphan cleanup before spawning.""" mock_fork.return_value = 12345 # Parent process mock_arbiter_instance = mock.Mock() mock_arbiter_instance.socket_path = '/tmp/test.sock' mock_dirty_arbiter.return_value = mock_arbiter_instance arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.cfg.set('dirty_workers', 1) arbiter.cfg.set('dirty_apps', ['test:app']) with mock.patch.object(arbiter, '_cleanup_orphaned_dirty_arbiter') as mock_cleanup, \ mock.patch.object(arbiter, '_get_dirty_pidfile_path', return_value='/tmp/test.pid'), \ mock.patch('gunicorn.dirty.set_dirty_socket_path'): arbiter.spawn_dirty_arbiter() mock_cleanup.assert_called_once() @mock.patch('os.fork') def test_spawn_dirty_arbiter_passes_pidfile(self, mock_fork): """Verify spawn_dirty_arbiter passes pidfile to DirtyArbiter.""" mock_fork.return_value = 12345 # Parent process arbiter = gunicorn.arbiter.Arbiter(DummyApplication()) arbiter.cfg.set('dirty_workers', 1) arbiter.cfg.set('dirty_apps', ['test:app']) pidfile_path = '/tmp/gunicorn-dirty-test.pid' # Note: DirtyArbiter is now lazily imported in spawn_dirty_arbiter(), # so we mock it in gunicorn.dirty where it's defined with mock.patch.object(arbiter, '_cleanup_orphaned_dirty_arbiter'), \ mock.patch.object(arbiter, '_get_dirty_pidfile_path', return_value=pidfile_path), \ mock.patch('gunicorn.dirty.DirtyArbiter') as mock_dirty_arbiter, \ mock.patch('gunicorn.dirty.set_dirty_socket_path'): mock_arbiter_instance = mock.Mock() mock_arbiter_instance.socket_path = '/tmp/test.sock' mock_dirty_arbiter.return_value = mock_arbiter_instance arbiter.spawn_dirty_arbiter() # Verify DirtyArbiter was called with pidfile parameter mock_dirty_arbiter.assert_called_once() call_kwargs = mock_dirty_arbiter.call_args[1] assert call_kwargs.get('pidfile') == pidfile_path benoitc-gunicorn-f5fb19e/tests/test_asgi.py000066400000000000000000000215461514360242400211710ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. 
# See the NOTICE for more information. """ Tests for ASGI worker components. """ import asyncio import io import ipaddress import pytest from unittest import mock from gunicorn.asgi.unreader import AsyncUnreader from gunicorn.asgi.message import AsyncRequest class MockStreamReader: """Mock asyncio.StreamReader for testing.""" def __init__(self, data): self.data = data self.pos = 0 async def read(self, size=-1): if self.pos >= len(self.data): return b"" if size < 0: result = self.data[self.pos:] self.pos = len(self.data) else: result = self.data[self.pos:self.pos + size] self.pos += size return result async def readexactly(self, n): if self.pos + n > len(self.data): raise asyncio.IncompleteReadError( self.data[self.pos:], n ) result = self.data[self.pos:self.pos + n] self.pos += n return result class MockConfig: """Mock gunicorn config for testing.""" def __init__(self): self.is_ssl = False self.proxy_protocol = "off" self.proxy_allow_ips = ["127.0.0.1"] self.forwarded_allow_ips = ["127.0.0.1"] self._proxy_allow_networks = None self._forwarded_allow_networks = None self.secure_scheme_headers = {} self.forwarder_headers = [] self.limit_request_line = 8190 self.limit_request_fields = 100 self.limit_request_field_size = 8190 self.permit_unconventional_http_method = False self.permit_unconventional_http_version = False self.permit_obsolete_folding = False self.casefold_http_method = False self.strip_header_spaces = False self.header_map = "refuse" def forwarded_allow_networks(self): if self._forwarded_allow_networks is None: self._forwarded_allow_networks = [ ipaddress.ip_network(addr) for addr in self.forwarded_allow_ips if addr != "*" ] return self._forwarded_allow_networks def proxy_allow_networks(self): if self._proxy_allow_networks is None: self._proxy_allow_networks = [ ipaddress.ip_network(addr) for addr in self.proxy_allow_ips if addr != "*" ] return self._proxy_allow_networks # AsyncUnreader Tests @pytest.mark.asyncio async def test_async_unreader_read_chunk(): 
"""Test basic chunk reading.""" reader = MockStreamReader(b"hello world") unreader = AsyncUnreader(reader) data = await unreader.read() assert data == b"hello world" @pytest.mark.asyncio async def test_async_unreader_read_size(): """Test reading specific size.""" reader = MockStreamReader(b"hello world") unreader = AsyncUnreader(reader) data = await unreader.read(5) assert data == b"hello" @pytest.mark.asyncio async def test_async_unreader_unread(): """Test unread functionality.""" reader = MockStreamReader(b"hello world") unreader = AsyncUnreader(reader) # Read all data data = await unreader.read() assert data == b"hello world" # Unread some data unreader.unread(b"world") # Read again should get unread data data = await unreader.read() assert data == b"world" @pytest.mark.asyncio async def test_async_unreader_read_zero(): """Test reading zero bytes.""" reader = MockStreamReader(b"hello") unreader = AsyncUnreader(reader) data = await unreader.read(0) assert data == b"" @pytest.mark.asyncio async def test_async_unreader_read_empty(): """Test reading from empty stream.""" reader = MockStreamReader(b"") unreader = AsyncUnreader(reader) data = await unreader.read() assert data == b"" # AsyncRequest Tests @pytest.mark.asyncio async def test_async_request_simple_get(): """Test parsing a simple GET request.""" request_data = b"GET /path HTTP/1.1\r\nHost: localhost\r\n\r\n" reader = MockStreamReader(request_data) unreader = AsyncUnreader(reader) cfg = MockConfig() request = await AsyncRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) assert request.method == "GET" assert request.path == "/path" assert request.version == (1, 1) assert ("HOST", "localhost") in request.headers @pytest.mark.asyncio async def test_async_request_with_query(): """Test parsing request with query string.""" request_data = b"GET /search?q=test&page=1 HTTP/1.1\r\nHost: localhost\r\n\r\n" reader = MockStreamReader(request_data) unreader = AsyncUnreader(reader) cfg = MockConfig() request = await 
AsyncRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) assert request.method == "GET" assert request.path == "/search" assert request.query == "q=test&page=1" @pytest.mark.asyncio async def test_async_request_post_with_body(): """Test parsing POST request with body.""" request_data = ( b"POST /submit HTTP/1.1\r\n" b"Host: localhost\r\n" b"Content-Length: 11\r\n" b"\r\n" b"hello=world" ) reader = MockStreamReader(request_data) unreader = AsyncUnreader(reader) cfg = MockConfig() request = await AsyncRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) assert request.method == "POST" assert request.path == "/submit" assert request.content_length == 11 # Read body body = await request.read_body(100) assert body == b"hello=world" @pytest.mark.asyncio async def test_async_request_multiple_headers(): """Test parsing request with multiple headers.""" request_data = ( b"GET / HTTP/1.1\r\n" b"Host: localhost\r\n" b"Accept: text/html\r\n" b"Accept-Language: en-US\r\n" b"Connection: keep-alive\r\n" b"\r\n" ) reader = MockStreamReader(request_data) unreader = AsyncUnreader(reader) cfg = MockConfig() request = await AsyncRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) assert len(request.headers) == 4 assert request.get_header("HOST") == "localhost" assert request.get_header("ACCEPT") == "text/html" @pytest.mark.asyncio async def test_async_request_should_close_http10(): """Test connection close detection for HTTP/1.0.""" request_data = b"GET / HTTP/1.0\r\nHost: localhost\r\n\r\n" reader = MockStreamReader(request_data) unreader = AsyncUnreader(reader) cfg = MockConfig() request = await AsyncRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) assert request.version == (1, 0) assert request.should_close() is True @pytest.mark.asyncio async def test_async_request_should_close_connection_header(): """Test connection close detection with Connection header.""" request_data = b"GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n" reader = MockStreamReader(request_data) unreader 
= AsyncUnreader(reader) cfg = MockConfig() request = await AsyncRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) assert request.should_close() is True @pytest.mark.asyncio async def test_async_request_keepalive(): """Test keepalive detection.""" request_data = b"GET / HTTP/1.1\r\nHost: localhost\r\nConnection: keep-alive\r\n\r\n" reader = MockStreamReader(request_data) unreader = AsyncUnreader(reader) cfg = MockConfig() request = await AsyncRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) assert request.should_close() is False @pytest.mark.asyncio async def test_async_request_no_body_for_get(): """Test that GET requests have no body by default.""" request_data = b"GET / HTTP/1.1\r\nHost: localhost\r\n\r\n" reader = MockStreamReader(request_data) unreader = AsyncUnreader(reader) cfg = MockConfig() request = await AsyncRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) assert request.content_length == 0 body = await request.read_body() assert body == b"" # Error handling tests @pytest.mark.asyncio async def test_async_request_invalid_method(): """Test invalid HTTP method detection.""" from gunicorn.http.errors import InvalidRequestMethod request_data = b"ge!t / HTTP/1.1\r\nHost: localhost\r\n\r\n" reader = MockStreamReader(request_data) unreader = AsyncUnreader(reader) cfg = MockConfig() with pytest.raises(InvalidRequestMethod): await AsyncRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) @pytest.mark.asyncio async def test_async_request_invalid_http_version(): """Test invalid HTTP version detection.""" from gunicorn.http.errors import InvalidHTTPVersion request_data = b"GET / HTTP/2.0\r\nHost: localhost\r\n\r\n" reader = MockStreamReader(request_data) unreader = AsyncUnreader(reader) cfg = MockConfig() with pytest.raises(InvalidHTTPVersion): await AsyncRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) benoitc-gunicorn-f5fb19e/tests/test_asgi_compliance.py000066400000000000000000000664101514360242400233620ustar00rootroot00000000000000# # This file is part of 
gunicorn released under the MIT license. # See the NOTICE for more information. """ ASGI 3.0 specification compliance tests. Tests that gunicorn's ASGI implementation conforms to the ASGI 3.0 spec: https://asgi.readthedocs.io/en/latest/specs/main.html """ import asyncio from unittest import mock from gunicorn.config import Config # ============================================================================ # ASGI Version Tests # ============================================================================ class TestASGIVersion: """Test ASGI version information in scope.""" def _create_protocol(self): """Create an ASGIProtocol instance for testing.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.Mock() worker.cfg = Config() worker.log = mock.Mock() worker.asgi = mock.Mock() return ASGIProtocol(worker) def _create_mock_request(self, **kwargs): """Create a mock HTTP request.""" request = mock.Mock() request.method = kwargs.get("method", "GET") request.path = kwargs.get("path", "/") request.query = kwargs.get("query", "") request.version = kwargs.get("version", (1, 1)) request.scheme = kwargs.get("scheme", "http") request.headers = kwargs.get("headers", []) return request def test_asgi_version_present(self): """Test that 'asgi' key is present in HTTP scope.""" protocol = self._create_protocol() request = self._create_mock_request() scope = protocol._build_http_scope( request, ("127.0.0.1", 8000), ("127.0.0.1", 12345), ) assert "asgi" in scope def test_asgi_version_is_dict(self): """Test that 'asgi' value is a dictionary.""" protocol = self._create_protocol() request = self._create_mock_request() scope = protocol._build_http_scope( request, ("127.0.0.1", 8000), ("127.0.0.1", 12345), ) assert isinstance(scope["asgi"], dict) def test_asgi_version_value(self): """Test that ASGI version is '3.0'.""" protocol = self._create_protocol() request = self._create_mock_request() scope = protocol._build_http_scope( request, ("127.0.0.1", 8000), ("127.0.0.1", 12345), 
) assert scope["asgi"]["version"] == "3.0" def test_asgi_spec_version_present(self): """Test that spec_version is present in ASGI dict.""" protocol = self._create_protocol() request = self._create_mock_request() scope = protocol._build_http_scope( request, ("127.0.0.1", 8000), ("127.0.0.1", 12345), ) assert "spec_version" in scope["asgi"] def test_asgi_spec_version_value(self): """Test that spec_version follows semantic versioning.""" protocol = self._create_protocol() request = self._create_mock_request() scope = protocol._build_http_scope( request, ("127.0.0.1", 8000), ("127.0.0.1", 12345), ) spec_version = scope["asgi"]["spec_version"] # Should be in format "X.Y" (major.minor) parts = spec_version.split(".") assert len(parts) == 2 assert all(part.isdigit() for part in parts) # ============================================================================ # HTTP Scope Keys Tests (ASGI HTTP Connection Scope) # ============================================================================ class TestHTTPScopeKeys: """Test required keys in HTTP connection scope per ASGI spec.""" def _create_protocol(self): """Create an ASGIProtocol instance for testing.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.Mock() worker.cfg = Config() worker.log = mock.Mock() worker.asgi = mock.Mock() return ASGIProtocol(worker) def _create_mock_request(self, **kwargs): """Create a mock HTTP request.""" request = mock.Mock() request.method = kwargs.get("method", "GET") request.path = kwargs.get("path", "/") request.query = kwargs.get("query", "") request.version = kwargs.get("version", (1, 1)) request.scheme = kwargs.get("scheme", "http") request.headers = kwargs.get("headers", []) return request def test_type_key_present(self): """Test 'type' key is present and equals 'http'.""" protocol = self._create_protocol() request = self._create_mock_request() scope = protocol._build_http_scope( request, ("127.0.0.1", 8000), ("127.0.0.1", 12345), ) assert scope["type"] == "http" def 
test_http_version_key_present(self): """Test 'http_version' key is present.""" protocol = self._create_protocol() request = self._create_mock_request() scope = protocol._build_http_scope( request, ("127.0.0.1", 8000), ("127.0.0.1", 12345), ) assert "http_version" in scope assert scope["http_version"] == "1.1" def test_http_version_formats(self): """Test various HTTP version formats.""" protocol = self._create_protocol() # HTTP/1.0 request_10 = self._create_mock_request(version=(1, 0)) scope_10 = protocol._build_http_scope(request_10, None, None) assert scope_10["http_version"] == "1.0" # HTTP/1.1 request_11 = self._create_mock_request(version=(1, 1)) scope_11 = protocol._build_http_scope(request_11, None, None) assert scope_11["http_version"] == "1.1" def test_method_key_present(self): """Test 'method' key is present and is uppercase string.""" protocol = self._create_protocol() for method in ["GET", "POST", "PUT", "DELETE", "PATCH", "HEAD", "OPTIONS"]: request = self._create_mock_request(method=method) scope = protocol._build_http_scope(request, None, None) assert scope["method"] == method assert scope["method"].isupper() def test_scheme_key_present(self): """Test 'scheme' key is present.""" protocol = self._create_protocol() # HTTP request_http = self._create_mock_request(scheme="http") scope_http = protocol._build_http_scope(request_http, None, None) assert scope_http["scheme"] == "http" # HTTPS request_https = self._create_mock_request(scheme="https") scope_https = protocol._build_http_scope(request_https, None, None) assert scope_https["scheme"] == "https" def test_path_key_present(self): """Test 'path' key is present and starts with /.""" protocol = self._create_protocol() request = self._create_mock_request(path="/api/users") scope = protocol._build_http_scope(request, None, None) assert "path" in scope assert scope["path"] == "/api/users" assert scope["path"].startswith("/") def test_raw_path_key_present(self): """Test 'raw_path' key is present and is 
bytes.""" protocol = self._create_protocol() request = self._create_mock_request(path="/api/users") scope = protocol._build_http_scope(request, None, None) assert "raw_path" in scope assert isinstance(scope["raw_path"], bytes) assert scope["raw_path"] == b"/api/users" def test_query_string_key_present(self): """Test 'query_string' key is present and is bytes.""" protocol = self._create_protocol() request = self._create_mock_request(query="page=1&limit=10") scope = protocol._build_http_scope(request, None, None) assert "query_string" in scope assert isinstance(scope["query_string"], bytes) assert scope["query_string"] == b"page=1&limit=10" def test_query_string_empty(self): """Test 'query_string' is empty bytes when no query.""" protocol = self._create_protocol() request = self._create_mock_request(query="") scope = protocol._build_http_scope(request, None, None) assert scope["query_string"] == b"" def test_root_path_key_present(self): """Test 'root_path' key is present.""" protocol = self._create_protocol() request = self._create_mock_request() scope = protocol._build_http_scope(request, None, None) assert "root_path" in scope assert isinstance(scope["root_path"], str) def test_headers_key_present(self): """Test 'headers' key is present and is list of 2-tuples.""" protocol = self._create_protocol() request = self._create_mock_request( headers=[("HOST", "localhost"), ("ACCEPT", "text/html")] ) scope = protocol._build_http_scope(request, None, None) assert "headers" in scope assert isinstance(scope["headers"], list) for header in scope["headers"]: assert isinstance(header, tuple) assert len(header) == 2 def test_headers_are_bytes(self): """Test that header names and values are bytes.""" protocol = self._create_protocol() request = self._create_mock_request( headers=[("HOST", "localhost"), ("CONTENT-TYPE", "application/json")] ) scope = protocol._build_http_scope(request, None, None) for name, value in scope["headers"]: assert isinstance(name, bytes), f"Header name 
should be bytes: {name}" assert isinstance(value, bytes), f"Header value should be bytes: {value}" def test_headers_names_lowercase(self): """Test that header names are lowercase.""" protocol = self._create_protocol() request = self._create_mock_request( headers=[("HOST", "localhost"), ("Content-Type", "application/json")] ) scope = protocol._build_http_scope(request, None, None) for name, _ in scope["headers"]: assert name == name.lower(), f"Header name should be lowercase: {name}" def test_server_key_present(self): """Test 'server' key is present when sockname provided.""" protocol = self._create_protocol() request = self._create_mock_request() scope = protocol._build_http_scope( request, ("127.0.0.1", 8000), ("127.0.0.1", 12345), ) assert "server" in scope assert scope["server"] == ("127.0.0.1", 8000) def test_server_key_none(self): """Test 'server' key is None when sockname not provided.""" protocol = self._create_protocol() request = self._create_mock_request() scope = protocol._build_http_scope(request, None, None) assert scope["server"] is None def test_client_key_present(self): """Test 'client' key is present when peername provided.""" protocol = self._create_protocol() request = self._create_mock_request() scope = protocol._build_http_scope( request, ("127.0.0.1", 8000), ("192.168.1.100", 54321), ) assert "client" in scope assert scope["client"] == ("192.168.1.100", 54321) def test_client_key_none(self): """Test 'client' key is None when peername not provided.""" protocol = self._create_protocol() request = self._create_mock_request() scope = protocol._build_http_scope(request, None, None) assert scope["client"] is None # ============================================================================ # HTTP Message Format Tests # ============================================================================ class TestHTTPMessageFormats: """Test HTTP message formats per ASGI spec.""" def test_http_request_message_format(self): """Test http.request message 
format.""" message = { "type": "http.request", "body": b"request body", "more_body": False, } assert message["type"] == "http.request" assert isinstance(message["body"], bytes) assert isinstance(message["more_body"], bool) def test_http_request_message_empty_body(self): """Test http.request message with empty body.""" message = { "type": "http.request", "body": b"", "more_body": False, } assert message["body"] == b"" assert message["more_body"] is False def test_http_response_start_format(self): """Test http.response.start message format.""" message = { "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"text/plain"), (b"content-length", b"13"), ], } assert message["type"] == "http.response.start" assert isinstance(message["status"], int) assert 100 <= message["status"] < 600 assert isinstance(message["headers"], list) def test_http_response_body_format(self): """Test http.response.body message format.""" message = { "type": "http.response.body", "body": b"Hello, World!", "more_body": False, } assert message["type"] == "http.response.body" assert isinstance(message["body"], bytes) assert isinstance(message["more_body"], bool) def test_http_response_body_streaming(self): """Test http.response.body message for streaming.""" # First chunk chunk1 = { "type": "http.response.body", "body": b"First chunk", "more_body": True, } # Last chunk chunk2 = { "type": "http.response.body", "body": b"Last chunk", "more_body": False, } assert chunk1["more_body"] is True assert chunk2["more_body"] is False def test_http_disconnect_format(self): """Test http.disconnect message format.""" message = {"type": "http.disconnect"} assert message["type"] == "http.disconnect" # ============================================================================ # HTTP Response Status Codes Tests # ============================================================================ class TestHTTPStatusCodes: """Test HTTP status code handling.""" def _create_protocol(self): """Create 
an ASGIProtocol instance for testing.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.Mock() worker.cfg = Config() worker.log = mock.Mock() worker.asgi = mock.Mock() return ASGIProtocol(worker) def test_reason_phrase_informational(self): """Test reason phrases for 1xx status codes.""" protocol = self._create_protocol() assert protocol._get_reason_phrase(100) == "Continue" assert protocol._get_reason_phrase(101) == "Switching Protocols" assert protocol._get_reason_phrase(103) == "Early Hints" def test_reason_phrase_success(self): """Test reason phrases for 2xx status codes.""" protocol = self._create_protocol() assert protocol._get_reason_phrase(200) == "OK" assert protocol._get_reason_phrase(201) == "Created" assert protocol._get_reason_phrase(202) == "Accepted" assert protocol._get_reason_phrase(204) == "No Content" assert protocol._get_reason_phrase(206) == "Partial Content" def test_reason_phrase_redirect(self): """Test reason phrases for 3xx status codes.""" protocol = self._create_protocol() assert protocol._get_reason_phrase(301) == "Moved Permanently" assert protocol._get_reason_phrase(302) == "Found" assert protocol._get_reason_phrase(303) == "See Other" assert protocol._get_reason_phrase(304) == "Not Modified" assert protocol._get_reason_phrase(307) == "Temporary Redirect" assert protocol._get_reason_phrase(308) == "Permanent Redirect" def test_reason_phrase_client_error(self): """Test reason phrases for 4xx status codes.""" protocol = self._create_protocol() assert protocol._get_reason_phrase(400) == "Bad Request" assert protocol._get_reason_phrase(401) == "Unauthorized" assert protocol._get_reason_phrase(403) == "Forbidden" assert protocol._get_reason_phrase(404) == "Not Found" assert protocol._get_reason_phrase(405) == "Method Not Allowed" assert protocol._get_reason_phrase(408) == "Request Timeout" assert protocol._get_reason_phrase(409) == "Conflict" assert protocol._get_reason_phrase(410) == "Gone" assert 
protocol._get_reason_phrase(422) == "Unprocessable Entity" assert protocol._get_reason_phrase(429) == "Too Many Requests" def test_reason_phrase_server_error(self): """Test reason phrases for 5xx status codes.""" protocol = self._create_protocol() assert protocol._get_reason_phrase(500) == "Internal Server Error" assert protocol._get_reason_phrase(501) == "Not Implemented" assert protocol._get_reason_phrase(502) == "Bad Gateway" assert protocol._get_reason_phrase(503) == "Service Unavailable" assert protocol._get_reason_phrase(504) == "Gateway Timeout" def test_reason_phrase_unknown(self): """Test reason phrase for unknown status codes.""" protocol = self._create_protocol() assert protocol._get_reason_phrase(999) == "Unknown" assert protocol._get_reason_phrase(418) == "Unknown" # I'm a teapot not defined # ============================================================================ # Informational Response Tests (103 Early Hints, etc.) # ============================================================================ class TestInformationalResponses: """Test support for HTTP 1xx informational responses.""" def test_http_response_informational_format(self): """Test http.response.informational message format.""" message = { "type": "http.response.informational", "status": 103, "headers": [ (b"link", b"; rel=preload; as=style"), ], } assert message["type"] == "http.response.informational" assert 100 <= message["status"] < 200 assert isinstance(message["headers"], list) def test_early_hints_103(self): """Test 103 Early Hints message format.""" message = { "type": "http.response.informational", "status": 103, "headers": [ (b"link", b"; rel=preload; as=style"), (b"link", b"; rel=preload; as=script"), ], } assert message["status"] == 103 # ============================================================================ # ASGI Extensions Tests # ============================================================================ class TestASGIExtensions: """Test ASGI extensions 
support.""" def _create_protocol(self): """Create an ASGIProtocol instance for testing.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.Mock() worker.cfg = Config() worker.log = mock.Mock() worker.asgi = mock.Mock() return ASGIProtocol(worker) def _create_mock_http2_request(self, **kwargs): """Create a mock HTTP/2 request with priority.""" request = mock.Mock() request.method = kwargs.get("method", "GET") request.path = kwargs.get("path", "/") request.query = kwargs.get("query", "") request.uri = kwargs.get("uri", "/") request.scheme = kwargs.get("scheme", "https") request.headers = kwargs.get("headers", []) request.priority_weight = kwargs.get("priority_weight", 16) request.priority_depends_on = kwargs.get("priority_depends_on", 0) return request def test_http2_scope_has_extensions(self): """Test that HTTP/2 scope includes extensions dict.""" protocol = self._create_protocol() request = self._create_mock_http2_request() scope = protocol._build_http2_scope(request, None, None) assert "extensions" in scope assert isinstance(scope["extensions"], dict) def test_http2_priority_extension(self): """Test http.response.priority extension in HTTP/2 scope.""" protocol = self._create_protocol() request = self._create_mock_http2_request( priority_weight=128, priority_depends_on=5, ) scope = protocol._build_http2_scope(request, None, None) assert "http.response.priority" in scope["extensions"] priority = scope["extensions"]["http.response.priority"] assert "weight" in priority assert "depends_on" in priority assert priority["weight"] == 128 assert priority["depends_on"] == 5 def test_http2_trailers_extension(self): """Test http.response.trailers extension in HTTP/2 scope.""" protocol = self._create_protocol() request = self._create_mock_http2_request() scope = protocol._build_http2_scope(request, None, None) assert "http.response.trailers" in scope["extensions"] def test_http_response_trailers_message_format(self): """Test http.response.trailers message 
format.""" message = { "type": "http.response.trailers", "headers": [ (b"grpc-status", b"0"), (b"grpc-message", b""), ], "more_trailers": False, } assert message["type"] == "http.response.trailers" assert isinstance(message["headers"], list) # ============================================================================ # State Sharing Tests # ============================================================================ class TestStateSharing: """Test state sharing between lifespan and request scopes.""" def _create_protocol_with_state(self, state): """Create an ASGIProtocol with worker state.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.Mock() worker.cfg = Config() worker.log = mock.Mock() worker.asgi = mock.Mock() worker.state = state return ASGIProtocol(worker) def _create_mock_request(self): """Create a mock HTTP request.""" request = mock.Mock() request.method = "GET" request.path = "/" request.query = "" request.version = (1, 1) request.scheme = "http" request.headers = [] return request def test_state_in_http_scope(self): """Test that state dict is included in HTTP scope.""" state = {"db": "connected", "cache": "ready"} protocol = self._create_protocol_with_state(state) request = self._create_mock_request() scope = protocol._build_http_scope(request, None, None) assert "state" in scope assert scope["state"] == state def test_state_is_same_object(self): """Test that state is the same object (not a copy).""" state = {"counter": 0} protocol = self._create_protocol_with_state(state) request = self._create_mock_request() scope = protocol._build_http_scope(request, None, None) # Modifying scope["state"] should modify the original scope["state"]["counter"] = 1 assert state["counter"] == 1 def test_state_not_present_without_worker_state(self): """Test that state is not in scope if worker has no state.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.Mock(spec=["cfg", "log", "asgi"]) worker.cfg = Config() worker.log = mock.Mock() 
worker.asgi = mock.Mock() protocol = ASGIProtocol(worker) request = self._create_mock_request() scope = protocol._build_http_scope(request, None, None) assert "state" not in scope # ============================================================================ # HTTP Disconnect Event Tests (ASGI Spec Compliance) # https://asgi.readthedocs.io/en/latest/specs/www.html#disconnect-receive-event # ============================================================================ class TestHTTPDisconnectEvent: """Test http.disconnect event compliance with ASGI spec. Per the ASGI HTTP Connection Scope spec: - Disconnect event is sent when client closes connection - Event type MUST be "http.disconnect" - Apps should receive this event and clean up gracefully """ def _create_protocol(self): """Create an ASGIProtocol instance for testing.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.Mock() worker.cfg = Config() worker.log = mock.Mock() worker.asgi = mock.Mock() worker.nr_conns = 1 worker.loop = mock.Mock() protocol = ASGIProtocol(worker) protocol.reader = mock.Mock() return protocol def test_disconnect_event_type(self): """Test that disconnect event has correct type per ASGI spec.""" protocol = self._create_protocol() protocol._receive_queue = asyncio.Queue() # Simulate client disconnect protocol.connection_lost(None) # Get the message from queue msg = protocol._receive_queue.get_nowait() # Per ASGI spec: type MUST be "http.disconnect" assert msg["type"] == "http.disconnect" def test_disconnect_event_sent_on_connection_lost(self): """Test that http.disconnect is sent when connection is lost.""" protocol = self._create_protocol() protocol._receive_queue = asyncio.Queue() assert protocol._receive_queue.empty() # Simulate client disconnect protocol.connection_lost(None) # Queue should have disconnect message assert not protocol._receive_queue.empty() def test_disconnect_sets_closed_flag(self): """Test that connection_lost sets the closed flag.""" protocol = 
self._create_protocol() assert protocol._closed is False protocol.connection_lost(None) assert protocol._closed is True def test_disconnect_allows_graceful_cleanup(self): """Test that disconnect doesn't immediately cancel task. Per ASGI spec, apps should have opportunity to clean up when they receive http.disconnect. """ protocol = self._create_protocol() # Create a mock task mock_task = mock.Mock() mock_task.done.return_value = False protocol._task = mock_task # Simulate disconnect protocol.connection_lost(None) # Task should NOT be cancelled immediately mock_task.cancel.assert_not_called() # Cancellation should be scheduled after grace period protocol.worker.loop.call_later.assert_called_once() def test_disconnect_message_format(self): """Test http.disconnect message format per ASGI spec. The disconnect message should only contain 'type' key. """ protocol = self._create_protocol() protocol._receive_queue = asyncio.Queue() protocol.connection_lost(None) msg = protocol._receive_queue.get_nowait() # Per ASGI spec, disconnect message only has 'type' assert msg == {"type": "http.disconnect"} assert len(msg) == 1 benoitc-gunicorn-f5fb19e/tests/test_asgi_disconnect.py000066400000000000000000000151651514360242400234020ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Tests for ASGI graceful disconnect handling. Issue: https://github.com/benoitc/gunicorn/issues/3484 When a client disconnects, the ASGI worker should: 1. Send http.disconnect to the receive queue 2. Allow the app a grace period to clean up 3. 
Only cancel the task after the grace period """ import asyncio from unittest import mock import pytest from gunicorn.asgi.protocol import ASGIProtocol class TestASGIGracefulDisconnect: """Test graceful disconnect handling.""" @pytest.fixture def mock_worker(self): """Create a mock worker.""" worker = mock.Mock() worker.nr_conns = 0 worker.loop = asyncio.new_event_loop() worker.cfg = mock.Mock() worker.cfg.asgi_disconnect_grace_period = 3 worker.log = mock.Mock() return worker def test_disconnect_sets_closed_flag(self, mock_worker): """Test that connection_lost sets the closed flag.""" protocol = ASGIProtocol(mock_worker) protocol.reader = mock.Mock() # Simulate connection made mock_worker.nr_conns = 1 assert protocol._closed is False # Simulate connection lost protocol.connection_lost(None) assert protocol._closed is True def test_disconnect_sends_message_to_queue(self, mock_worker): """Test that connection_lost sends http.disconnect to receive queue.""" protocol = ASGIProtocol(mock_worker) protocol.reader = mock.Mock() mock_worker.nr_conns = 1 # Create a receive queue (simulating active request) protocol._receive_queue = asyncio.Queue() # Simulate connection lost protocol.connection_lost(None) # Check that disconnect message was sent assert not protocol._receive_queue.empty() msg = protocol._receive_queue.get_nowait() assert msg == {"type": "http.disconnect"} def test_disconnect_is_idempotent(self, mock_worker): """Test that connection_lost can be called multiple times safely.""" protocol = ASGIProtocol(mock_worker) protocol.reader = mock.Mock() mock_worker.nr_conns = 2 # Start with 2 so we can verify only 1 is decremented protocol._receive_queue = asyncio.Queue() # First call should work protocol.connection_lost(None) assert protocol._closed is True assert mock_worker.nr_conns == 1 assert protocol._receive_queue.qsize() == 1 # Second call should be a no-op protocol.connection_lost(None) assert mock_worker.nr_conns == 1 # Should not decrement again assert 
protocol._receive_queue.qsize() == 1 # Should not add another message def test_disconnect_does_not_cancel_immediately(self, mock_worker): """Test that connection_lost doesn't cancel task immediately.""" protocol = ASGIProtocol(mock_worker) protocol.reader = mock.Mock() mock_worker.nr_conns = 1 # Create a mock task mock_task = mock.Mock() mock_task.done.return_value = False protocol._task = mock_task # Simulate connection lost protocol.connection_lost(None) # Task should NOT be cancelled immediately mock_task.cancel.assert_not_called() def test_disconnect_schedules_cancellation(self, mock_worker): """Test that connection_lost schedules task cancellation.""" # Use a mock loop for this test to verify call_later was called mock_loop = mock.Mock() mock_worker.loop = mock_loop protocol = ASGIProtocol(mock_worker) protocol.reader = mock.Mock() mock_worker.nr_conns = 1 # Create a mock task mock_task = mock.Mock() mock_task.done.return_value = False protocol._task = mock_task # Simulate connection lost protocol.connection_lost(None) # call_later should have been called to schedule cancellation mock_loop.call_later.assert_called_once() args = mock_loop.call_later.call_args[0] assert args[0] == mock_worker.cfg.asgi_disconnect_grace_period assert args[1] == protocol._cancel_task_if_pending def test_cancel_task_if_pending_cancels_running_task(self, mock_worker): """Test that _cancel_task_if_pending cancels a running task.""" protocol = ASGIProtocol(mock_worker) # Create a mock task that's still running mock_task = mock.Mock() mock_task.done.return_value = False protocol._task = mock_task protocol._cancel_task_if_pending() mock_task.cancel.assert_called_once() def test_cancel_task_if_pending_skips_completed_task(self, mock_worker): """Test that _cancel_task_if_pending doesn't cancel completed tasks.""" protocol = ASGIProtocol(mock_worker) # Create a mock task that's already done mock_task = mock.Mock() mock_task.done.return_value = True protocol._task = mock_task 
protocol._cancel_task_if_pending() mock_task.cancel.assert_not_called() @pytest.mark.asyncio async def test_receive_returns_disconnect_when_closed(self, mock_worker): """Test that receive() returns http.disconnect when connection is closed.""" protocol = ASGIProtocol(mock_worker) protocol._closed = True # Create receive queue with body complete receive_queue = asyncio.Queue() protocol._receive_queue = receive_queue # Add initial body message await receive_queue.put({ "type": "http.request", "body": b"", "more_body": False, }) # Simulate what happens in _handle_http_request body_complete = False async def receive(): nonlocal body_complete if protocol._closed and body_complete: return {"type": "http.disconnect"} msg = await receive_queue.get() if msg.get("type") == "http.request" and not msg.get("more_body", True): body_complete = True return msg # First receive gets the body msg1 = await receive() assert msg1["type"] == "http.request" # Second receive should get disconnect msg2 = await receive() assert msg2["type"] == "http.disconnect" class TestASGIDisconnectGracePeriod: """Test the grace period configuration.""" def test_default_grace_period(self): """Test that the default grace period is reasonable.""" from gunicorn.config import Config cfg = Config() assert cfg.asgi_disconnect_grace_period == 3 benoitc-gunicorn-f5fb19e/tests/test_asgi_http_scope.py000066400000000000000000000620711514360242400234170ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ ASGI HTTP scope validation tests. Tests for HTTP scope building, URL encoding, header handling, and extension support. 
""" from unittest import mock import pytest from gunicorn.config import Config # ============================================================================ # HTTP Scope Building Tests # ============================================================================ class TestHTTPScopeBuilding: """Tests for _build_http_scope method.""" def _create_protocol(self, **config_kwargs): """Create an ASGIProtocol instance for testing.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.Mock() worker.cfg = Config() for key, value in config_kwargs.items(): worker.cfg.set(key, value) worker.log = mock.Mock() worker.asgi = mock.Mock() return ASGIProtocol(worker) def _create_mock_request(self, **kwargs): """Create a mock HTTP request.""" request = mock.Mock() request.method = kwargs.get("method", "GET") request.path = kwargs.get("path", "/") request.query = kwargs.get("query", "") request.version = kwargs.get("version", (1, 1)) request.scheme = kwargs.get("scheme", "http") request.headers = kwargs.get("headers", []) # Optionally add HTTP/2 priority attributes if "priority_weight" in kwargs: request.priority_weight = kwargs["priority_weight"] request.priority_depends_on = kwargs.get("priority_depends_on", 0) return request def test_basic_scope_structure(self): """Test basic HTTP scope structure.""" protocol = self._create_protocol() request = self._create_mock_request() scope = protocol._build_http_scope( request, ("127.0.0.1", 8000), ("192.168.1.100", 54321), ) # All required keys should be present required_keys = [ "type", "asgi", "http_version", "method", "scheme", "path", "raw_path", "query_string", "root_path", "headers", "server", "client", ] for key in required_keys: assert key in scope, f"Missing required key: {key}" def test_root_path_configuration(self): """Test root_path from configuration.""" protocol = self._create_protocol(root_path="/api/v1") request = self._create_mock_request() scope = protocol._build_http_scope(request, None, None) assert 
scope["root_path"] == "/api/v1" def test_root_path_default_empty(self): """Test root_path defaults to empty string.""" protocol = self._create_protocol() request = self._create_mock_request() scope = protocol._build_http_scope(request, None, None) assert scope["root_path"] == "" # ============================================================================ # Path Handling Tests # ============================================================================ class TestPathHandling: """Tests for path handling in HTTP scope.""" def _create_protocol(self): """Create an ASGIProtocol instance for testing.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.Mock() worker.cfg = Config() worker.log = mock.Mock() worker.asgi = mock.Mock() return ASGIProtocol(worker) def _create_mock_request(self, **kwargs): """Create a mock HTTP request.""" request = mock.Mock() request.method = kwargs.get("method", "GET") request.path = kwargs.get("path", "/") request.query = kwargs.get("query", "") request.version = kwargs.get("version", (1, 1)) request.scheme = kwargs.get("scheme", "http") request.headers = kwargs.get("headers", []) return request def test_simple_path(self): """Test simple path handling.""" protocol = self._create_protocol() request = self._create_mock_request(path="/users") scope = protocol._build_http_scope(request, None, None) assert scope["path"] == "/users" assert scope["raw_path"] == b"/users" def test_path_with_unicode(self): """Test path with unicode characters.""" protocol = self._create_protocol() # Latin-1 encodable characters request = self._create_mock_request(path="/caf\xe9") scope = protocol._build_http_scope(request, None, None) assert scope["path"] == "/caf\xe9" assert scope["raw_path"] == b"/caf\xe9" def test_nested_path(self): """Test nested path handling.""" protocol = self._create_protocol() request = self._create_mock_request(path="/api/v1/users/123/posts") scope = protocol._build_http_scope(request, None, None) assert scope["path"] == 
"/api/v1/users/123/posts" def test_root_path_only(self): """Test root path only.""" protocol = self._create_protocol() request = self._create_mock_request(path="/") scope = protocol._build_http_scope(request, None, None) assert scope["path"] == "/" assert scope["raw_path"] == b"/" def test_empty_path(self): """Test empty path handling.""" protocol = self._create_protocol() request = self._create_mock_request(path="") scope = protocol._build_http_scope(request, None, None) assert scope["path"] == "" assert scope["raw_path"] == b"" # ============================================================================ # Query String Tests # ============================================================================ class TestQueryStringHandling: """Tests for query string handling in HTTP scope.""" def _create_protocol(self): """Create an ASGIProtocol instance for testing.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.Mock() worker.cfg = Config() worker.log = mock.Mock() worker.asgi = mock.Mock() return ASGIProtocol(worker) def _create_mock_request(self, **kwargs): """Create a mock HTTP request.""" request = mock.Mock() request.method = kwargs.get("method", "GET") request.path = kwargs.get("path", "/") request.query = kwargs.get("query", "") request.version = kwargs.get("version", (1, 1)) request.scheme = kwargs.get("scheme", "http") request.headers = kwargs.get("headers", []) return request def test_simple_query_string(self): """Test simple query string.""" protocol = self._create_protocol() request = self._create_mock_request(query="page=1") scope = protocol._build_http_scope(request, None, None) assert scope["query_string"] == b"page=1" def test_multiple_query_params(self): """Test multiple query parameters.""" protocol = self._create_protocol() request = self._create_mock_request(query="page=1&limit=10&sort=name") scope = protocol._build_http_scope(request, None, None) assert scope["query_string"] == b"page=1&limit=10&sort=name" def 
test_empty_query_string(self): """Test empty query string.""" protocol = self._create_protocol() request = self._create_mock_request(query="") scope = protocol._build_http_scope(request, None, None) assert scope["query_string"] == b"" def test_query_with_special_characters(self): """Test query string with special characters.""" protocol = self._create_protocol() request = self._create_mock_request(query="name=John%20Doe&email=test%40example.com") scope = protocol._build_http_scope(request, None, None) # Query string should be preserved as-is (URL encoded) assert scope["query_string"] == b"name=John%20Doe&email=test%40example.com" def test_query_with_unicode(self): """Test query string with unicode (Latin-1 encodable).""" protocol = self._create_protocol() request = self._create_mock_request(query="city=caf\xe9") scope = protocol._build_http_scope(request, None, None) assert scope["query_string"] == b"city=caf\xe9" # ============================================================================ # Header Handling Tests # ============================================================================ class TestHeaderHandling: """Tests for header handling in HTTP scope.""" def _create_protocol(self): """Create an ASGIProtocol instance for testing.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.Mock() worker.cfg = Config() worker.log = mock.Mock() worker.asgi = mock.Mock() return ASGIProtocol(worker) def _create_mock_request(self, **kwargs): """Create a mock HTTP request.""" request = mock.Mock() request.method = kwargs.get("method", "GET") request.path = kwargs.get("path", "/") request.query = kwargs.get("query", "") request.version = kwargs.get("version", (1, 1)) request.scheme = kwargs.get("scheme", "http") request.headers = kwargs.get("headers", []) return request def test_headers_converted_to_bytes(self): """Test that headers are converted to bytes tuples.""" protocol = self._create_protocol() request = self._create_mock_request( headers=[("HOST", 
"localhost"), ("ACCEPT", "text/html")] ) scope = protocol._build_http_scope(request, None, None) for name, value in scope["headers"]: assert isinstance(name, bytes) assert isinstance(value, bytes) def test_headers_lowercase(self): """Test that header names are lowercased.""" protocol = self._create_protocol() request = self._create_mock_request( headers=[("HOST", "localhost"), ("Content-Type", "application/json")] ) scope = protocol._build_http_scope(request, None, None) header_names = [name for name, _ in scope["headers"]] assert b"host" in header_names assert b"content-type" in header_names def test_multiple_headers_same_name(self): """Test multiple headers with the same name.""" protocol = self._create_protocol() request = self._create_mock_request( headers=[ ("ACCEPT", "text/html"), ("ACCEPT", "application/json"), ] ) scope = protocol._build_http_scope(request, None, None) accept_headers = [value for name, value in scope["headers"] if name == b"accept"] assert len(accept_headers) == 2 def test_empty_headers(self): """Test empty headers list.""" protocol = self._create_protocol() request = self._create_mock_request(headers=[]) scope = protocol._build_http_scope(request, None, None) assert scope["headers"] == [] def test_header_value_with_special_chars(self): """Test header values with special characters.""" protocol = self._create_protocol() request = self._create_mock_request( headers=[("USER-AGENT", "Mozilla/5.0 (compatible; bot/1.0)")] ) scope = protocol._build_http_scope(request, None, None) user_agent = [v for n, v in scope["headers"] if n == b"user-agent"][0] assert user_agent == b"Mozilla/5.0 (compatible; bot/1.0)" # ============================================================================ # WebSocket Scope Tests # ============================================================================ class TestWebSocketScope: """Tests for WebSocket scope building.""" def _create_protocol(self): """Create an ASGIProtocol instance for testing.""" from 
gunicorn.asgi.protocol import ASGIProtocol worker = mock.Mock() worker.cfg = Config() worker.log = mock.Mock() worker.asgi = mock.Mock() return ASGIProtocol(worker) def _create_mock_request(self, **kwargs): """Create a mock WebSocket upgrade request.""" request = mock.Mock() request.method = "GET" request.path = kwargs.get("path", "/ws") request.query = kwargs.get("query", "") request.version = kwargs.get("version", (1, 1)) request.scheme = kwargs.get("scheme", "http") request.headers = kwargs.get("headers", [ ("HOST", "localhost"), ("UPGRADE", "websocket"), ("CONNECTION", "upgrade"), ("SEC-WEBSOCKET-KEY", "dGhlIHNhbXBsZSBub25jZQ=="), ("SEC-WEBSOCKET-VERSION", "13"), ]) return request def test_websocket_scope_type(self): """Test WebSocket scope type.""" protocol = self._create_protocol() request = self._create_mock_request() scope = protocol._build_websocket_scope( request, ("127.0.0.1", 8000), ("127.0.0.1", 12345), ) assert scope["type"] == "websocket" def test_websocket_scheme_ws(self): """Test WebSocket scheme for HTTP.""" protocol = self._create_protocol() request = self._create_mock_request(scheme="http") scope = protocol._build_websocket_scope(request, None, None) assert scope["scheme"] == "ws" def test_websocket_scheme_wss(self): """Test WebSocket scheme for HTTPS.""" protocol = self._create_protocol() request = self._create_mock_request(scheme="https") scope = protocol._build_websocket_scope(request, None, None) assert scope["scheme"] == "wss" def test_websocket_subprotocols(self): """Test WebSocket subprotocol extraction.""" protocol = self._create_protocol() request = self._create_mock_request( headers=[ ("HOST", "localhost"), ("UPGRADE", "websocket"), ("CONNECTION", "upgrade"), ("SEC-WEBSOCKET-KEY", "dGhlIHNhbXBsZSBub25jZQ=="), ("SEC-WEBSOCKET-VERSION", "13"), ("SEC-WEBSOCKET-PROTOCOL", "graphql-ws, subscriptions-transport-ws"), ] ) scope = protocol._build_websocket_scope(request, None, None) assert "subprotocols" in scope assert "graphql-ws" in 
scope["subprotocols"] assert "subscriptions-transport-ws" in scope["subprotocols"] def test_websocket_no_subprotocols(self): """Test WebSocket scope without subprotocols.""" protocol = self._create_protocol() request = self._create_mock_request() scope = protocol._build_websocket_scope(request, None, None) assert "subprotocols" in scope assert scope["subprotocols"] == [] def test_websocket_asgi_version(self): """Test ASGI version in WebSocket scope.""" protocol = self._create_protocol() request = self._create_mock_request() scope = protocol._build_websocket_scope(request, None, None) assert "asgi" in scope assert scope["asgi"]["version"] == "3.0" def test_websocket_required_keys(self): """Test all required keys are present in WebSocket scope.""" protocol = self._create_protocol() request = self._create_mock_request() scope = protocol._build_websocket_scope( request, ("127.0.0.1", 8000), ("127.0.0.1", 12345), ) required_keys = [ "type", "asgi", "http_version", "scheme", "path", "raw_path", "query_string", "root_path", "headers", "server", "client", "subprotocols", ] for key in required_keys: assert key in scope, f"Missing required key: {key}" # ============================================================================ # HTTP/2 Scope Tests # ============================================================================ class TestHTTP2Scope: """Tests for HTTP/2 scope building.""" def _create_protocol(self): """Create an ASGIProtocol instance for testing.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.Mock() worker.cfg = Config() worker.log = mock.Mock() worker.asgi = mock.Mock() return ASGIProtocol(worker) def _create_mock_http2_request(self, **kwargs): """Create a mock HTTP/2 request.""" request = mock.Mock() request.method = kwargs.get("method", "GET") request.path = kwargs.get("path", "/") request.query = kwargs.get("query", "") request.uri = kwargs.get("uri", "/") request.scheme = kwargs.get("scheme", "https") request.headers = 
kwargs.get("headers", []) request.priority_weight = kwargs.get("priority_weight", 16) request.priority_depends_on = kwargs.get("priority_depends_on", 0) return request def test_http2_version_string(self): """Test HTTP/2 version string in scope.""" protocol = self._create_protocol() request = self._create_mock_http2_request() scope = protocol._build_http2_scope(request, None, None) assert scope["http_version"] == "2" def test_http2_priority_extension(self): """Test HTTP/2 priority extension.""" protocol = self._create_protocol() request = self._create_mock_http2_request( priority_weight=256, priority_depends_on=5, ) scope = protocol._build_http2_scope(request, None, None) assert "extensions" in scope assert "http.response.priority" in scope["extensions"] priority = scope["extensions"]["http.response.priority"] assert priority["weight"] == 256 assert priority["depends_on"] == 5 def test_http2_trailers_extension(self): """Test HTTP/2 trailers extension present.""" protocol = self._create_protocol() request = self._create_mock_http2_request() scope = protocol._build_http2_scope(request, None, None) assert "extensions" in scope assert "http.response.trailers" in scope["extensions"] def test_http2_scope_required_keys(self): """Test all required keys in HTTP/2 scope.""" protocol = self._create_protocol() request = self._create_mock_http2_request() scope = protocol._build_http2_scope( request, ("127.0.0.1", 8443), ("127.0.0.1", 12345), ) required_keys = [ "type", "asgi", "http_version", "method", "scheme", "path", "raw_path", "query_string", "root_path", "headers", "server", "client", "extensions", ] for key in required_keys: assert key in scope, f"Missing required key: {key}" # ============================================================================ # Server/Client Address Tests # ============================================================================ class TestAddressHandling: """Tests for server and client address handling.""" def _create_protocol(self): 
"""Create an ASGIProtocol instance for testing.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.Mock() worker.cfg = Config() worker.log = mock.Mock() worker.asgi = mock.Mock() return ASGIProtocol(worker) def _create_mock_request(self): """Create a mock HTTP request.""" request = mock.Mock() request.method = "GET" request.path = "/" request.query = "" request.version = (1, 1) request.scheme = "http" request.headers = [] return request def test_ipv4_addresses(self): """Test IPv4 server and client addresses.""" protocol = self._create_protocol() request = self._create_mock_request() scope = protocol._build_http_scope( request, ("192.168.1.1", 8000), ("192.168.1.100", 54321), ) assert scope["server"] == ("192.168.1.1", 8000) assert scope["client"] == ("192.168.1.100", 54321) def test_ipv6_addresses(self): """Test IPv6 server and client addresses.""" protocol = self._create_protocol() request = self._create_mock_request() scope = protocol._build_http_scope( request, ("::1", 8000), ("::1", 54321), ) assert scope["server"] == ("::1", 8000) assert scope["client"] == ("::1", 54321) def test_localhost_addresses(self): """Test localhost addresses.""" protocol = self._create_protocol() request = self._create_mock_request() scope = protocol._build_http_scope( request, ("127.0.0.1", 8000), ("127.0.0.1", 12345), ) assert scope["server"] == ("127.0.0.1", 8000) assert scope["client"] == ("127.0.0.1", 12345) def test_addresses_none(self): """Test when addresses are not available.""" protocol = self._create_protocol() request = self._create_mock_request() scope = protocol._build_http_scope(request, None, None) assert scope["server"] is None assert scope["client"] is None # ============================================================================ # Environ Building Tests (for access logging) # ============================================================================ class TestEnvironBuilding: """Tests for environ dict building (used for access logging).""" def 
_create_protocol(self): """Create an ASGIProtocol instance for testing.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.Mock() worker.cfg = Config() worker.log = mock.Mock() worker.asgi = mock.Mock() return ASGIProtocol(worker) def _create_mock_request(self, **kwargs): """Create a mock HTTP request.""" request = mock.Mock() request.method = kwargs.get("method", "GET") request.path = kwargs.get("path", "/") request.query = kwargs.get("query", "") request.uri = kwargs.get("uri", "/") request.version = kwargs.get("version", (1, 1)) request.scheme = kwargs.get("scheme", "http") request.headers = kwargs.get("headers", []) return request def test_environ_request_method(self): """Test REQUEST_METHOD in environ.""" protocol = self._create_protocol() request = self._create_mock_request(method="POST") environ = protocol._build_environ( request, ("127.0.0.1", 8000), ("127.0.0.1", 12345), ) assert environ["REQUEST_METHOD"] == "POST" def test_environ_raw_uri(self): """Test RAW_URI in environ.""" protocol = self._create_protocol() request = self._create_mock_request(uri="/api/users?page=1") environ = protocol._build_environ(request, None, None) assert environ["RAW_URI"] == "/api/users?page=1" def test_environ_path_info(self): """Test PATH_INFO in environ.""" protocol = self._create_protocol() request = self._create_mock_request(path="/api/users") environ = protocol._build_environ(request, None, None) assert environ["PATH_INFO"] == "/api/users" def test_environ_query_string(self): """Test QUERY_STRING in environ.""" protocol = self._create_protocol() request = self._create_mock_request(query="page=1&limit=10") environ = protocol._build_environ(request, None, None) assert environ["QUERY_STRING"] == "page=1&limit=10" def test_environ_server_protocol(self): """Test SERVER_PROTOCOL in environ.""" protocol = self._create_protocol() request = self._create_mock_request(version=(1, 1)) environ = protocol._build_environ(request, None, None) assert 
environ["SERVER_PROTOCOL"] == "HTTP/1.1" def test_environ_remote_addr(self): """Test REMOTE_ADDR in environ.""" protocol = self._create_protocol() request = self._create_mock_request() environ = protocol._build_environ( request, None, ("192.168.1.100", 54321), ) assert environ["REMOTE_ADDR"] == "192.168.1.100" def test_environ_remote_addr_missing(self): """Test REMOTE_ADDR when peername is None.""" protocol = self._create_protocol() request = self._create_mock_request() environ = protocol._build_environ(request, None, None) assert environ["REMOTE_ADDR"] == "-" def test_environ_http_headers(self): """Test HTTP headers in environ.""" protocol = self._create_protocol() request = self._create_mock_request( headers=[ ("HOST", "localhost:8000"), ("USER-AGENT", "TestClient/1.0"), ("ACCEPT", "application/json"), ] ) environ = protocol._build_environ(request, None, None) assert environ["HTTP_HOST"] == "localhost:8000" # Header names have dashes converted to underscores in environ assert environ["HTTP_USER_AGENT"] == "TestClient/1.0" assert environ["HTTP_ACCEPT"] == "application/json" benoitc-gunicorn-f5fb19e/tests/test_asgi_streaming.py000066400000000000000000000371731514360242400232450ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ ASGI streaming response tests. Tests for chunked transfer encoding, Server-Sent Events (SSE), and streaming response handling. 
""" import asyncio from unittest import mock import pytest from gunicorn.config import Config # ============================================================================ # Chunked Transfer Encoding Tests # ============================================================================ class TestChunkedTransferEncoding: """Tests for HTTP/1.1 chunked transfer encoding.""" def test_chunked_encoding_format(self): """Test chunked encoding format: size in hex + CRLF + data + CRLF.""" body = b"Hello" chunk = f"{len(body):x}\r\n".encode("latin-1") + body + b"\r\n" assert chunk == b"5\r\nHello\r\n" def test_chunked_encoding_large_chunk(self): """Test chunked encoding with larger data.""" body = b"x" * 1000 chunk = f"{len(body):x}\r\n".encode("latin-1") + body + b"\r\n" # 1000 in hex is 3e8 assert chunk.startswith(b"3e8\r\n") assert chunk.endswith(b"\r\n") def test_chunked_encoding_terminal_chunk(self): """Test terminal chunk (zero-length).""" terminal = b"0\r\n\r\n" # Parse it assert terminal == b"0\r\n\r\n" def test_chunked_encoding_empty_chunk(self): """Test encoding empty body chunk.""" body = b"" chunk = f"{len(body):x}\r\n".encode("latin-1") + body + b"\r\n" assert chunk == b"0\r\n\r\n" def test_chunked_encoding_multiple_chunks(self): """Test multiple chunks in sequence.""" chunks = [] # First chunk body1 = b"Hello, " chunks.append(f"{len(body1):x}\r\n".encode() + body1 + b"\r\n") # Second chunk body2 = b"World!" 
chunks.append(f"{len(body2):x}\r\n".encode() + body2 + b"\r\n") # Terminal chunk chunks.append(b"0\r\n\r\n") full_response = b"".join(chunks) assert b"7\r\nHello, \r\n" in full_response assert b"6\r\nWorld!\r\n" in full_response assert full_response.endswith(b"0\r\n\r\n") # ============================================================================ # ASGI Streaming Response Tests # ============================================================================ class TestASGIStreamingResponse: """Tests for ASGI streaming response handling.""" def test_streaming_response_more_body_true(self): """Test streaming response with more_body=True.""" messages = [ { "type": "http.response.body", "body": b"chunk1", "more_body": True, }, { "type": "http.response.body", "body": b"chunk2", "more_body": True, }, { "type": "http.response.body", "body": b"chunk3", "more_body": False, }, ] assert messages[0]["more_body"] is True assert messages[1]["more_body"] is True assert messages[2]["more_body"] is False def test_streaming_response_empty_final_chunk(self): """Test streaming response with empty final chunk.""" final_message = { "type": "http.response.body", "body": b"", "more_body": False, } assert final_message["body"] == b"" assert final_message["more_body"] is False def test_response_start_without_content_length(self): """Test response start without Content-Length triggers chunked encoding.""" # When Content-Length is missing, HTTP/1.1 should use chunked encoding message = { "type": "http.response.start", "status": 200, "headers": [ (b"content-type", b"text/plain"), # No content-length header ], } # Check no content-length in headers header_names = [name.lower() for name, _ in message["headers"]] assert b"content-length" not in header_names # ============================================================================ # Server-Sent Events (SSE) Format Tests # ============================================================================ class TestSSEFormat: """Tests for Server-Sent 
Events format.""" def test_sse_data_event(self): """Test SSE data event format.""" data = "Hello, SSE!" event = f"data: {data}\n\n" assert event == "data: Hello, SSE!\n\n" def test_sse_named_event(self): """Test SSE named event format.""" event_name = "message" data = "Hello" event = f"event: {event_name}\ndata: {data}\n\n" assert "event: message\n" in event assert "data: Hello\n" in event assert event.endswith("\n\n") def test_sse_event_with_id(self): """Test SSE event with ID.""" event_id = "12345" data = "Some data" event = f"id: {event_id}\ndata: {data}\n\n" assert "id: 12345\n" in event def test_sse_multiline_data(self): """Test SSE multiline data.""" lines = ["line1", "line2", "line3"] data_lines = "\n".join(f"data: {line}" for line in lines) event = f"{data_lines}\n\n" assert event == "data: line1\ndata: line2\ndata: line3\n\n" def test_sse_retry_directive(self): """Test SSE retry directive.""" retry_ms = 3000 directive = f"retry: {retry_ms}\n\n" assert directive == "retry: 3000\n\n" def test_sse_comment(self): """Test SSE comment (keep-alive).""" comment = ": keep-alive\n\n" assert comment.startswith(":") def test_sse_content_type(self): """Test SSE Content-Type header.""" headers = [ (b"content-type", b"text/event-stream"), (b"cache-control", b"no-cache"), (b"connection", b"keep-alive"), ] content_type = dict(headers).get(b"content-type") assert content_type == b"text/event-stream" # ============================================================================ # Protocol Send Body Tests # ============================================================================ class TestProtocolSendBody: """Tests for ASGIProtocol._send_body method.""" def _create_protocol(self): """Create an ASGIProtocol instance for testing.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.Mock() worker.cfg = Config() worker.log = mock.Mock() worker.asgi = mock.Mock() protocol = ASGIProtocol(worker) protocol.transport = mock.Mock() return protocol @pytest.mark.asyncio 
async def test_send_body_without_chunking(self): """Test sending body without chunked encoding.""" protocol = self._create_protocol() await protocol._send_body(b"Hello, World!", chunked=False) protocol.transport.write.assert_called_once_with(b"Hello, World!") @pytest.mark.asyncio async def test_send_body_with_chunking(self): """Test sending body with chunked encoding.""" protocol = self._create_protocol() await protocol._send_body(b"Hello", chunked=True) # Should write: "5\r\nHello\r\n" protocol.transport.write.assert_called_once() call_arg = protocol.transport.write.call_args[0][0] assert call_arg == b"5\r\nHello\r\n" @pytest.mark.asyncio async def test_send_body_empty_without_chunking(self): """Test sending empty body without chunked encoding.""" protocol = self._create_protocol() await protocol._send_body(b"", chunked=False) # Empty body should not write anything protocol.transport.write.assert_not_called() @pytest.mark.asyncio async def test_send_body_empty_with_chunking(self): """Test sending empty body with chunked encoding.""" protocol = self._create_protocol() await protocol._send_body(b"", chunked=True) # Empty body should not write (terminal chunk handled separately) protocol.transport.write.assert_not_called() # ============================================================================ # Content-Length Detection Tests # ============================================================================ class TestContentLengthDetection: """Tests for Content-Length header detection.""" def test_has_content_length_bytes(self): """Test detecting Content-Length header (bytes).""" headers = [ (b"content-type", b"text/plain"), (b"content-length", b"100"), ] has_cl = any( name.lower() == b"content-length" for name, _ in headers ) assert has_cl is True def test_has_content_length_string(self): """Test detecting Content-Length header (string).""" headers = [ ("content-type", "text/plain"), ("content-length", "100"), ] has_cl = any( name.lower() == "content-length" for 
name, _ in headers ) assert has_cl is True def test_no_content_length(self): """Test when Content-Length is missing.""" headers = [ (b"content-type", b"text/plain"), ] has_cl = any( name.lower() == b"content-length" for name, _ in headers ) assert has_cl is False def test_content_length_case_insensitive(self): """Test Content-Length detection is case-insensitive.""" headers = [ (b"Content-Length", b"100"), ] has_cl = any( name.lower() == b"content-length" for name, _ in headers ) assert has_cl is True # ============================================================================ # HTTP Version Check for Chunked Encoding # ============================================================================ class TestHTTPVersionForChunked: """Tests for HTTP version requirements for chunked encoding.""" def test_http11_supports_chunked(self): """Test HTTP/1.1 supports chunked encoding.""" version = (1, 1) supports_chunked = version >= (1, 1) assert supports_chunked is True def test_http10_no_chunked(self): """Test HTTP/1.0 does not support chunked encoding.""" version = (1, 0) supports_chunked = version >= (1, 1) assert supports_chunked is False def test_http2_no_chunked(self): """Test HTTP/2 doesn't use chunked encoding (uses framing).""" # HTTP/2 has its own framing mechanism version = (2, 0) # Chunked encoding is not used in HTTP/2 uses_http1_chunked = version[0] == 1 and version >= (1, 1) assert uses_http1_chunked is False # ============================================================================ # Streaming Response Message Sequence Tests # ============================================================================ class TestStreamingMessageSequence: """Tests for valid streaming response message sequences.""" def test_valid_sequence_single_body(self): """Test valid sequence: start -> body (more_body=False).""" messages = [ {"type": "http.response.start", "status": 200, "headers": []}, {"type": "http.response.body", "body": b"Hello", "more_body": False}, ] # First 
message should be start assert messages[0]["type"] == "http.response.start" # Last body message should have more_body=False assert messages[-1]["type"] == "http.response.body" assert messages[-1]["more_body"] is False def test_valid_sequence_multiple_bodies(self): """Test valid sequence: start -> body (more=True) -> body (more=False).""" messages = [ {"type": "http.response.start", "status": 200, "headers": []}, {"type": "http.response.body", "body": b"chunk1", "more_body": True}, {"type": "http.response.body", "body": b"chunk2", "more_body": True}, {"type": "http.response.body", "body": b"", "more_body": False}, ] # Verify sequence assert messages[0]["type"] == "http.response.start" assert all(m["more_body"] for m in messages[1:-1]) assert messages[-1]["more_body"] is False def test_valid_sequence_with_informational(self): """Test valid sequence with informational response.""" messages = [ { "type": "http.response.informational", "status": 103, "headers": [(b"link", b"; rel=preload")], }, {"type": "http.response.start", "status": 200, "headers": []}, {"type": "http.response.body", "body": b"Hello", "more_body": False}, ] # Informational before start is valid assert messages[0]["type"] == "http.response.informational" assert messages[1]["type"] == "http.response.start" # ============================================================================ # Large Response Tests # ============================================================================ class TestLargeResponses: """Tests for handling large responses.""" def test_chunk_size_encoding(self): """Test chunk size encoding for various sizes.""" test_cases = [ (1, b"1\r\n"), (10, b"a\r\n"), (15, b"f\r\n"), (16, b"10\r\n"), (255, b"ff\r\n"), (256, b"100\r\n"), (1024, b"400\r\n"), (65535, b"ffff\r\n"), (1048576, b"100000\r\n"), # 1MB ] for size, expected in test_cases: chunk_header = f"{size:x}\r\n".encode("latin-1") assert chunk_header == expected, f"Failed for size {size}" def test_megabyte_chunk(self): """Test 
encoding 1MB chunk.""" size = 1024 * 1024 # 1MB body = b"x" * size chunk = f"{len(body):x}\r\n".encode("latin-1") + body + b"\r\n" # Verify structure assert chunk.startswith(b"100000\r\n") # 1MB in hex assert chunk.endswith(b"\r\n") # Total size: header (8) + body (1048576) + trailer (2) assert len(chunk) == 8 + 1048576 + 2 # ============================================================================ # Transfer-Encoding Header Tests # ============================================================================ class TestTransferEncodingHeader: """Tests for Transfer-Encoding header handling.""" def test_transfer_encoding_chunked(self): """Test Transfer-Encoding: chunked header.""" headers = [(b"transfer-encoding", b"chunked")] te_header = dict(headers).get(b"transfer-encoding") assert te_header == b"chunked" def test_add_transfer_encoding_to_headers(self): """Test adding Transfer-Encoding header to response.""" headers = [ (b"content-type", b"text/plain"), ] # Add chunked encoding headers = list(headers) + [(b"transfer-encoding", b"chunked")] header_names = [name for name, _ in headers] assert b"transfer-encoding" in header_names def test_no_content_length_with_transfer_encoding(self): """Test Content-Length should not be present with Transfer-Encoding.""" # Per HTTP spec, Content-Length must be ignored if Transfer-Encoding present headers = [ (b"content-type", b"text/plain"), (b"transfer-encoding", b"chunked"), ] header_names = [name for name, _ in headers] assert b"content-length" not in header_names benoitc-gunicorn-f5fb19e/tests/test_asgi_uwsgi.py000066400000000000000000000317041514360242400224040ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Tests for ASGI uWSGI protocol parser. 
""" import pytest from gunicorn.asgi.unreader import AsyncUnreader from gunicorn.asgi.uwsgi import AsyncUWSGIRequest from gunicorn.uwsgi.errors import ( InvalidUWSGIHeader, UnsupportedModifier, ForbiddenUWSGIRequest, ) class MockStreamReader: """Mock asyncio.StreamReader for testing.""" def __init__(self, data): self.data = data self.pos = 0 async def read(self, size=-1): if self.pos >= len(self.data): return b"" if size < 0: result = self.data[self.pos:] self.pos = len(self.data) else: result = self.data[self.pos:self.pos + size] self.pos += size return result class MockConfig: """Mock gunicorn config for testing.""" def __init__(self): self.is_ssl = False self.uwsgi_allow_ips = ['*'] # Allow all for most tests def build_uwsgi_packet(vars_dict, modifier1=0, modifier2=0): """Build a uWSGI packet from a dictionary of variables. Args: vars_dict: Dictionary of uWSGI variables modifier1: uWSGI modifier1 (default 0 for WSGI) modifier2: uWSGI modifier2 (default 0) Returns: bytes: Complete uWSGI packet """ vars_data = b"" for key, value in vars_dict.items(): key_bytes = key.encode('latin-1') value_bytes = value.encode('latin-1') vars_data += len(key_bytes).to_bytes(2, 'little') vars_data += key_bytes vars_data += len(value_bytes).to_bytes(2, 'little') vars_data += value_bytes # Build header: modifier1 (1 byte) + datasize (2 bytes LE) + modifier2 (1 byte) header = bytes([modifier1]) header += len(vars_data).to_bytes(2, 'little') header += bytes([modifier2]) return header + vars_data # Basic parsing tests @pytest.mark.asyncio async def test_parse_simple_get(): """Test parsing a simple GET request.""" vars_dict = { 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/test', 'QUERY_STRING': '', 'HTTP_HOST': 'localhost', } packet = build_uwsgi_packet(vars_dict) reader = MockStreamReader(packet) unreader = AsyncUnreader(reader) cfg = MockConfig() request = await AsyncUWSGIRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) assert request.method == "GET" assert request.path == "/test" assert 
request.query == "" assert request.uri == "/test" assert request.version == (1, 1) @pytest.mark.asyncio async def test_parse_get_with_query(): """Test parsing GET request with query string.""" vars_dict = { 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/search', 'QUERY_STRING': 'q=test&page=1', } packet = build_uwsgi_packet(vars_dict) reader = MockStreamReader(packet) unreader = AsyncUnreader(reader) cfg = MockConfig() request = await AsyncUWSGIRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) assert request.method == "GET" assert request.path == "/search" assert request.query == "q=test&page=1" assert request.uri == "/search?q=test&page=1" @pytest.mark.asyncio async def test_parse_post_with_content_length(): """Test parsing POST request with content length.""" body = b"hello=world" vars_dict = { 'REQUEST_METHOD': 'POST', 'PATH_INFO': '/submit', 'CONTENT_LENGTH': str(len(body)), 'CONTENT_TYPE': 'application/x-www-form-urlencoded', } packet = build_uwsgi_packet(vars_dict) + body reader = MockStreamReader(packet) unreader = AsyncUnreader(reader) cfg = MockConfig() request = await AsyncUWSGIRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) assert request.method == "POST" assert request.path == "/submit" assert request.content_length == len(body) # Read body read_body = await request.read_body(100) assert read_body == body @pytest.mark.asyncio async def test_parse_headers(): """Test that HTTP headers are correctly extracted.""" vars_dict = { 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/', 'HTTP_HOST': 'example.com', 'HTTP_ACCEPT': 'text/html', 'HTTP_X_CUSTOM_HEADER': 'custom-value', 'CONTENT_TYPE': 'text/plain', 'CONTENT_LENGTH': '0', } packet = build_uwsgi_packet(vars_dict) reader = MockStreamReader(packet) unreader = AsyncUnreader(reader) cfg = MockConfig() request = await AsyncUWSGIRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) # Check headers were extracted correctly assert request.get_header('HOST') == 'example.com' assert request.get_header('ACCEPT') == 'text/html' assert 
request.get_header('X-CUSTOM-HEADER') == 'custom-value' assert request.get_header('CONTENT-TYPE') == 'text/plain' assert request.get_header('CONTENT-LENGTH') == '0' @pytest.mark.asyncio async def test_parse_https_scheme(): """Test HTTPS scheme detection.""" vars_dict = { 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/', 'HTTPS': 'on', } packet = build_uwsgi_packet(vars_dict) reader = MockStreamReader(packet) unreader = AsyncUnreader(reader) cfg = MockConfig() request = await AsyncUWSGIRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) assert request.scheme == 'https' @pytest.mark.asyncio async def test_parse_wsgi_url_scheme(): """Test wsgi.url_scheme variable.""" vars_dict = { 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/', 'wsgi.url_scheme': 'https', } packet = build_uwsgi_packet(vars_dict) reader = MockStreamReader(packet) unreader = AsyncUnreader(reader) cfg = MockConfig() request = await AsyncUWSGIRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) assert request.scheme == 'https' # Body reading tests @pytest.mark.asyncio async def test_read_body_chunks(): """Test reading body in chunks.""" body = b"a" * 100 vars_dict = { 'REQUEST_METHOD': 'POST', 'PATH_INFO': '/', 'CONTENT_LENGTH': str(len(body)), } packet = build_uwsgi_packet(vars_dict) + body reader = MockStreamReader(packet) unreader = AsyncUnreader(reader) cfg = MockConfig() request = await AsyncUWSGIRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) # Read in chunks chunks = [] while True: chunk = await request.read_body(30) if not chunk: break chunks.append(chunk) assert b"".join(chunks) == body @pytest.mark.asyncio async def test_drain_body(): """Test draining unread body.""" body = b"x" * 50 vars_dict = { 'REQUEST_METHOD': 'POST', 'PATH_INFO': '/', 'CONTENT_LENGTH': str(len(body)), } packet = build_uwsgi_packet(vars_dict) + body reader = MockStreamReader(packet) unreader = AsyncUnreader(reader) cfg = MockConfig() request = await AsyncUWSGIRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) # Drain without reading await 
request.drain_body() # Further reads should return empty chunk = await request.read_body() assert chunk == b"" @pytest.mark.asyncio async def test_no_body(): """Test request with no body.""" vars_dict = { 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/', } packet = build_uwsgi_packet(vars_dict) reader = MockStreamReader(packet) unreader = AsyncUnreader(reader) cfg = MockConfig() request = await AsyncUWSGIRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) assert request.content_length == 0 chunk = await request.read_body() assert chunk == b"" # Connection handling tests @pytest.mark.asyncio async def test_should_close_default(): """Test default keepalive behavior.""" vars_dict = { 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/', } packet = build_uwsgi_packet(vars_dict) reader = MockStreamReader(packet) unreader = AsyncUnreader(reader) cfg = MockConfig() request = await AsyncUWSGIRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) # Default should be keep-alive (HTTP/1.1 behavior) assert request.should_close() is False @pytest.mark.asyncio async def test_should_close_connection_close(): """Test connection close header.""" vars_dict = { 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/', 'HTTP_CONNECTION': 'close', } packet = build_uwsgi_packet(vars_dict) reader = MockStreamReader(packet) unreader = AsyncUnreader(reader) cfg = MockConfig() request = await AsyncUWSGIRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) assert request.should_close() is True @pytest.mark.asyncio async def test_should_close_keepalive(): """Test connection keep-alive header.""" vars_dict = { 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/', 'HTTP_CONNECTION': 'keep-alive', } packet = build_uwsgi_packet(vars_dict) reader = MockStreamReader(packet) unreader = AsyncUnreader(reader) cfg = MockConfig() request = await AsyncUWSGIRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) assert request.should_close() is False # Error handling tests @pytest.mark.asyncio async def test_incomplete_header(): """Test incomplete header raises 
error.""" # Only 2 bytes instead of 4 data = b"\x00\x00" reader = MockStreamReader(data) unreader = AsyncUnreader(reader) cfg = MockConfig() with pytest.raises(InvalidUWSGIHeader): await AsyncUWSGIRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) @pytest.mark.asyncio async def test_unsupported_modifier(): """Test unsupported modifier1 raises error.""" # modifier1 = 1 (not WSGI) header = bytes([1, 0, 0, 0]) # modifier1=1, datasize=0, modifier2=0 reader = MockStreamReader(header) unreader = AsyncUnreader(reader) cfg = MockConfig() with pytest.raises(UnsupportedModifier): await AsyncUWSGIRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) @pytest.mark.asyncio async def test_incomplete_vars_block(): """Test incomplete vars block raises error.""" # Header says 100 bytes of vars, but only 10 provided header = bytes([0]) # modifier1=0 header += (100).to_bytes(2, 'little') # datasize=100 header += bytes([0]) # modifier2=0 header += b"x" * 10 # Only 10 bytes reader = MockStreamReader(header) unreader = AsyncUnreader(reader) cfg = MockConfig() with pytest.raises(InvalidUWSGIHeader): await AsyncUWSGIRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) @pytest.mark.asyncio async def test_forbidden_ip(): """Test forbidden IP raises error.""" vars_dict = { 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/', } packet = build_uwsgi_packet(vars_dict) reader = MockStreamReader(packet) unreader = AsyncUnreader(reader) cfg = MockConfig() cfg.uwsgi_allow_ips = ['10.0.0.1'] # Only allow 10.0.0.1 with pytest.raises(ForbiddenUWSGIRequest): await AsyncUWSGIRequest.parse(cfg, unreader, ("192.168.1.1", 8000)) @pytest.mark.asyncio async def test_allowed_ip(): """Test allowed IP succeeds.""" vars_dict = { 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/', } packet = build_uwsgi_packet(vars_dict) reader = MockStreamReader(packet) unreader = AsyncUnreader(reader) cfg = MockConfig() cfg.uwsgi_allow_ips = ['192.168.1.1'] # Should not raise request = await AsyncUWSGIRequest.parse(cfg, unreader, ("192.168.1.1", 8000)) 
assert request.method == "GET" @pytest.mark.asyncio async def test_unix_socket_allowed(): """Test UNIX socket connections are always allowed.""" vars_dict = { 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/', } packet = build_uwsgi_packet(vars_dict) reader = MockStreamReader(packet) unreader = AsyncUnreader(reader) cfg = MockConfig() cfg.uwsgi_allow_ips = ['10.0.0.1'] # Restrictive IP list # UNIX socket peer_addr is not a tuple request = await AsyncUWSGIRequest.parse(cfg, unreader, "/tmp/gunicorn.sock") assert request.method == "GET" # Empty vars block test @pytest.mark.asyncio async def test_empty_vars_block(): """Test request with empty vars block uses defaults.""" # Header with datasize=0 header = bytes([0, 0, 0, 0]) reader = MockStreamReader(header) unreader = AsyncUnreader(reader) cfg = MockConfig() request = await AsyncUWSGIRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) # Should use defaults assert request.method == "GET" assert request.path == "/" assert request.query == "" # SSL config test @pytest.mark.asyncio async def test_ssl_config_scheme(): """Test SSL config sets https scheme.""" vars_dict = { 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/', } packet = build_uwsgi_packet(vars_dict) reader = MockStreamReader(packet) unreader = AsyncUnreader(reader) cfg = MockConfig() cfg.is_ssl = True request = await AsyncUWSGIRequest.parse(cfg, unreader, ("127.0.0.1", 8000)) assert request.scheme == 'https' benoitc-gunicorn-f5fb19e/tests/test_asgi_websocket_protocol.py000066400000000000000000000557371514360242400251710ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ WebSocket RFC 6455 compliance tests. 
Tests that gunicorn's WebSocket implementation conforms to RFC 6455: https://tools.ietf.org/html/rfc6455 """ import asyncio import base64 import hashlib import struct from unittest import mock import pytest # ============================================================================ # WebSocket Constants Tests # ============================================================================ class TestWebSocketConstants: """Tests for WebSocket protocol constants.""" def test_websocket_guid(self): """Test WebSocket GUID per RFC 6455 Section 1.3.""" from gunicorn.asgi.websocket import WS_GUID # The GUID is a fixed value specified in RFC 6455 assert WS_GUID == b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11" def test_opcode_continuation(self): """Test continuation frame opcode (0x0).""" from gunicorn.asgi.websocket import OPCODE_CONTINUATION assert OPCODE_CONTINUATION == 0x0 def test_opcode_text(self): """Test text frame opcode (0x1).""" from gunicorn.asgi.websocket import OPCODE_TEXT assert OPCODE_TEXT == 0x1 def test_opcode_binary(self): """Test binary frame opcode (0x2).""" from gunicorn.asgi.websocket import OPCODE_BINARY assert OPCODE_BINARY == 0x2 def test_opcode_close(self): """Test close frame opcode (0x8).""" from gunicorn.asgi.websocket import OPCODE_CLOSE assert OPCODE_CLOSE == 0x8 def test_opcode_ping(self): """Test ping frame opcode (0x9).""" from gunicorn.asgi.websocket import OPCODE_PING assert OPCODE_PING == 0x9 def test_opcode_pong(self): """Test pong frame opcode (0xA).""" from gunicorn.asgi.websocket import OPCODE_PONG assert OPCODE_PONG == 0xA # ============================================================================ # WebSocket Close Codes Tests (RFC 6455 Section 7.4.1) # ============================================================================ class TestWebSocketCloseCodes: """Tests for WebSocket close status codes.""" def test_close_normal(self): """Test normal closure code (1000).""" from gunicorn.asgi.websocket import CLOSE_NORMAL assert 
CLOSE_NORMAL == 1000 def test_close_going_away(self): """Test going away code (1001).""" from gunicorn.asgi.websocket import CLOSE_GOING_AWAY assert CLOSE_GOING_AWAY == 1001 def test_close_protocol_error(self): """Test protocol error code (1002).""" from gunicorn.asgi.websocket import CLOSE_PROTOCOL_ERROR assert CLOSE_PROTOCOL_ERROR == 1002 def test_close_unsupported(self): """Test unsupported data code (1003).""" from gunicorn.asgi.websocket import CLOSE_UNSUPPORTED assert CLOSE_UNSUPPORTED == 1003 def test_close_no_status(self): """Test no status received code (1005).""" from gunicorn.asgi.websocket import CLOSE_NO_STATUS assert CLOSE_NO_STATUS == 1005 def test_close_abnormal(self): """Test abnormal closure code (1006).""" from gunicorn.asgi.websocket import CLOSE_ABNORMAL assert CLOSE_ABNORMAL == 1006 def test_close_invalid_data(self): """Test invalid frame payload data code (1007).""" from gunicorn.asgi.websocket import CLOSE_INVALID_DATA assert CLOSE_INVALID_DATA == 1007 def test_close_policy_violation(self): """Test policy violation code (1008).""" from gunicorn.asgi.websocket import CLOSE_POLICY_VIOLATION assert CLOSE_POLICY_VIOLATION == 1008 def test_close_message_too_big(self): """Test message too big code (1009).""" from gunicorn.asgi.websocket import CLOSE_MESSAGE_TOO_BIG assert CLOSE_MESSAGE_TOO_BIG == 1009 def test_close_mandatory_ext(self): """Test mandatory extension code (1010).""" from gunicorn.asgi.websocket import CLOSE_MANDATORY_EXT assert CLOSE_MANDATORY_EXT == 1010 def test_close_internal_error(self): """Test internal server error code (1011).""" from gunicorn.asgi.websocket import CLOSE_INTERNAL_ERROR assert CLOSE_INTERNAL_ERROR == 1011 # ============================================================================ # WebSocket Handshake Tests (RFC 6455 Section 4.2.2) # ============================================================================ class TestWebSocketHandshake: """Tests for WebSocket handshake implementation.""" def 
test_accept_key_calculation(self): """Test Sec-WebSocket-Accept key calculation per RFC 6455.""" from gunicorn.asgi.websocket import WS_GUID # Example from RFC 6455 Section 1.3 client_key = b"dGhlIHNhbXBsZSBub25jZQ==" expected_accept = "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=" # Calculation: Base64(SHA-1(client_key + GUID)) accept_key = base64.b64encode( hashlib.sha1(client_key + WS_GUID).digest() ).decode("ascii") assert accept_key == expected_accept def test_accept_key_another_example(self): """Test accept key calculation with another key.""" from gunicorn.asgi.websocket import WS_GUID # Another example key client_key = b"x3JJHMbDL1EzLkh9GBhXDw==" accept_key = base64.b64encode( hashlib.sha1(client_key + WS_GUID).digest() ).decode("ascii") # Verify it's a valid base64 string assert len(accept_key) == 28 # SHA-1 hash is 20 bytes, base64 encoded # Verify we can decode it decoded = base64.b64decode(accept_key) assert len(decoded) == 20 # SHA-1 produces 20 bytes # ============================================================================ # WebSocket Frame Masking Tests (RFC 6455 Section 5.3) # ============================================================================ class TestWebSocketFrameMasking: """Tests for WebSocket frame masking/unmasking.""" def _create_protocol(self): """Create a WebSocketProtocol instance for testing.""" from gunicorn.asgi.websocket import WebSocketProtocol return WebSocketProtocol(None, None, {}, None, mock.Mock()) def test_unmask_simple(self): """Test basic unmasking operation.""" protocol = self._create_protocol() # Mask key and masked "Hello" masking_key = bytes([0x37, 0xfa, 0x21, 0x3d]) # H=0x48, e=0x65, l=0x6c, l=0x6c, o=0x6f # Masked: 0x48^0x37=0x7f, 0x65^0xfa=0x9f, 0x6c^0x21=0x4d, 0x6c^0x3d=0x51, 0x6f^0x37=0x58 masked_data = bytes([0x7f, 0x9f, 0x4d, 0x51, 0x58]) unmasked = protocol._unmask(masked_data, masking_key) assert unmasked == b"Hello" def test_unmask_empty(self): """Test unmasking empty payload.""" protocol = self._create_protocol() 
masking_key = bytes([0x37, 0xfa, 0x21, 0x3d]) unmasked = protocol._unmask(b"", masking_key) assert unmasked == b"" def test_unmask_longer_message(self): """Test unmasking message longer than mask key.""" protocol = self._create_protocol() # The mask cycles every 4 bytes masking_key = bytes([0x01, 0x02, 0x03, 0x04]) message = b"12345678" # 8 bytes # Manually mask masked = bytes(b ^ masking_key[i % 4] for i, b in enumerate(message)) # Unmask should give back original unmasked = protocol._unmask(masked, masking_key) assert unmasked == message def test_unmask_binary_data(self): """Test unmasking binary data.""" protocol = self._create_protocol() masking_key = bytes([0xAB, 0xCD, 0xEF, 0x01]) original = bytes([0x00, 0xFF, 0x80, 0x7F, 0x01]) # Mask the data masked = bytes(b ^ masking_key[i % 4] for i, b in enumerate(original)) # Unmask should give back original unmasked = protocol._unmask(masked, masking_key) assert unmasked == original # ============================================================================ # WebSocket Frame Format Tests (RFC 6455 Section 5.2) # ============================================================================ class TestWebSocketFrameFormat: """Tests for WebSocket frame format handling.""" def test_frame_header_structure(self): """Test understanding of WebSocket frame header structure.""" # First byte: FIN(1) + RSV1(1) + RSV2(1) + RSV3(1) + OPCODE(4) # Second byte: MASK(1) + PAYLOAD_LEN(7) # Text frame, FIN=1, no RSV bits, opcode=0x1 first_byte = 0b10000001 # 0x81 assert (first_byte >> 7) & 1 == 1 # FIN assert (first_byte >> 6) & 1 == 0 # RSV1 assert (first_byte >> 5) & 1 == 0 # RSV2 assert (first_byte >> 4) & 1 == 0 # RSV3 assert first_byte & 0x0F == 1 # OPCODE (text) def test_payload_length_7bit(self): """Test 7-bit payload length encoding (0-125).""" # Payload length 100 second_byte = 0b10000000 | 100 # MASK=1, length=100 assert (second_byte >> 7) & 1 == 1 # MASK bit assert second_byte & 0x7F == 100 # Length def 
test_payload_length_16bit(self): """Test 16-bit payload length encoding (126 indicator).""" # Length 126 indicates next 2 bytes contain the length second_byte = 0b10000000 | 126 # MASK=1, length indicator=126 assert second_byte & 0x7F == 126 # Extended length as big-endian 16-bit extended_length = 1000 packed = struct.pack("!H", extended_length) assert struct.unpack("!H", packed)[0] == 1000 def test_payload_length_64bit(self): """Test 64-bit payload length encoding (127 indicator).""" # Length 127 indicates next 8 bytes contain the length second_byte = 0b10000000 | 127 # MASK=1, length indicator=127 assert second_byte & 0x7F == 127 # Extended length as big-endian 64-bit extended_length = 100000 packed = struct.pack("!Q", extended_length) assert struct.unpack("!Q", packed)[0] == 100000 # ============================================================================ # WebSocket Protocol Instance Tests # ============================================================================ class TestWebSocketProtocolInstance: """Tests for WebSocketProtocol instance state.""" def _create_protocol(self, scope=None): """Create a WebSocketProtocol instance.""" from gunicorn.asgi.websocket import WebSocketProtocol if scope is None: scope = { "type": "websocket", "headers": [], } return WebSocketProtocol( transport=mock.Mock(), reader=mock.Mock(), scope=scope, app=mock.AsyncMock(), log=mock.Mock(), ) def test_initial_state(self): """Test initial protocol state.""" protocol = self._create_protocol() assert protocol.accepted is False assert protocol.closed is False assert protocol.close_code is None assert protocol.close_reason == "" def test_fragment_state_initial(self): """Test initial fragment reassembly state.""" protocol = self._create_protocol() assert protocol._fragments == [] assert protocol._fragment_opcode is None # ============================================================================ # WebSocket ASGI Message Format Tests # 
============================================================================ class TestWebSocketASGIMessages: """Tests for WebSocket ASGI message formats.""" def test_websocket_connect_message(self): """Test websocket.connect message format.""" message = {"type": "websocket.connect"} assert message["type"] == "websocket.connect" def test_websocket_accept_message(self): """Test websocket.accept message format.""" message = { "type": "websocket.accept", "subprotocol": "graphql-ws", "headers": [ (b"x-custom-header", b"value"), ], } assert message["type"] == "websocket.accept" assert message["subprotocol"] == "graphql-ws" def test_websocket_accept_minimal(self): """Test minimal websocket.accept message.""" message = {"type": "websocket.accept"} assert message["type"] == "websocket.accept" def test_websocket_receive_text_message(self): """Test websocket.receive message with text.""" message = { "type": "websocket.receive", "text": "Hello, WebSocket!", } assert message["type"] == "websocket.receive" assert "text" in message assert isinstance(message["text"], str) def test_websocket_receive_binary_message(self): """Test websocket.receive message with binary data.""" message = { "type": "websocket.receive", "bytes": b"\x00\x01\x02\x03", } assert message["type"] == "websocket.receive" assert "bytes" in message assert isinstance(message["bytes"], bytes) def test_websocket_send_text_message(self): """Test websocket.send message with text.""" message = { "type": "websocket.send", "text": "Response text", } assert message["type"] == "websocket.send" assert message["text"] == "Response text" def test_websocket_send_binary_message(self): """Test websocket.send message with binary.""" message = { "type": "websocket.send", "bytes": b"\xFF\xFE\xFD", } assert message["type"] == "websocket.send" assert message["bytes"] == b"\xFF\xFE\xFD" def test_websocket_disconnect_message(self): """Test websocket.disconnect message format.""" message = { "type": "websocket.disconnect", "code": 
1000, } assert message["type"] == "websocket.disconnect" assert message["code"] == 1000 def test_websocket_close_message(self): """Test websocket.close message format.""" message = { "type": "websocket.close", "code": 1000, "reason": "Normal closure", } assert message["type"] == "websocket.close" assert message["code"] == 1000 assert message["reason"] == "Normal closure" # ============================================================================ # WebSocket Upgrade Detection Tests # ============================================================================ class TestWebSocketUpgradeDetection: """Tests for WebSocket upgrade request detection.""" def _create_protocol(self): """Create an ASGIProtocol instance for testing.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.Mock() from gunicorn.config import Config worker.cfg = Config() worker.log = mock.Mock() worker.asgi = mock.Mock() return ASGIProtocol(worker) def _create_mock_request(self, method="GET", headers=None): """Create a mock HTTP request.""" request = mock.Mock() request.method = method request.headers = headers or [] return request def test_valid_websocket_upgrade(self): """Test detection of valid WebSocket upgrade request.""" protocol = self._create_protocol() request = self._create_mock_request( method="GET", headers=[ ("UPGRADE", "websocket"), ("CONNECTION", "upgrade"), ] ) assert protocol._is_websocket_upgrade(request) is True def test_websocket_upgrade_case_insensitive(self): """Test WebSocket upgrade detection is case-insensitive.""" protocol = self._create_protocol() request = self._create_mock_request( method="GET", headers=[ ("UPGRADE", "WebSocket"), ("CONNECTION", "Upgrade"), ] ) assert protocol._is_websocket_upgrade(request) is True def test_websocket_upgrade_connection_with_keep_alive(self): """Test WebSocket upgrade with Connection: upgrade, keep-alive.""" protocol = self._create_protocol() request = self._create_mock_request( method="GET", headers=[ ("UPGRADE", 
"websocket"), ("CONNECTION", "upgrade, keep-alive"), ] ) assert protocol._is_websocket_upgrade(request) is True def test_not_websocket_wrong_method(self): """Test non-GET methods are not WebSocket upgrades.""" protocol = self._create_protocol() for method in ["POST", "PUT", "DELETE", "PATCH", "HEAD", "OPTIONS"]: request = self._create_mock_request( method=method, headers=[ ("UPGRADE", "websocket"), ("CONNECTION", "upgrade"), ] ) assert protocol._is_websocket_upgrade(request) is False def test_not_websocket_missing_upgrade(self): """Test missing Upgrade header.""" protocol = self._create_protocol() request = self._create_mock_request( method="GET", headers=[ ("CONNECTION", "upgrade"), ] ) assert protocol._is_websocket_upgrade(request) is False def test_not_websocket_missing_connection(self): """Test missing Connection header.""" protocol = self._create_protocol() request = self._create_mock_request( method="GET", headers=[ ("UPGRADE", "websocket"), ] ) # Result should be falsy (None or False) when Connection header is missing assert not protocol._is_websocket_upgrade(request) def test_not_websocket_wrong_upgrade_value(self): """Test Upgrade header with wrong value.""" protocol = self._create_protocol() request = self._create_mock_request( method="GET", headers=[ ("UPGRADE", "h2c"), ("CONNECTION", "upgrade"), ] ) assert protocol._is_websocket_upgrade(request) is False # ============================================================================ # WebSocket Close Frame Tests # ============================================================================ class TestWebSocketCloseFrame: """Tests for WebSocket close frame handling.""" def test_close_frame_payload_format(self): """Test close frame payload format (code + reason).""" from gunicorn.asgi.websocket import CLOSE_NORMAL code = CLOSE_NORMAL reason = "Goodbye" # Close frame payload: 2-byte big-endian code + UTF-8 reason payload = struct.pack("!H", code) + reason.encode("utf-8") # Parse it back parsed_code = 
struct.unpack("!H", payload[:2])[0] parsed_reason = payload[2:].decode("utf-8") assert parsed_code == 1000 assert parsed_reason == "Goodbye" def test_close_frame_empty_reason(self): """Test close frame with empty reason.""" from gunicorn.asgi.websocket import CLOSE_NORMAL payload = struct.pack("!H", CLOSE_NORMAL) parsed_code = struct.unpack("!H", payload[:2])[0] parsed_reason = payload[2:].decode("utf-8") assert parsed_code == 1000 assert parsed_reason == "" def test_close_frame_max_reason_length(self): """Test close frame reason max length (125 - 2 = 123 bytes).""" from gunicorn.asgi.websocket import CLOSE_NORMAL # Control frames have max 125 bytes payload # 2 bytes for code, leaving 123 for reason max_reason = "x" * 123 payload = struct.pack("!H", CLOSE_NORMAL) + max_reason.encode("utf-8") assert len(payload) == 125 # Max control frame payload # ============================================================================ # Async WebSocket Tests # ============================================================================ class TestWebSocketAsync: """Async tests for WebSocket protocol.""" def _create_protocol(self, scope=None): """Create a WebSocketProtocol instance.""" from gunicorn.asgi.websocket import WebSocketProtocol if scope is None: scope = { "type": "websocket", "headers": [(b"sec-websocket-key", b"dGhlIHNhbXBsZSBub25jZQ==")], } transport = mock.Mock() reader = mock.Mock() return WebSocketProtocol( transport=transport, reader=reader, scope=scope, app=mock.AsyncMock(), log=mock.Mock(), ) @pytest.mark.asyncio async def test_receive_returns_from_queue(self): """Test that _receive returns items from queue.""" protocol = self._create_protocol() # Put a message on the queue await protocol._receive_queue.put({"type": "websocket.connect"}) # Receive should return it message = await protocol._receive() assert message["type"] == "websocket.connect" @pytest.mark.asyncio async def test_send_accept_sets_flag(self): """Test that sending accept sets the accepted 
flag.""" protocol = self._create_protocol() # Configure mock transport protocol.transport.write = mock.Mock() await protocol._send({"type": "websocket.accept"}) assert protocol.accepted is True @pytest.mark.asyncio async def test_send_accept_twice_raises(self): """Test that accepting twice raises RuntimeError.""" protocol = self._create_protocol() protocol.transport.write = mock.Mock() await protocol._send({"type": "websocket.accept"}) with pytest.raises(RuntimeError, match="already accepted"): await protocol._send({"type": "websocket.accept"}) @pytest.mark.asyncio async def test_send_before_accept_raises(self): """Test that sending data before accept raises RuntimeError.""" protocol = self._create_protocol() with pytest.raises(RuntimeError, match="not accepted"): await protocol._send({"type": "websocket.send", "text": "hello"}) @pytest.mark.asyncio async def test_send_after_close_raises(self): """Test that sending after close raises RuntimeError.""" protocol = self._create_protocol() protocol.transport.write = mock.Mock() await protocol._send({"type": "websocket.accept"}) protocol.closed = True with pytest.raises(RuntimeError, match="closed"): await protocol._send({"type": "websocket.send", "text": "hello"}) @pytest.mark.asyncio async def test_send_close_sets_flag(self): """Test that sending close sets the closed flag.""" protocol = self._create_protocol() protocol.transport.write = mock.Mock() await protocol._send({"type": "websocket.close", "code": 1000}) assert protocol.closed is True benoitc-gunicorn-f5fb19e/tests/test_asgi_worker.py000066400000000000000000000622351514360242400225620ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Tests for the ASGI worker. Includes unit tests for worker components and integration tests that actually start the server and make HTTP requests. 
""" import asyncio import errno import os import signal import socket import sys import time import threading from unittest import mock import pytest from gunicorn.config import Config from gunicorn.workers import gasgi # ============================================================================ # Mock Classes # ============================================================================ class FakeSocket: """Mock socket for testing.""" def __init__(self, data=b''): self.data = data self.closed = False self.blocking = True self._fileno = id(self) % 65536 def fileno(self): return self._fileno def setblocking(self, blocking): self.blocking = blocking def recv(self, size): if self.closed: raise OSError(errno.EBADF, "Bad file descriptor") result = self.data[:size] self.data = self.data[size:] return result def send(self, data): if self.closed: raise OSError(errno.EPIPE, "Broken pipe") return len(data) def close(self): self.closed = True def getsockname(self): return ('127.0.0.1', 8000) def getpeername(self): return ('127.0.0.1', 12345) class FakeApp: """Mock ASGI application for testing.""" def __init__(self): self.calls = [] def wsgi(self): return self.asgi_app async def asgi_app(self, scope, receive, send): self.calls.append(scope) if scope["type"] == "lifespan": while True: message = await receive() if message["type"] == "lifespan.startup": await send({"type": "lifespan.startup.complete"}) elif message["type"] == "lifespan.shutdown": await send({"type": "lifespan.shutdown.complete"}) return elif scope["type"] == "http": await send({ "type": "http.response.start", "status": 200, "headers": [(b"content-type", b"text/plain")], }) await send({ "type": "http.response.body", "body": b"Hello from ASGI!", }) class FakeListener: """Mock listener socket.""" def __init__(self): self.sock = FakeSocket() def getsockname(self): return ('127.0.0.1', 8000) def close(self): self.sock.close() def __str__(self): return "http://127.0.0.1:8000" # 
============================================================================ # Helper Functions # ============================================================================ def _has_uvloop(): """Check if uvloop is available.""" try: import uvloop return True except ImportError: return False # ============================================================================ # Unit Tests for ASGIWorker # ============================================================================ class TestASGIWorkerInit: """Tests for ASGIWorker initialization.""" def create_worker(self, **kwargs): """Create a worker for testing.""" cfg = Config() cfg.set('workers', 1) cfg.set('worker_connections', 1000) for key, value in kwargs.items(): cfg.set(key, value) worker = gasgi.ASGIWorker( age=1, ppid=os.getpid(), sockets=[], app=FakeApp(), timeout=30, cfg=cfg, log=mock.Mock(), ) return worker def test_worker_init(self): """Test worker initialization.""" worker = self.create_worker() assert worker.worker_connections == 1000 assert worker.nr_conns == 0 assert worker.loop is None assert worker.servers == [] assert worker.state == {} def test_worker_connections_config(self): """Test worker_connections configuration.""" worker = self.create_worker(worker_connections=500) assert worker.worker_connections == 500 class TestASGIWorkerEventLoop: """Tests for event loop setup.""" def create_worker(self, **kwargs): """Create a worker for testing.""" cfg = Config() cfg.set('workers', 1) cfg.set('worker_connections', 1000) for key, value in kwargs.items(): cfg.set(key, value) worker = gasgi.ASGIWorker( age=1, ppid=os.getpid(), sockets=[], app=FakeApp(), timeout=30, cfg=cfg, log=mock.Mock(), ) return worker def test_setup_asyncio_loop(self): """Test asyncio event loop setup.""" worker = self.create_worker(asgi_loop='asyncio') worker._setup_event_loop() assert worker.loop is not None assert isinstance(worker.loop, asyncio.AbstractEventLoop) worker.loop.close() def 
test_setup_auto_loop_falls_back_to_asyncio(self): """Test that auto mode uses asyncio when uvloop unavailable.""" worker = self.create_worker(asgi_loop='auto') # Mock uvloop import failure with mock.patch.dict('sys.modules', {'uvloop': None}): worker._setup_event_loop() assert worker.loop is not None worker.loop.close() @pytest.mark.skipif( not _has_uvloop(), reason="uvloop not installed" ) def test_setup_uvloop(self): """Test uvloop event loop setup.""" worker = self.create_worker(asgi_loop='uvloop') worker._setup_event_loop() import uvloop assert isinstance(worker.loop, uvloop.Loop) worker.loop.close() class TestASGIWorkerSignals: """Tests for signal handling.""" def create_worker(self): """Create a worker for testing.""" cfg = Config() cfg.set('workers', 1) cfg.set('worker_connections', 1000) cfg.set('graceful_timeout', 5) worker = gasgi.ASGIWorker( age=1, ppid=os.getpid(), sockets=[], app=FakeApp(), timeout=30, cfg=cfg, log=mock.Mock(), ) worker._setup_event_loop() return worker def test_handle_exit_sets_alive_false(self): """Test that exit signal sets alive=False.""" worker = self.create_worker() worker.alive = True worker.handle_exit_signal() assert worker.alive is False worker.loop.close() def test_handle_quit_sets_alive_false(self): """Test that quit signal sets alive=False.""" worker = self.create_worker() worker.alive = True # Mock the worker_int callback on the worker's cfg settings with mock.patch.object(worker.cfg.settings['worker_int'], 'get', return_value=lambda w: None): worker.handle_quit_signal() assert worker.alive is False worker.loop.close() # ============================================================================ # Tests for Lifespan Protocol # ============================================================================ class TestLifespanManager: """Tests for ASGI lifespan protocol.""" @pytest.mark.asyncio async def test_lifespan_startup_complete(self): """Test successful lifespan startup.""" from gunicorn.asgi.lifespan import 
LifespanManager startup_called = False shutdown_called = False async def app(scope, receive, send): nonlocal startup_called, shutdown_called assert scope["type"] == "lifespan" while True: message = await receive() if message["type"] == "lifespan.startup": startup_called = True await send({"type": "lifespan.startup.complete"}) elif message["type"] == "lifespan.shutdown": shutdown_called = True await send({"type": "lifespan.shutdown.complete"}) return manager = LifespanManager(app, mock.Mock()) await manager.startup() assert startup_called assert manager._startup_complete.is_set() assert not manager._startup_failed await manager.shutdown() assert shutdown_called @pytest.mark.asyncio async def test_lifespan_startup_failed(self): """Test lifespan startup failure.""" from gunicorn.asgi.lifespan import LifespanManager async def app(scope, receive, send): message = await receive() if message["type"] == "lifespan.startup": await send({ "type": "lifespan.startup.failed", "message": "Database connection failed" }) manager = LifespanManager(app, mock.Mock()) with pytest.raises(RuntimeError, match="Database connection failed"): await manager.startup() @pytest.mark.asyncio async def test_lifespan_state_shared(self): """Test that lifespan state is shared with app.""" from gunicorn.asgi.lifespan import LifespanManager state = {} async def app(scope, receive, send): assert "state" in scope scope["state"]["db"] = "connected" message = await receive() await send({"type": "lifespan.startup.complete"}) message = await receive() await send({"type": "lifespan.shutdown.complete"}) manager = LifespanManager(app, mock.Mock(), state) await manager.startup() assert state.get("db") == "connected" await manager.shutdown() # ============================================================================ # Tests for WebSocket Protocol # ============================================================================ class TestWebSocketProtocol: """Tests for WebSocket protocol handling.""" def 
test_websocket_guid(self): """Test WebSocket GUID constant.""" from gunicorn.asgi.websocket import WS_GUID assert WS_GUID == b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11" def test_websocket_opcodes(self): """Test WebSocket opcode constants.""" from gunicorn.asgi import websocket assert websocket.OPCODE_TEXT == 0x1 assert websocket.OPCODE_BINARY == 0x2 assert websocket.OPCODE_CLOSE == 0x8 assert websocket.OPCODE_PING == 0x9 assert websocket.OPCODE_PONG == 0xA def test_websocket_accept_key_calculation(self): """Test WebSocket accept key calculation per RFC 6455.""" import base64 import hashlib from gunicorn.asgi.websocket import WS_GUID # Example from RFC 6455 client_key = b"dGhlIHNhbXBsZSBub25jZQ==" expected_accept = "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=" accept_key = base64.b64encode( hashlib.sha1(client_key + WS_GUID).digest() ).decode("ascii") assert accept_key == expected_accept def test_websocket_frame_masking(self): """Test WebSocket frame unmasking.""" from gunicorn.asgi.websocket import WebSocketProtocol # Create a minimal protocol instance protocol = WebSocketProtocol(None, None, {}, None, mock.Mock()) # Test unmasking (XOR operation) masking_key = bytes([0x37, 0xfa, 0x21, 0x3d]) masked_data = bytes([0x7f, 0x9f, 0x4d, 0x51, 0x58]) # "Hello" masked unmasked = protocol._unmask(masked_data, masking_key) assert unmasked == b"Hello" def test_websocket_frame_masking_empty(self): """Test WebSocket frame unmasking with empty payload.""" from gunicorn.asgi.websocket import WebSocketProtocol protocol = WebSocketProtocol(None, None, {}, None, mock.Mock()) masking_key = bytes([0x37, 0xfa, 0x21, 0x3d]) unmasked = protocol._unmask(b"", masking_key) assert unmasked == b"" # ============================================================================ # Integration Tests # ============================================================================ class TestASGIIntegration: """Integration tests that start actual servers.""" @pytest.fixture def free_port(self): """Get a free port for 
testing.""" with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind(('127.0.0.1', 0)) return s.getsockname()[1] @pytest.mark.asyncio async def test_http_request_response(self, free_port): """Test basic HTTP request/response cycle.""" # Simple ASGI app async def app(scope, receive, send): if scope["type"] == "http": await send({ "type": "http.response.start", "status": 200, "headers": [(b"content-type", b"text/plain")], }) await send({ "type": "http.response.body", "body": b"Hello, World!", }) # Start server loop = asyncio.get_event_loop() server = await loop.create_server( lambda: _TestProtocol(app), '127.0.0.1', free_port, ) try: # Use asyncio to make HTTP request reader, writer = await asyncio.open_connection('127.0.0.1', free_port) request = f"GET / HTTP/1.1\r\nHost: 127.0.0.1:{free_port}\r\n\r\n" writer.write(request.encode()) await writer.drain() # Read response response = await reader.read(4096) response_text = response.decode() assert "HTTP/1.1 200" in response_text assert "Hello, World!" 
in response_text writer.close() await writer.wait_closed() finally: server.close() await server.wait_closed() class _TestProtocol(asyncio.Protocol): """Minimal protocol for integration testing.""" def __init__(self, app): self.app = app self.transport = None def connection_made(self, transport): self.transport = transport def data_received(self, data): # Very simple HTTP parsing for testing asyncio.create_task(self._handle(data)) async def _handle(self, data): # Parse basic HTTP request lines = data.decode().split('\r\n') method, path, _ = lines[0].split(' ') scope = { "type": "http", "asgi": {"version": "3.0"}, "http_version": "1.1", "method": method, "path": path, "query_string": b"", "headers": [], "server": ("127.0.0.1", 8000), "client": ("127.0.0.1", 12345), } async def receive(): return {"type": "http.request", "body": b"", "more_body": False} async def send(message): if message["type"] == "http.response.start": status = message["status"] headers = message.get("headers", []) response = f"HTTP/1.1 {status} OK\r\n" for name, value in headers: if isinstance(name, bytes): name = name.decode() if isinstance(value, bytes): value = value.decode() response += f"{name}: {value}\r\n" response += "\r\n" self.transport.write(response.encode()) elif message["type"] == "http.response.body": body = message.get("body", b"") self.transport.write(body) if not message.get("more_body", False): self.transport.close() await self.app(scope, receive, send) # ============================================================================ # ASGI Protocol Tests # ============================================================================ class TestASGIProtocol: """Tests for ASGIProtocol.""" def test_reason_phrases(self): """Test HTTP reason phrase lookup.""" from gunicorn.asgi.protocol import ASGIProtocol # Create minimal worker mock worker = mock.Mock() worker.cfg = Config() worker.log = mock.Mock() worker.asgi = mock.Mock() protocol = ASGIProtocol(worker) assert 
protocol._get_reason_phrase(200) == "OK" assert protocol._get_reason_phrase(404) == "Not Found" assert protocol._get_reason_phrase(500) == "Internal Server Error" assert protocol._get_reason_phrase(999) == "Unknown" def test_scope_building(self): """Test HTTP scope building.""" from gunicorn.asgi.protocol import ASGIProtocol from gunicorn.asgi.message import AsyncRequest worker = mock.Mock() worker.cfg = Config() worker.cfg.set('root_path', '/api') worker.log = mock.Mock() worker.asgi = mock.Mock() protocol = ASGIProtocol(worker) # Create mock request request = mock.Mock() request.method = "GET" request.path = "/users" request.query = "page=1" request.version = (1, 1) request.scheme = "http" request.headers = [("HOST", "localhost"), ("ACCEPT", "text/html")] scope = protocol._build_http_scope( request, ("127.0.0.1", 8000), # sockname ("127.0.0.1", 12345), # peername ) assert scope["type"] == "http" assert scope["method"] == "GET" assert scope["path"] == "/users" assert scope["query_string"] == b"page=1" assert scope["root_path"] == "/api" assert scope["http_version"] == "1.1" # ============================================================================ # Config Tests # ============================================================================ class TestASGIConfig: """Tests for ASGI configuration options.""" def test_asgi_loop_default(self): """Test default asgi_loop value.""" cfg = Config() assert cfg.asgi_loop == "auto" def test_asgi_loop_validation(self): """Test asgi_loop validation.""" cfg = Config() cfg.set('asgi_loop', 'asyncio') assert cfg.asgi_loop == 'asyncio' cfg.set('asgi_loop', 'uvloop') assert cfg.asgi_loop == 'uvloop' with pytest.raises(ValueError): cfg.set('asgi_loop', 'invalid') def test_asgi_lifespan_default(self): """Test default asgi_lifespan value.""" cfg = Config() assert cfg.asgi_lifespan == "auto" def test_asgi_lifespan_validation(self): """Test asgi_lifespan validation.""" cfg = Config() cfg.set('asgi_lifespan', 'on') assert 
cfg.asgi_lifespan == 'on' cfg.set('asgi_lifespan', 'off') assert cfg.asgi_lifespan == 'off' with pytest.raises(ValueError): cfg.set('asgi_lifespan', 'invalid') def test_root_path_default(self): """Test default root_path value.""" cfg = Config() assert cfg.root_path == "" def test_root_path_setting(self): """Test root_path configuration.""" cfg = Config() cfg.set('root_path', '/api/v1') assert cfg.root_path == '/api/v1' # ============================================================================ # HTTP/2 Priority Tests # ============================================================================ class TestASGIHTTP2Priority: """Test HTTP/2 priority in ASGI scope.""" def test_http2_priority_in_scope(self): """Test that HTTP/2 priority is added to ASGI scope extensions.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.Mock() worker.cfg = Config() worker.log = mock.Mock() worker.asgi = mock.Mock() protocol = ASGIProtocol(worker) # Create mock HTTP/2 request with priority request = mock.Mock() request.method = "GET" request.path = "/test" request.query = "" request.version = (2, 0) request.scheme = "https" request.headers = [("HOST", "localhost")] request.priority_weight = 128 request.priority_depends_on = 3 scope = protocol._build_http_scope( request, ("127.0.0.1", 8443), ("127.0.0.1", 12345), ) assert "extensions" in scope assert "http.response.priority" in scope["extensions"] assert scope["extensions"]["http.response.priority"]["weight"] == 128 assert scope["extensions"]["http.response.priority"]["depends_on"] == 3 def test_http2_priority_in_http2_scope(self): """Test that HTTP/2 priority is in _build_http2_scope.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.Mock() worker.cfg = Config() worker.log = mock.Mock() worker.asgi = mock.Mock() protocol = ASGIProtocol(worker) # Create mock HTTP/2 request with priority request = mock.Mock() request.method = "POST" request.path = "/api/data" request.query = "id=1" request.uri = 
"/api/data?id=1" request.scheme = "https" request.headers = [("HOST", "localhost"), ("CONTENT-TYPE", "application/json")] request.priority_weight = 256 request.priority_depends_on = 1 scope = protocol._build_http2_scope( request, ("127.0.0.1", 8443), ("127.0.0.1", 12345), ) assert scope["http_version"] == "2" assert "extensions" in scope assert "http.response.priority" in scope["extensions"] assert scope["extensions"]["http.response.priority"]["weight"] == 256 assert scope["extensions"]["http.response.priority"]["depends_on"] == 1 def test_no_priority_for_http1_requests(self): """Test that HTTP/1.1 requests don't have priority extensions.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.Mock() worker.cfg = Config() worker.log = mock.Mock() worker.asgi = mock.Mock() protocol = ASGIProtocol(worker) # Create mock HTTP/1.1 request (no priority attributes) request = mock.Mock(spec=['method', 'path', 'query', 'version', 'scheme', 'headers']) request.method = "GET" request.path = "/test" request.query = "" request.version = (1, 1) request.scheme = "http" request.headers = [("HOST", "localhost")] scope = protocol._build_http_scope( request, ("127.0.0.1", 8000), ("127.0.0.1", 12345), ) # HTTP/1.1 requests should not have extensions with priority assert "extensions" not in scope or "http.response.priority" not in scope.get("extensions", {}) # ============================================================================ # HTTP/2 Trailers Tests # ============================================================================ class TestASGIHTTP2Trailers: """Test HTTP/2 response trailer support in ASGI.""" def test_http2_trailers_extension_in_scope(self): """Test that HTTP/2 scope includes http.response.trailers extension.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.Mock() worker.cfg = Config() worker.log = mock.Mock() worker.asgi = mock.Mock() protocol = ASGIProtocol(worker) # Create mock HTTP/2 request request = mock.Mock() request.method = 
"GET" request.path = "/api" request.query = "" request.uri = "/api" request.scheme = "https" request.headers = [("HOST", "localhost")] request.priority_weight = 16 request.priority_depends_on = 0 scope = protocol._build_http2_scope( request, ("127.0.0.1", 8443), ("127.0.0.1", 12345), ) # HTTP/2 scope should have trailers extension assert "extensions" in scope assert "http.response.trailers" in scope["extensions"] def test_http2_scope_has_both_priority_and_trailers(self): """Test that HTTP/2 scope includes both priority and trailers extensions.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.Mock() worker.cfg = Config() worker.log = mock.Mock() worker.asgi = mock.Mock() protocol = ASGIProtocol(worker) request = mock.Mock() request.method = "POST" request.path = "/grpc" request.query = "" request.uri = "/grpc" request.scheme = "https" request.headers = [("HOST", "localhost"), ("CONTENT-TYPE", "application/grpc")] request.priority_weight = 128 request.priority_depends_on = 1 scope = protocol._build_http2_scope( request, ("127.0.0.1", 8443), ("127.0.0.1", 54321), ) extensions = scope.get("extensions", {}) assert "http.response.priority" in extensions assert "http.response.trailers" in extensions assert extensions["http.response.priority"]["weight"] == 128 benoitc-gunicorn-f5fb19e/tests/test_config.py000066400000000000000000000372311514360242400215110ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
import os import re import sys import pytest from gunicorn import config from gunicorn.app.base import Application from gunicorn.app.wsgiapp import WSGIApplication from gunicorn.errors import ConfigError from gunicorn.util import load_class from gunicorn.workers.sync import SyncWorker from gunicorn import glogging from gunicorn.instrument import statsd dirname = os.path.dirname(__file__) def cfg_module(): return 'config.test_cfg' def alt_cfg_module(): return 'config.test_cfg_alt' def cfg_file(): return os.path.join(dirname, "config", "test_cfg.py") def alt_cfg_file(): return os.path.join(dirname, "config", "test_cfg_alt.py") def cfg_file_with_wsgi_app(): return os.path.join(dirname, "config", "test_cfg_with_wsgi_app.py") def paster_ini(): return os.path.join(dirname, "..", "examples", "frameworks", "pylonstest", "nose.ini") class AltArgs: def __init__(self, args=None): self.args = args or [] self.orig = sys.argv def __enter__(self): sys.argv = self.args def __exit__(self, exc_type, exc_inst, traceback): sys.argv = self.orig class NoConfigApp(Application): def __init__(self): super().__init__("no_usage", prog="gunicorn_test") def init(self, parser, opts, args): pass def load(self): pass class CustomWorker(SyncWorker): pass class WSGIApp(WSGIApplication): def __init__(self): super().__init__("no_usage", prog="gunicorn_test") def load(self): pass def test_worker_class(): c = config.Config() c.set("worker_class", CustomWorker) assert c.worker_class == CustomWorker try: assert isinstance(load_class(c.worker_class), object) except AttributeError: pytest.fail("'load_class doesn't support type class argument'") def test_defaults(): c = config.Config() for s in config.KNOWN_SETTINGS: assert c.settings[s.name].validator(s.default) == c.settings[s.name].get() def test_property_access(): c = config.Config() for s in config.KNOWN_SETTINGS: getattr(c, s.name) # Class was loaded assert c.worker_class == SyncWorker # logger class was loaded assert c.logger_class == glogging.Logger 
# Workers defaults to 1 assert c.workers == 1 c.set("workers", 3) assert c.workers == 3 # Address is parsed assert c.address == [("127.0.0.1", 8000)] # User and group defaults assert os.geteuid() == c.uid assert os.getegid() == c.gid # Proc name assert "gunicorn" == c.proc_name # Not a config property pytest.raises(AttributeError, getattr, c, "foo") # Force to be not an error class Baz: def get(self): return 3.14 c.settings["foo"] = Baz() assert c.foo == 3.14 # Attempt to set a cfg not via c.set pytest.raises(AttributeError, setattr, c, "proc_name", "baz") # No setting for name pytest.raises(AttributeError, c.set, "baz", "bar") def test_bool_validation(): c = config.Config() assert c.preload_app is False c.set("preload_app", True) assert c.preload_app is True c.set("preload_app", "true") assert c.preload_app is True c.set("preload_app", "false") assert c.preload_app is False pytest.raises(ValueError, c.set, "preload_app", "zilch") pytest.raises(TypeError, c.set, "preload_app", 4) def test_pos_int_validation(): c = config.Config() assert c.workers == 1 c.set("workers", 4) assert c.workers == 4 c.set("workers", "5") assert c.workers == 5 c.set("workers", "0xFF") assert c.workers == 255 c.set("workers", True) assert c.workers == 1 # Yes. That's right... 
pytest.raises(ValueError, c.set, "workers", -21) pytest.raises(TypeError, c.set, "workers", c) def test_str_validation(): c = config.Config() assert c.proc_name == "gunicorn" c.set("proc_name", " foo ") assert c.proc_name == "foo" pytest.raises(TypeError, c.set, "proc_name", 2) def test_str_to_addr_list_validation(): c = config.Config() # Values remain as strings for backward compatibility assert c.proxy_allow_ips == ["127.0.0.1", "::1"] assert c.forwarded_allow_ips == ["127.0.0.1", "::1"] # Single IPs are validated but kept as strings c.set("forwarded_allow_ips", "127.0.0.1,192.0.2.1") assert c.forwarded_allow_ips == ["127.0.0.1", "192.0.2.1"] # CIDR networks are supported and kept as strings c.set("forwarded_allow_ips", "127.0.0.0/8,192.168.0.0/16") assert c.forwarded_allow_ips == ["127.0.0.0/8", "192.168.0.0/16"] # Wildcard is preserved as string c.set("forwarded_allow_ips", "*") assert c.forwarded_allow_ips == ["*"] c.set("forwarded_allow_ips", "") assert c.forwarded_allow_ips == [] c.set("forwarded_allow_ips", None) assert c.forwarded_allow_ips == [] # demand addresses are specified unambiguously pytest.raises(TypeError, c.set, "forwarded_allow_ips", 1) # demand networks are specified unambiguously pytest.raises(ValueError, c.set, "forwarded_allow_ips", "127.0.0") # detect typos pytest.raises(ValueError, c.set, "forwarded_allow_ips", "::f:") # dangerous typos such as accidentally permitting half the internet # clearly recognizable - masked bits are not zero pytest.raises(ValueError, c.set, "forwarded_allow_ips", "100.64.0.0/1") def test_str_to_list(): c = config.Config() assert c.forwarder_headers == ["SCRIPT_NAME", "PATH_INFO"] c.set("forwarder_headers", "SCRIPT_NAME,REMOTE_USER") assert c.forwarder_headers == ["SCRIPT_NAME", "REMOTE_USER"] c.set("forwarder_headers", "") assert c.forwarder_headers == [] c.set("forwarder_headers", None) assert c.forwarder_headers == [] def test_callable_validation(): c = config.Config() def func(a, b): pass c.set("pre_fork", 
func) assert c.pre_fork == func pytest.raises(TypeError, c.set, "pre_fork", 1) pytest.raises(TypeError, c.set, "pre_fork", lambda x: True) def test_reload_engine_validation(): c = config.Config() assert c.reload_engine == "auto" c.set('reload_engine', 'poll') assert c.reload_engine == 'poll' pytest.raises(ConfigError, c.set, "reload_engine", "invalid") def test_callable_validation_for_string(): from os.path import isdir as testfunc assert config.validate_callable(-1)("os.path.isdir") == testfunc # invalid values tests pytest.raises( TypeError, config.validate_callable(-1), "" ) pytest.raises( TypeError, config.validate_callable(-1), "os.path.not_found_func" ) pytest.raises( TypeError, config.validate_callable(-1), "notfoundmodule.func" ) def test_cmd_line(): with AltArgs(["prog_name", "-b", "blargh"]): app = NoConfigApp() assert app.cfg.bind == ["blargh"] with AltArgs(["prog_name", "-w", "3"]): app = NoConfigApp() assert app.cfg.workers == 3 with AltArgs(["prog_name", "--preload"]): app = NoConfigApp() assert app.cfg.preload_app def test_cmd_line_invalid_setting(capsys): with AltArgs(["prog_name", "-q", "bar"]): with pytest.raises(SystemExit): NoConfigApp() _, err = capsys.readouterr() assert "error: unrecognized arguments: -q" in err def test_app_config(): with AltArgs(): app = NoConfigApp() for s in config.KNOWN_SETTINGS: assert app.cfg.settings[s.name].validator(s.default) == app.cfg.settings[s.name].get() def test_load_config(): with AltArgs(["prog_name", "-c", cfg_file()]): app = NoConfigApp() assert app.cfg.bind == ["unix:/tmp/bar/baz"] assert app.cfg.workers == 3 assert app.cfg.proc_name == "fooey" def test_load_config_explicit_file(): with AltArgs(["prog_name", "-c", "file:%s" % cfg_file()]): app = NoConfigApp() assert app.cfg.bind == ["unix:/tmp/bar/baz"] assert app.cfg.workers == 3 assert app.cfg.proc_name == "fooey" def test_load_config_module(): with AltArgs(["prog_name", "-c", "python:%s" % cfg_module()]): app = NoConfigApp() assert app.cfg.bind == 
["unix:/tmp/bar/baz"] assert app.cfg.workers == 3 assert app.cfg.proc_name == "fooey" def test_cli_overrides_config(): with AltArgs(["prog_name", "-c", cfg_file(), "-b", "blarney"]): app = NoConfigApp() assert app.cfg.bind == ["blarney"] assert app.cfg.proc_name == "fooey" def test_cli_overrides_config_module(): with AltArgs(["prog_name", "-c", "python:%s" % cfg_module(), "-b", "blarney"]): app = NoConfigApp() assert app.cfg.bind == ["blarney"] assert app.cfg.proc_name == "fooey" @pytest.fixture def create_config_file(request): default_config = os.path.join(os.path.abspath(os.getcwd()), 'gunicorn.conf.py') with open(default_config, 'w+') as default: default.write("bind='0.0.0.0:9090'") def fin(): os.unlink(default_config) request.addfinalizer(fin) return default def test_default_config_file(create_config_file): assert config.get_default_config_file() == create_config_file.name with AltArgs(["prog_name"]): app = NoConfigApp() assert app.cfg.bind == ["0.0.0.0:9090"] def test_post_request(): c = config.Config() def post_request_4(worker, req, environ, resp): return 4 def post_request_3(worker, req, environ): return 3 def post_request_2(worker, req): return 2 c.set("post_request", post_request_4) assert c.post_request(1, 2, 3, 4) == 4 c.set("post_request", post_request_3) assert c.post_request(1, 2, 3, 4) == 3 c.set("post_request", post_request_2) assert c.post_request(1, 2, 3, 4) == 2 def test_nworkers_changed(): c = config.Config() def nworkers_changed_3(server, new_value, old_value): return 3 c.set("nworkers_changed", nworkers_changed_3) assert c.nworkers_changed(1, 2, 3) == 3 def test_statsd_host(): c = config.Config() assert c.statsd_host is None c.set("statsd_host", "localhost") assert c.statsd_host == ("localhost", 8125) c.set("statsd_host", "statsd:7777") assert c.statsd_host == ("statsd", 7777) c.set("statsd_host", "unix:///path/to.sock") assert c.statsd_host == "/path/to.sock" pytest.raises(TypeError, c.set, "statsd_host", 666) pytest.raises(TypeError, c.set, 
"statsd_host", "host:string") def test_statsd_host_with_unix_as_hostname(): # This is a regression test for major release 20. After this release # we should consider modifying the behavior of util.parse_address to # simplify gunicorn's code c = config.Config() c.set("statsd_host", "unix:7777") assert c.statsd_host == ("unix", 7777) c.set("statsd_host", "unix://some.socket") assert c.statsd_host == "some.socket" def test_statsd_changes_logger(): c = config.Config() assert c.logger_class == glogging.Logger c.set('statsd_host', 'localhost:12345') assert c.logger_class == statsd.Statsd class MyLogger(glogging.Logger): # dummy custom logger class for testing pass def test_always_use_configured_logger(): c = config.Config() c.set('logger_class', __name__ + '.MyLogger') assert c.logger_class == MyLogger c.set('statsd_host', 'localhost:12345') # still uses custom logger over statsd assert c.logger_class == MyLogger def test_load_enviroment_variables_config(monkeypatch): monkeypatch.setenv("GUNICORN_CMD_ARGS", "--workers=4") with AltArgs(): app = NoConfigApp() assert app.cfg.workers == 4 def test_config_file_environment_variable(monkeypatch): monkeypatch.setenv("GUNICORN_CMD_ARGS", "--config=" + alt_cfg_file()) with AltArgs(): app = NoConfigApp() assert app.cfg.proc_name == "not-fooey" assert app.cfg.config == alt_cfg_file() with AltArgs(["prog_name", "--config", cfg_file()]): app = NoConfigApp() assert app.cfg.proc_name == "fooey" assert app.cfg.config == cfg_file() def test_invalid_enviroment_variables_config(monkeypatch, capsys): monkeypatch.setenv("GUNICORN_CMD_ARGS", "--foo=bar") with AltArgs(): with pytest.raises(SystemExit): NoConfigApp() _, err = capsys.readouterr() assert "error: unrecognized arguments: --foo" in err def test_cli_overrides_enviroment_variables_module(monkeypatch): monkeypatch.setenv("GUNICORN_CMD_ARGS", "--workers=4") with AltArgs(["prog_name", "-c", cfg_file(), "--workers", "3"]): app = NoConfigApp() assert app.cfg.workers == 3 
@pytest.mark.parametrize("options, expected", [ (["app:app"], 'app:app'), (["-c", cfg_file(), "app:app"], 'app:app'), (["-c", cfg_file_with_wsgi_app(), "app:app"], 'app:app'), (["-c", cfg_file_with_wsgi_app()], 'app1:app1'), ]) def test_wsgi_app_config(options, expected): cmdline = ["prog_name"] cmdline.extend(options) with AltArgs(cmdline): app = WSGIApp() assert app.app_uri == expected @pytest.mark.parametrize("options", [ ([]), (["-c", cfg_file()]), ]) def test_non_wsgi_app(options, capsys): cmdline = ["prog_name"] cmdline.extend(options) with AltArgs(cmdline): with pytest.raises(SystemExit): WSGIApp() _, err = capsys.readouterr() assert "Error: No application module specified." in err @pytest.mark.parametrize("options, expected", [ (["myapp:app"], False), (["--reload", "myapp:app"], True), (["--reload", "--", "myapp:app"], True), (["--reload", "-w 2", "myapp:app"], True), ]) def test_reload(options, expected): cmdline = ["prog_name"] cmdline.extend(options) with AltArgs(cmdline): app = NoConfigApp() assert app.cfg.reload == expected @pytest.mark.parametrize("options, expected", [ (["--umask", "0", "myapp:app"], 0), (["--umask", "0o0", "myapp:app"], 0), (["--umask", "0x0", "myapp:app"], 0), (["--umask", "0xFF", "myapp:app"], 255), (["--umask", "0022", "myapp:app"], 18), ]) def test_umask_config(options, expected): cmdline = ["prog_name"] cmdline.extend(options) with AltArgs(cmdline): app = NoConfigApp() assert app.cfg.umask == expected def _test_ssl_version(options, expected): cmdline = ["prog_name"] cmdline.extend(options) with AltArgs(cmdline): app = NoConfigApp() assert app.cfg.ssl_version == expected def test_bind_fd(): with AltArgs(["prog_name", "-b", "fd://42"]): app = NoConfigApp() assert app.cfg.bind == ["fd://42"] def test_repr(): c = config.Config() c.set("workers", 5) assert "with value 5" in repr(c.settings['workers']) def test_str(): c = config.Config() o = str(c) # match the first few lines, some different types, but don't go OTT # to avoid 
needless test fails with changes OUTPUT_MATCH = { 'access_log_format': '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"', 'accesslog': 'None', 'backlog': '2048', 'bind': "['127.0.0.1:8000']", 'capture_output': 'False', 'child_exit': '', } for i, line in enumerate(o.splitlines()): m = re.match(r'^(\w+)\s+= ', line) assert m, "Line {} didn't match expected format: {!r}".format(i, line) key = m.group(1) try: s = OUTPUT_MATCH.pop(key) except KeyError: continue line_re = r'^{}\s+= {}$'.format(key, re.escape(s)) assert re.match(line_re, line), '{!r} != {!r}'.format(line_re, line) if not OUTPUT_MATCH: break else: assert False, 'missing expected setting lines? {}'.format( OUTPUT_MATCH.keys() ) benoitc-gunicorn-f5fb19e/tests/test_dirty_app.py000066400000000000000000000302571514360242400222400ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Tests for dirty app module.""" import pytest from gunicorn.dirty.app import ( DirtyApp, load_dirty_app, load_dirty_apps, parse_dirty_app_spec, ) from gunicorn.dirty.errors import DirtyAppError, DirtyAppNotFoundError class TestDirtyAppBase: """Tests for DirtyApp base class.""" def test_base_class_methods_exist(self): """Test that base class has all required methods.""" app = DirtyApp() assert hasattr(app, 'init') assert hasattr(app, '__call__') assert hasattr(app, 'close') assert callable(app.init) assert callable(app.close) def test_base_init_is_noop(self): """Test that base init does nothing.""" app = DirtyApp() result = app.init() assert result is None def test_base_close_is_noop(self): """Test that base close does nothing.""" app = DirtyApp() result = app.close() assert result is None def test_base_call_dispatches_to_method(self): """Test that base __call__ dispatches to methods.""" class TestApp(DirtyApp): def my_action(self, x, y): return x + y app = TestApp() result = app("my_action", 1, 2) assert result == 3 def 
test_base_call_unknown_action(self): """Test that __call__ raises for unknown action.""" app = DirtyApp() with pytest.raises(ValueError) as exc_info: app("unknown_action") assert "Unknown action" in str(exc_info.value) def test_base_call_private_method_rejected(self): """Test that __call__ rejects private methods.""" class TestApp(DirtyApp): def _private(self): return "secret" app = TestApp() with pytest.raises(ValueError) as exc_info: app("_private") assert "Unknown action" in str(exc_info.value) class TestLoadDirtyApp: """Tests for load_dirty_app function.""" def test_load_valid_app(self): """Test loading a valid dirty app.""" app = load_dirty_app("tests.support_dirty_app:TestDirtyApp") assert app is not None assert hasattr(app, 'init') assert hasattr(app, 'close') def test_load_app_instance_not_initialized(self): """Test that loaded app is not auto-initialized.""" app = load_dirty_app("tests.support_dirty_app:TestDirtyApp") assert app.initialized is False def test_load_app_init_can_be_called(self): """Test that init can be called on loaded app.""" app = load_dirty_app("tests.support_dirty_app:TestDirtyApp") app.init() assert app.initialized is True assert app.data['init_called'] is True def test_load_app_call_works(self): """Test that loaded app can be called.""" app = load_dirty_app("tests.support_dirty_app:TestDirtyApp") result = app("compute", 2, 3, operation="add") assert result == 5 result = app("compute", 2, 3, operation="multiply") assert result == 6 def test_load_app_close_works(self): """Test that close works on loaded app.""" app = load_dirty_app("tests.support_dirty_app:TestDirtyApp") app("store", "key", "value") assert app.data.get("key") == "value" app.close() assert app.closed is True assert app.data == {} def test_load_missing_module(self): """Test loading from non-existent module.""" with pytest.raises(DirtyAppNotFoundError) as exc_info: load_dirty_app("nonexistent.module:App") assert "not found" in str(exc_info.value).lower() def 
test_load_missing_class(self): """Test loading non-existent class from valid module.""" with pytest.raises(DirtyAppNotFoundError): load_dirty_app("tests.support_dirty_app:NonExistentApp") def test_load_invalid_format_no_colon(self): """Test loading with invalid format (no colon).""" with pytest.raises(DirtyAppError) as exc_info: load_dirty_app("tests.support_dirty_app.TestDirtyApp") assert "Invalid import path format" in str(exc_info.value) def test_load_not_a_class(self): """Test loading something that's not a class.""" with pytest.raises(DirtyAppError) as exc_info: load_dirty_app("tests.support_dirty_app:not_a_class") assert "not a class" in str(exc_info.value).lower() def test_load_broken_instantiation(self): """Test loading an app that fails during instantiation.""" with pytest.raises(DirtyAppError) as exc_info: load_dirty_app("tests.support_dirty_app:BrokenInstantiationApp") assert "Failed to instantiate" in str(exc_info.value) class TestLoadDirtyApps: """Tests for load_dirty_apps function.""" def test_load_multiple_apps(self): """Test loading multiple apps.""" apps = load_dirty_apps([ "tests.support_dirty_app:TestDirtyApp", ]) assert len(apps) == 1 assert "tests.support_dirty_app:TestDirtyApp" in apps def test_load_empty_list(self): """Test loading with empty list.""" apps = load_dirty_apps([]) assert apps == {} def test_load_multiple_fails_on_first_error(self): """Test that loading stops on first error.""" with pytest.raises(DirtyAppNotFoundError): load_dirty_apps([ "tests.support_dirty_app:TestDirtyApp", "nonexistent:App", # This should fail ]) class TestDirtyAppStateful: """Tests for stateful dirty app behavior.""" def test_app_maintains_state(self): """Test that app maintains state between calls.""" app = load_dirty_app("tests.support_dirty_app:TestDirtyApp") app.init() # Store some data app("store", "model", {"weights": [1, 2, 3]}) app("store", "config", {"lr": 0.001}) # Retrieve data model = app("retrieve", "model") config = app("retrieve", "config") 
assert model == {"weights": [1, 2, 3]} assert config == {"lr": 0.001} def test_app_error_handling(self): """Test that errors from app are raised properly.""" app = load_dirty_app("tests.support_dirty_app:TestDirtyApp") with pytest.raises(ValueError) as exc_info: app("compute", 1, 2, operation="invalid") assert "Unknown operation" in str(exc_info.value) class TestDirtyAppWorkersAttribute: """Tests for DirtyApp workers class attribute.""" def test_default_workers_is_none(self): """Base DirtyApp has workers=None (all workers).""" assert DirtyApp.workers is None def test_subclass_can_set_workers(self): """Subclass can override workers=2.""" class LimitedApp(DirtyApp): workers = 2 assert LimitedApp.workers == 2 def test_workers_inherited_by_default(self): """Subclass without workers attr inherits None.""" class InheritedApp(DirtyApp): pass assert InheritedApp.workers is None def test_instance_has_workers_attribute(self): """Instance should have access to workers attribute.""" app = DirtyApp() assert app.workers is None class LimitedApp(DirtyApp): workers = 3 limited = LimitedApp() assert limited.workers == 3 class TestParseDirtyAppSpec: """Tests for parse_dirty_app_spec function.""" def test_standard_format(self): """'mod:Class' returns ('mod:Class', None).""" import_path, count = parse_dirty_app_spec("mod:Class") assert import_path == "mod:Class" assert count is None def test_standard_format_with_dots(self): """'mod.sub.pkg:Class' returns ('mod.sub.pkg:Class', None).""" import_path, count = parse_dirty_app_spec("mod.sub.pkg:Class") assert import_path == "mod.sub.pkg:Class" assert count is None def test_with_worker_count(self): """'mod:Class:2' returns ('mod:Class', 2).""" import_path, count = parse_dirty_app_spec("mod:Class:2") assert import_path == "mod:Class" assert count == 2 def test_worker_count_one(self): """'mod:Class:1' returns ('mod:Class', 1).""" import_path, count = parse_dirty_app_spec("mod:Class:1") assert import_path == "mod:Class" assert count == 1 def 
test_worker_count_large(self): """'mod:Class:100' returns ('mod:Class', 100).""" import_path, count = parse_dirty_app_spec("mod:Class:100") assert import_path == "mod:Class" assert count == 100 def test_worker_count_zero_raises(self): """'mod:Class:0' raises DirtyAppError.""" with pytest.raises(DirtyAppError) as exc_info: parse_dirty_app_spec("mod:Class:0") assert "must be >= 1" in str(exc_info.value) def test_worker_count_negative_raises(self): """'mod:Class:-1' raises DirtyAppError.""" with pytest.raises(DirtyAppError) as exc_info: parse_dirty_app_spec("mod:Class:-1") assert "must be >= 1" in str(exc_info.value) def test_non_numeric_raises(self): """'mod:Class:abc' raises DirtyAppError.""" with pytest.raises(DirtyAppError) as exc_info: parse_dirty_app_spec("mod:Class:abc") assert "Expected integer" in str(exc_info.value) def test_no_colon_raises(self): """'mod.Class' (no colon) raises DirtyAppError.""" with pytest.raises(DirtyAppError) as exc_info: parse_dirty_app_spec("mod.Class") assert "Invalid import path format" in str(exc_info.value) def test_too_many_colons_raises(self): """'mod:Class:2:extra' raises DirtyAppError.""" with pytest.raises(DirtyAppError) as exc_info: parse_dirty_app_spec("mod:Class:2:extra") assert "Invalid import path format" in str(exc_info.value) def test_dotted_module_with_count(self): """'mod.sub:Class:2' handles dots correctly.""" import_path, count = parse_dirty_app_spec("mod.sub:Class:2") assert import_path == "mod.sub:Class" assert count == 2 class TestGetAppWorkersAttribute: """Tests for get_app_workers_attribute function.""" def test_get_workers_none_for_base_class(self): """Base DirtyApp returns workers=None.""" from gunicorn.dirty.app import get_app_workers_attribute workers = get_app_workers_attribute("gunicorn.dirty.app:DirtyApp") assert workers is None def test_get_workers_from_class_attribute(self): """App with workers=2 class attribute returns 2.""" from gunicorn.dirty.app import get_app_workers_attribute workers = 
get_app_workers_attribute("tests.support_dirty_app:HeavyModelApp") assert workers == 2 def test_get_workers_none_for_inherited(self): """App without explicit workers attribute returns None.""" from gunicorn.dirty.app import get_app_workers_attribute workers = get_app_workers_attribute("tests.support_dirty_app:TestDirtyApp") assert workers is None def test_get_workers_not_found_module(self): """Non-existent module raises DirtyAppNotFoundError.""" from gunicorn.dirty.app import get_app_workers_attribute from gunicorn.dirty.errors import DirtyAppNotFoundError with pytest.raises(DirtyAppNotFoundError): get_app_workers_attribute("nonexistent.module:App") def test_get_workers_not_found_class(self): """Non-existent class raises DirtyAppNotFoundError.""" from gunicorn.dirty.app import get_app_workers_attribute from gunicorn.dirty.errors import DirtyAppNotFoundError with pytest.raises(DirtyAppNotFoundError): get_app_workers_attribute("tests.support_dirty_app:NonExistentApp") def test_get_workers_invalid_format(self): """Invalid format raises DirtyAppError.""" from gunicorn.dirty.app import get_app_workers_attribute from gunicorn.dirty.errors import DirtyAppError with pytest.raises(DirtyAppError) as exc_info: get_app_workers_attribute("invalid.format.no.colon") assert "Invalid import path format" in str(exc_info.value) benoitc-gunicorn-f5fb19e/tests/test_dirty_arbiter.py000066400000000000000000001236031514360242400231060ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""Tests for dirty arbiter module.""" import asyncio import os import signal import struct import tempfile import pytest from gunicorn.config import Config from gunicorn.dirty.arbiter import DirtyArbiter from gunicorn.dirty.errors import DirtyError from gunicorn.dirty.protocol import ( DirtyProtocol, BinaryProtocol, make_request, HEADER_SIZE, ) class MockStreamWriter: """Mock StreamWriter that captures written messages.""" def __init__(self): self.messages = [] self._buffer = b"" self.closed = False def write(self, data): self._buffer += data async def drain(self): # Decode the buffer to extract messages using binary protocol while len(self._buffer) >= HEADER_SIZE: # Decode header to get payload length _, _, length = BinaryProtocol.decode_header( self._buffer[:HEADER_SIZE] ) total_size = HEADER_SIZE + length if len(self._buffer) >= total_size: msg_data = self._buffer[:total_size] self._buffer = self._buffer[total_size:] # decode_message returns (msg_type_str, request_id, payload_dict) msg_type_str, request_id, payload_dict = BinaryProtocol.decode_message(msg_data) # Reconstruct the dict format for backwards compatibility result = {"type": msg_type_str, "id": request_id} result.update(payload_dict) self.messages.append(result) else: break def close(self): self.closed = True async def wait_closed(self): pass def get_extra_info(self, name): return None class MockLog: """Mock logger for testing.""" def __init__(self): self.messages = [] def debug(self, msg, *args): self.messages.append(("debug", msg % args if args else msg)) def info(self, msg, *args): self.messages.append(("info", msg % args if args else msg)) def warning(self, msg, *args): self.messages.append(("warning", msg % args if args else msg)) def error(self, msg, *args): self.messages.append(("error", msg % args if args else msg)) def critical(self, msg, *args): self.messages.append(("critical", msg % args if args else msg)) def exception(self, msg, *args): self.messages.append(("exception", msg % args if 
args else msg)) def close_on_exec(self): pass def reopen_files(self): pass class TestDirtyArbiterInit: """Tests for DirtyArbiter initialization.""" def test_init_attributes(self): """Test that arbiter is initialized with correct attributes.""" cfg = Config() cfg.set("dirty_workers", 2) cfg.set("dirty_apps", ["tests.support_dirty_app:TestDirtyApp"]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) assert arbiter.cfg == cfg assert arbiter.log == log assert arbiter.workers == {} assert arbiter.alive is True assert arbiter.worker_age == 0 assert arbiter.tmpdir is not None assert os.path.isdir(arbiter.tmpdir) # Cleanup arbiter._cleanup_sync() def test_init_with_custom_socket_path(self): """Test initialization with custom socket path.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "custom.sock") arbiter = DirtyArbiter(cfg=cfg, log=log, socket_path=socket_path) assert arbiter.socket_path == socket_path # Cleanup arbiter._cleanup_sync() def test_init_with_pidfile(self): """Test initialization with pidfile parameter.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: pidfile = os.path.join(tmpdir, "dirty.pid") arbiter = DirtyArbiter(cfg=cfg, log=log, pidfile=pidfile) assert arbiter.pidfile == pidfile # Cleanup arbiter._cleanup_sync() def test_init_without_pidfile(self): """Test initialization without pidfile parameter defaults to None.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) assert arbiter.pidfile is None # Cleanup arbiter._cleanup_sync() class TestDirtyArbiterCleanup: """Tests for arbiter cleanup.""" def test_cleanup_removes_socket(self): """Test that cleanup removes the socket file.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "test.sock") arbiter = DirtyArbiter(cfg=cfg, log=log, socket_path=socket_path) # Create socket file with open(socket_path, 'w') as f: 
f.write('') assert os.path.exists(socket_path) arbiter._cleanup_sync() assert not os.path.exists(socket_path) def test_cleanup_removes_tmpdir(self): """Test that cleanup removes the temp directory.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) tmpdir = arbiter.tmpdir assert os.path.isdir(tmpdir) arbiter._cleanup_sync() assert not os.path.exists(tmpdir) def test_cleanup_removes_pidfile(self): """Test that cleanup removes the PID file.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: pidfile = os.path.join(tmpdir, "dirty.pid") arbiter = DirtyArbiter(cfg=cfg, log=log, pidfile=pidfile) # Create pidfile with open(pidfile, 'w') as f: f.write('12345') assert os.path.exists(pidfile) arbiter._cleanup_sync() assert not os.path.exists(pidfile) def test_cleanup_handles_missing_pidfile(self): """Test that cleanup handles non-existent pidfile gracefully.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: pidfile = os.path.join(tmpdir, "nonexistent.pid") arbiter = DirtyArbiter(cfg=cfg, log=log, pidfile=pidfile) # Don't create the file assert not os.path.exists(pidfile) # Should not raise arbiter._cleanup_sync() def test_cleanup_without_pidfile(self): """Test that cleanup works when no pidfile configured.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) assert arbiter.pidfile is None # Should not raise arbiter._cleanup_sync() class TestDirtyArbiterPidfileWrite: """Tests for PID file writing during run().""" def test_run_writes_pidfile(self): """Test that run() writes the PID to the pidfile.""" from unittest import mock cfg = Config() cfg.set("dirty_workers", 0) cfg.set("dirty_apps", ["tests.support_dirty_app:TestDirtyApp"]) log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: pidfile = os.path.join(tmpdir, "dirty.pid") arbiter = DirtyArbiter(cfg=cfg, log=log, pidfile=pidfile) # Track if PID file was written correctly pid_written = None def 
mock_asyncio_run(coro): nonlocal pid_written # At this point, PID file should have been written if os.path.exists(pidfile): with open(pidfile) as f: pid_written = int(f.read().strip()) # Close coroutine to avoid "never awaited" warning coro.close() # Mock asyncio.run to check PID file before cleanup runs with mock.patch.object(asyncio, 'run', side_effect=mock_asyncio_run): arbiter.run() # Check PID was written correctly assert pid_written == os.getpid() def test_run_without_pidfile_does_not_fail(self): """Test that run() works when no pidfile configured.""" from unittest import mock cfg = Config() cfg.set("dirty_workers", 0) cfg.set("dirty_apps", ["tests.support_dirty_app:TestDirtyApp"]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) def mock_asyncio_run(coro): # Close coroutine to avoid "never awaited" warning coro.close() with mock.patch.object(asyncio, 'run', side_effect=mock_asyncio_run): # Should not raise arbiter.run() class TestDirtyArbiterRouteRequest: """Tests for request routing.""" @pytest.mark.asyncio async def test_route_request_no_workers(self): """Test routing request when no workers available.""" cfg = Config() cfg.set("dirty_workers", 0) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = os.getpid() request = make_request( request_id="test-123", app_path="test:App", action="test" ) writer = MockStreamWriter() await arbiter.route_request(request, writer) assert len(writer.messages) == 1 response = writer.messages[0] assert response["type"] == DirtyProtocol.MSG_TYPE_ERROR assert "No dirty workers available" in response["error"]["message"] arbiter._cleanup_sync() class TestDirtyArbiterWorkerManagement: """Tests for worker management (without actually forking).""" def test_cleanup_worker(self): """Test worker cleanup method.""" cfg = Config() cfg.set("dirty_workers", 2) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) # Simulate a worker being registered fake_pid = 99999 arbiter.workers[fake_pid] = "fake_worker" 
arbiter.worker_sockets[fake_pid] = "/tmp/fake.sock" arbiter._cleanup_worker(fake_pid) assert fake_pid not in arbiter.workers assert fake_pid not in arbiter.worker_sockets arbiter._cleanup_sync() @pytest.mark.asyncio async def test_cleanup_worker_cancels_consumer(self): """Test that worker cleanup cancels consumer task and removes queue.""" cfg = Config() cfg.set("dirty_workers", 2) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.alive = True # Simulate a worker with queue and consumer fake_pid = 99999 arbiter.workers[fake_pid] = "fake_worker" arbiter.worker_sockets[fake_pid] = "/tmp/fake.sock" # Create queue and mock consumer task arbiter.worker_queues[fake_pid] = asyncio.Queue() async def mock_consumer(): try: while True: await asyncio.sleep(1) except asyncio.CancelledError: pass arbiter.worker_consumers[fake_pid] = asyncio.create_task(mock_consumer()) arbiter._cleanup_worker(fake_pid) assert fake_pid not in arbiter.workers assert fake_pid not in arbiter.worker_sockets assert fake_pid not in arbiter.worker_queues assert fake_pid not in arbiter.worker_consumers arbiter._cleanup_sync() def test_reap_workers_no_children(self): """Test reap_workers when no children have exited.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = os.getpid() # Should not raise even with no children arbiter.reap_workers() arbiter._cleanup_sync() def test_close_worker_connection(self): """Test _close_worker_connection method.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) # Mock connection class MockWriter: def __init__(self): self.closed = False def close(self): self.closed = True mock_writer = MockWriter() mock_reader = object() arbiter.worker_connections[99999] = (mock_reader, mock_writer) arbiter._close_worker_connection(99999) assert 99999 not in arbiter.worker_connections assert mock_writer.closed is True arbiter._cleanup_sync() def test_close_worker_connection_not_exists(self): """Test 
_close_worker_connection when connection doesn't exist.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) # Should not raise arbiter._close_worker_connection(99999) arbiter._cleanup_sync() class TestDirtyArbiterSignals: """Tests for signal handling.""" def test_signal_handler_sigterm(self): """Test SIGTERM handling.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = os.getpid() assert arbiter.alive is True arbiter._signal_handler(signal.SIGTERM, None) assert arbiter.alive is False arbiter._cleanup_sync() def test_signal_handler_sigquit(self): """Test SIGQUIT handling.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = os.getpid() assert arbiter.alive is True arbiter._signal_handler(signal.SIGQUIT, None) assert arbiter.alive is False arbiter._cleanup_sync() def test_signal_handler_sigint(self): """Test SIGINT handling.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = os.getpid() assert arbiter.alive is True arbiter._signal_handler(signal.SIGINT, None) assert arbiter.alive is False arbiter._cleanup_sync() def test_signal_handler_sigusr1_reopens_logs(self): """Test SIGUSR1 reopens log files.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = os.getpid() assert arbiter.alive is True arbiter._signal_handler(signal.SIGUSR1, None) # Should NOT set alive to False assert arbiter.alive is True arbiter._cleanup_sync() def test_signal_handler_with_loop(self): """Test signal handler calls _shutdown with loop.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = os.getpid() # Create mock loop loop = asyncio.new_event_loop() arbiter._loop = loop shutdown_called = [] def mock_call_soon_threadsafe(cb): shutdown_called.append(cb) loop.call_soon_threadsafe = mock_call_soon_threadsafe arbiter._signal_handler(signal.SIGTERM, None) assert arbiter.alive is False assert 
len(shutdown_called) == 1 loop.close() arbiter._cleanup_sync() class TestDirtyArbiterShutdown: """Tests for shutdown.""" def test_shutdown_closes_server(self): """Test that _shutdown closes the server.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) class MockServer: def __init__(self): self.closed = False def close(self): self.closed = True arbiter._server = MockServer() arbiter._shutdown() assert arbiter._server.closed is True arbiter._cleanup_sync() def test_shutdown_without_server(self): """Test _shutdown when server is None.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) # Should not raise arbiter._shutdown() arbiter._cleanup_sync() def test_init_signals(self): """Test init_signals sets up signal handlers.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) original = signal.getsignal(signal.SIGTERM) try: arbiter.init_signals() assert signal.getsignal(signal.SIGTERM) == arbiter._signal_handler assert signal.getsignal(signal.SIGQUIT) == arbiter._signal_handler assert signal.getsignal(signal.SIGINT) == arbiter._signal_handler assert signal.getsignal(signal.SIGHUP) == arbiter._signal_handler assert signal.getsignal(signal.SIGUSR1) == arbiter._signal_handler assert signal.getsignal(signal.SIGCHLD) == arbiter._signal_handler finally: signal.signal(signal.SIGTERM, original) arbiter._cleanup_sync() class TestDirtyArbiterRouteTimeout: """Tests for request timeout handling.""" @pytest.mark.asyncio async def test_route_request_timeout(self): """Test that route_request handles timeout correctly.""" cfg = Config() cfg.set("dirty_workers", 0) cfg.set("dirty_timeout", 1) # 1 second timeout log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = os.getpid() # Register a fake worker fake_pid = 99999 arbiter.workers[fake_pid] = "fake_worker" arbiter.worker_sockets[fake_pid] = "/tmp/nonexistent.sock" request = make_request( request_id="timeout-test", app_path="test:App", 
action="slow_action" ) # This should fail because socket doesn't exist writer = MockStreamWriter() await arbiter.route_request(request, writer) assert len(writer.messages) == 1 response = writer.messages[0] assert response["type"] == DirtyProtocol.MSG_TYPE_ERROR # Either "Worker communication failed" or "Worker socket not ready" assert "error" in response arbiter._cleanup_sync() @pytest.mark.asyncio async def test_get_available_worker_returns_first(self): """Test _get_available_worker returns first worker.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) # No workers result = await arbiter._get_available_worker() assert result is None # Add workers arbiter.workers[1001] = "worker1" arbiter.workers[1002] = "worker2" result = await arbiter._get_available_worker() assert result in [1001, 1002] arbiter._cleanup_sync() class TestDirtyArbiterWorkerConnection: """Tests for worker connection management.""" @pytest.mark.asyncio async def test_get_worker_connection_cached(self): """Test that _get_worker_connection returns cached connection.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) # Set up cached connection mock_reader = object() mock_writer = object() arbiter.worker_connections[99999] = (mock_reader, mock_writer) arbiter.worker_sockets[99999] = "/tmp/test.sock" reader, writer = await arbiter._get_worker_connection(99999) assert reader is mock_reader assert writer is mock_writer arbiter._cleanup_sync() @pytest.mark.asyncio async def test_get_worker_connection_no_socket(self): """Test _get_worker_connection fails when no socket path.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.workers[99999] = "fake_worker" # No socket path registered with pytest.raises(DirtyError) as exc_info: await arbiter._get_worker_connection(99999) assert "No socket for worker" in str(exc_info.value) arbiter._cleanup_sync() @pytest.mark.asyncio async def test_get_worker_connection_socket_not_ready(self): 
"""Test _get_worker_connection when socket file doesn't exist.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.workers[99999] = "fake_worker" arbiter.worker_sockets[99999] = "/tmp/nonexistent_socket_12345.sock" with pytest.raises(DirtyError) as exc_info: await arbiter._get_worker_connection(99999) assert "Worker socket not ready" in str(exc_info.value) arbiter._cleanup_sync() class TestDirtyArbiterManageWorkers: """Tests for worker pool management.""" @pytest.mark.asyncio async def test_manage_workers_zero_target(self): """Test manage_workers with zero target workers.""" cfg = Config() cfg.set("dirty_workers", 0) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = os.getpid() # Should not spawn any workers await arbiter.manage_workers() assert len(arbiter.workers) == 0 arbiter._cleanup_sync() class TestDirtyArbiterKillWorker: """Tests for killing workers.""" def test_kill_worker_no_process(self): """Test kill_worker when process doesn't exist.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = os.getpid() # Register fake worker arbiter.workers[99999] = "fake_worker" arbiter.worker_sockets[99999] = "/tmp/fake.sock" # Kill non-existent process - should cleanup arbiter.kill_worker(99999, signal.SIGTERM) # Worker should be cleaned up assert 99999 not in arbiter.workers arbiter._cleanup_sync() class TestDirtyArbiterMurderWorkers: """Tests for worker timeout detection.""" @pytest.mark.asyncio async def test_murder_workers_no_timeout_config(self): """Test murder_workers with no timeout configured.""" cfg = Config() cfg.set("dirty_timeout", 0) # Disabled log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = os.getpid() # Should return early without checking await arbiter.murder_workers() arbiter._cleanup_sync() class TestDirtyArbiterStop: """Tests for stop functionality.""" @pytest.mark.asyncio async def test_stop_graceful(self): """Test graceful stop with no 
workers.""" cfg = Config() cfg.set("dirty_graceful_timeout", 1) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = os.getpid() # No workers - should complete quickly await arbiter.stop(graceful=True) arbiter._cleanup_sync() @pytest.mark.asyncio async def test_stop_not_graceful(self): """Test non-graceful stop.""" cfg = Config() cfg.set("dirty_graceful_timeout", 1) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = os.getpid() await arbiter.stop(graceful=False) arbiter._cleanup_sync() class TestDirtyArbiterReload: """Tests for reload functionality.""" @pytest.mark.asyncio async def test_reload_with_no_workers(self): """Test reload when no workers exist.""" cfg = Config() cfg.set("dirty_workers", 0) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = os.getpid() # Should complete without spawning await arbiter.reload() assert len(arbiter.workers) == 0 arbiter._cleanup_sync() class TestDirtyArbiterRunAsync: """Tests for async run loop.""" @pytest.mark.asyncio async def test_run_async_creates_server(self): """Test that _run_async creates Unix server.""" cfg = Config() cfg.set("dirty_workers", 0) log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "test_arbiter.sock") arbiter = DirtyArbiter(cfg=cfg, log=log, socket_path=socket_path) arbiter.pid = os.getpid() # Run briefly and stop async def run_briefly(): arbiter._loop = asyncio.get_running_loop() if os.path.exists(socket_path): os.unlink(socket_path) arbiter._server = await asyncio.start_unix_server( arbiter.handle_client, path=socket_path ) os.chmod(socket_path, 0o600) # Verify socket exists assert os.path.exists(socket_path) # Shutdown arbiter._server.close() await arbiter._server.wait_closed() await run_briefly() arbiter._cleanup_sync() class TestDirtyArbiterHandleClient: """Tests for client connection handling.""" @pytest.mark.asyncio async def test_handle_client_connection_close(self): """Test handle_client when 
connection closes.""" cfg = Config() cfg.set("dirty_workers", 0) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = os.getpid() arbiter.alive = True # Create reader that returns EOF reader = asyncio.StreamReader() reader.feed_eof() class MockWriter: def __init__(self): self.closed = False def close(self): self.closed = True async def wait_closed(self): pass writer = MockWriter() # Should exit without error when EOF is received await arbiter.handle_client(reader, writer) assert writer.closed is True arbiter._cleanup_sync() class TestDirtyArbiterWorkerMonitor: """Tests for worker monitoring.""" @pytest.mark.asyncio async def test_worker_monitor_loop(self): """Test worker monitor runs periodically.""" cfg = Config() cfg.set("dirty_workers", 0) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = os.getpid() arbiter.ppid = os.getppid() # Match actual parent for ppid check arbiter.alive = True monitor_calls = 0 async def mock_murder_workers(): nonlocal monitor_calls monitor_calls += 1 if monitor_calls >= 2: arbiter.alive = False async def mock_manage_workers(): pass arbiter.murder_workers = mock_murder_workers arbiter.manage_workers = mock_manage_workers # Run monitor briefly await arbiter._worker_monitor() assert monitor_calls >= 2 arbiter._cleanup_sync() @pytest.mark.asyncio async def test_worker_monitor_detects_parent_death(self): """Test worker monitor exits when parent dies.""" cfg = Config() cfg.set("dirty_workers", 0) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = os.getpid() arbiter.ppid = 99999 # Fake parent PID that doesn't match os.getppid() arbiter.alive = True shutdown_called = [] def mock_shutdown(): shutdown_called.append(True) arbiter._shutdown = mock_shutdown # Run monitor - should detect parent change and exit await arbiter._worker_monitor() # Should have detected parent death assert arbiter.alive is False assert len(shutdown_called) == 1 # Check log message log_messages = [msg for level, 
msg in log.messages if level == "warning"] assert any("Parent changed" in msg for msg in log_messages) arbiter._cleanup_sync() class TestDirtyArbiterHandleSigchld: """Tests for SIGCHLD handling.""" @pytest.mark.asyncio async def test_handle_sigchld_reaps_workers(self): """Test _handle_sigchld calls reap_workers and manage_workers.""" cfg = Config() cfg.set("dirty_workers", 0) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = os.getpid() reap_called = [] manage_called = [] def mock_reap(): reap_called.append(True) async def mock_manage(): manage_called.append(True) arbiter.reap_workers = mock_reap arbiter.manage_workers = mock_manage await arbiter._handle_sigchld() assert len(reap_called) == 1 assert len(manage_called) == 1 arbiter._cleanup_sync() def test_sigchld_handler_with_loop(self): """Test SIGCHLD signal creates task on loop.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = os.getpid() loop = asyncio.new_event_loop() arbiter._loop = loop tasks_scheduled = [] def mock_call_soon_threadsafe(cb): tasks_scheduled.append(cb) loop.call_soon_threadsafe = mock_call_soon_threadsafe arbiter._signal_handler(signal.SIGCHLD, None) assert len(tasks_scheduled) == 1 loop.close() arbiter._cleanup_sync() class TestDirtyArbiterSighupHandler: """Tests for SIGHUP (reload) handling.""" def test_sighup_handler_with_loop(self): """Test SIGHUP signal schedules reload.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = os.getpid() loop = asyncio.new_event_loop() arbiter._loop = loop tasks_scheduled = [] def mock_call_soon_threadsafe(cb): tasks_scheduled.append(cb) loop.call_soon_threadsafe = mock_call_soon_threadsafe arbiter._signal_handler(signal.SIGHUP, None) # Should still be alive (SIGHUP is reload, not shutdown) assert arbiter.alive is True assert len(tasks_scheduled) == 1 loop.close() arbiter._cleanup_sync() class TestDirtyArbiterQueueBehavior: """Tests for queue-based request 
routing.""" @pytest.mark.asyncio async def test_start_worker_consumer_creates_queue_and_task(self): """Test _start_worker_consumer creates queue and task.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.alive = True fake_pid = 99999 await arbiter._start_worker_consumer(fake_pid) assert fake_pid in arbiter.worker_queues assert fake_pid in arbiter.worker_consumers assert isinstance(arbiter.worker_queues[fake_pid], asyncio.Queue) assert isinstance(arbiter.worker_consumers[fake_pid], asyncio.Task) # Cancel task for cleanup arbiter.worker_consumers[fake_pid].cancel() try: await arbiter.worker_consumers[fake_pid] except asyncio.CancelledError: pass arbiter._cleanup_sync() @pytest.mark.asyncio async def test_route_request_starts_consumer_on_demand(self): """Test route_request starts consumer if not exists.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = os.getpid() arbiter.alive = True # Register fake worker fake_pid = 99999 arbiter.workers[fake_pid] = "fake_worker" arbiter.worker_sockets[fake_pid] = "/tmp/nonexistent.sock" assert fake_pid not in arbiter.worker_queues assert fake_pid not in arbiter.worker_consumers # Make request - should start consumer request = make_request( request_id="test-123", app_path="test:App", action="test" ) # This will fail (no socket), but consumer should be started writer = MockStreamWriter() await arbiter.route_request(request, writer) assert fake_pid in arbiter.worker_queues assert fake_pid in arbiter.worker_consumers # Cleanup arbiter.alive = False arbiter.worker_consumers[fake_pid].cancel() try: await arbiter.worker_consumers[fake_pid] except asyncio.CancelledError: pass arbiter._cleanup_sync() @pytest.mark.asyncio async def test_stop_cancels_all_consumers(self): """Test stop() cancels all consumer tasks.""" cfg = Config() cfg.set("dirty_graceful_timeout", 1) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = os.getpid() arbiter.alive = True # 
Create mock consumers async def mock_consumer(): try: while True: await asyncio.sleep(1) except asyncio.CancelledError: pass task1 = asyncio.create_task(mock_consumer()) task2 = asyncio.create_task(mock_consumer()) arbiter.worker_consumers[1] = task1 arbiter.worker_consumers[2] = task2 await arbiter.stop(graceful=True) # Allow cancelled tasks to complete await asyncio.sleep(0) # All consumers should be done (cancelled and caught) assert task1.done() assert task2.done() arbiter._cleanup_sync() class TestDirtyArbiterAppTracking: """Tests for per-app worker tracking.""" def test_parse_app_specs_standard_format(self): """All standard format apps have worker_count=None.""" cfg = Config() cfg.set("dirty_apps", [ "tests.support_dirty_app:TestDirtyApp", "tests.support_dirty_app:SlowDirtyApp", ]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) assert len(arbiter.app_specs) == 2 assert arbiter.app_specs["tests.support_dirty_app:TestDirtyApp"]["worker_count"] is None assert arbiter.app_specs["tests.support_dirty_app:SlowDirtyApp"]["worker_count"] is None arbiter._cleanup_sync() def test_parse_app_specs_with_worker_count(self): """Apps with :N have correct worker_count.""" cfg = Config() cfg.set("dirty_apps", [ "tests.support_dirty_app:TestDirtyApp", "tests.support_dirty_app:SlowDirtyApp:2", ]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) assert arbiter.app_specs["tests.support_dirty_app:TestDirtyApp"]["worker_count"] is None assert arbiter.app_specs["tests.support_dirty_app:SlowDirtyApp"]["worker_count"] == 2 arbiter._cleanup_sync() def test_get_apps_for_new_worker_all_standard(self): """All apps returned when all have workers=None.""" cfg = Config() cfg.set("dirty_apps", [ "tests.support_dirty_app:TestDirtyApp", "tests.support_dirty_app:SlowDirtyApp", ]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) apps = arbiter._get_apps_for_new_worker() assert len(apps) == 2 assert "tests.support_dirty_app:TestDirtyApp" in apps assert 
"tests.support_dirty_app:SlowDirtyApp" in apps arbiter._cleanup_sync() def test_get_apps_for_new_worker_respects_limit(self): """App with workers=2 stops assigning after 2 workers.""" cfg = Config() cfg.set("dirty_apps", [ "tests.support_dirty_app:TestDirtyApp", # unlimited "tests.support_dirty_app:SlowDirtyApp:2", # limited to 2 ]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) # First worker should get both apps apps1 = arbiter._get_apps_for_new_worker() assert len(apps1) == 2 arbiter._register_worker_apps(1001, apps1) # Second worker should get both apps apps2 = arbiter._get_apps_for_new_worker() assert len(apps2) == 2 arbiter._register_worker_apps(1002, apps2) # Third worker should only get unlimited app apps3 = arbiter._get_apps_for_new_worker() assert len(apps3) == 1 assert "tests.support_dirty_app:TestDirtyApp" in apps3 assert "tests.support_dirty_app:SlowDirtyApp" not in apps3 arbiter._cleanup_sync() def test_register_worker_apps_updates_both_maps(self): """Both app_worker_map and worker_app_map updated.""" cfg = Config() cfg.set("dirty_apps", ["tests.support_dirty_app:TestDirtyApp"]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) app_path = "tests.support_dirty_app:TestDirtyApp" arbiter._register_worker_apps(1001, [app_path]) # Check app_worker_map assert 1001 in arbiter.app_worker_map[app_path] # Check worker_app_map assert app_path in arbiter.worker_app_map[1001] arbiter._cleanup_sync() def test_unregister_worker_cleans_both_maps(self): """Worker removal updates both maps correctly.""" cfg = Config() cfg.set("dirty_apps", ["tests.support_dirty_app:TestDirtyApp"]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) app_path = "tests.support_dirty_app:TestDirtyApp" arbiter._register_worker_apps(1001, [app_path]) # Verify registered assert 1001 in arbiter.app_worker_map[app_path] assert 1001 in arbiter.worker_app_map # Unregister arbiter._unregister_worker(1001) # Verify cleaned up assert 1001 not in arbiter.app_worker_map[app_path] 
assert 1001 not in arbiter.worker_app_map arbiter._cleanup_sync() class TestDirtyArbiterSpawnWorkerPerApp: """Tests for spawn_worker with per-app allocation.""" def test_cleanup_worker_queues_apps_for_respawn(self): """Dead worker's apps added to _pending_respawns.""" cfg = Config() cfg.set("dirty_apps", ["tests.support_dirty_app:TestDirtyApp"]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = 12345 # Simulate worker registration app_path = "tests.support_dirty_app:TestDirtyApp" arbiter.workers[1001] = "fake_worker" arbiter.worker_sockets[1001] = "/tmp/fake.sock" arbiter._register_worker_apps(1001, [app_path]) # Cleanup should queue apps for respawn assert len(arbiter._pending_respawns) == 0 arbiter._cleanup_worker(1001) assert len(arbiter._pending_respawns) == 1 assert app_path in arbiter._pending_respawns[0] arbiter._cleanup_sync() def test_pending_respawns_cleared_after_spawn(self): """Pending respawns consumed when spawning new worker.""" cfg = Config() cfg.set("dirty_apps", ["tests.support_dirty_app:TestDirtyApp"]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = 12345 # Add pending respawn app_path = "tests.support_dirty_app:TestDirtyApp" arbiter._pending_respawns.append([app_path]) # Get apps for new worker should use pending first # But since spawn_worker forks, we test the logic directly assert len(arbiter._pending_respawns) == 1 # When spawn_worker pops from pending_respawns apps = arbiter._pending_respawns.pop(0) assert apps == [app_path] assert len(arbiter._pending_respawns) == 0 arbiter._cleanup_sync() class TestDirtyArbiterRoutingPerApp: """Tests for app-aware routing.""" @pytest.mark.asyncio async def test_get_available_worker_no_filter(self): """Without app_path, returns any worker round-robin.""" cfg = Config() log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.workers[1001] = "worker1" arbiter.workers[1002] = "worker2" # Should return workers in round-robin w1 = await 
arbiter._get_available_worker() w2 = await arbiter._get_available_worker() assert w1 in [1001, 1002] assert w2 in [1001, 1002] # They should be different (round-robin) if len(arbiter.workers) >= 2: assert w1 != w2 or len(arbiter.workers) == 1 arbiter._cleanup_sync() @pytest.mark.asyncio async def test_get_available_worker_with_app_filter(self): """With app_path, returns only workers that have it.""" cfg = Config() cfg.set("dirty_apps", ["tests.support_dirty_app:TestDirtyApp"]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.workers[1001] = "worker1" arbiter.workers[1002] = "worker2" # Only register 1001 for the app app_path = "tests.support_dirty_app:TestDirtyApp" arbiter._register_worker_apps(1001, [app_path]) # Should only return 1001 worker = await arbiter._get_available_worker(app_path) assert worker == 1001 arbiter._cleanup_sync() @pytest.mark.asyncio async def test_get_available_worker_app_no_workers_returns_none(self): """Returns None if no workers have the app.""" cfg = Config() cfg.set("dirty_apps", ["tests.support_dirty_app:TestDirtyApp"]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.workers[1001] = "worker1" # Worker 1001 has no apps registered - request for unknown app returns None worker = await arbiter._get_available_worker("unknown:App") assert worker is None arbiter._cleanup_sync() @pytest.mark.asyncio async def test_route_request_app_not_loaded_error(self): """Error response when no worker has the app.""" from gunicorn.dirty.protocol import DirtyProtocol cfg = Config() cfg.set("dirty_apps", ["tests.support_dirty_app:TestDirtyApp"]) log = MockLog() arbiter = DirtyArbiter(cfg=cfg, log=log) arbiter.pid = 12345 arbiter.workers[1001] = "worker1" # No apps registered for this worker (worker exists but has no apps) request = make_request( request_id="test-123", app_path="unknown:App", action="test" ) writer = MockStreamWriter() await arbiter.route_request(request, writer) assert len(writer.messages) == 1 response = 
writer.messages[0] assert response["type"] == DirtyProtocol.MSG_TYPE_ERROR assert "No workers available for app" in response["error"]["message"] assert response["error"]["error_type"] == "DirtyNoWorkersAvailableError" arbiter._cleanup_sync() benoitc-gunicorn-f5fb19e/tests/test_dirty_client.py000066400000000000000000000244641514360242400227410ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Tests for dirty client module.""" import os import socket import tempfile import threading import pytest from gunicorn.dirty.client import ( DirtyClient, get_dirty_client, get_dirty_socket_path, set_dirty_socket_path, close_dirty_client, ) from gunicorn.dirty.errors import DirtyConnectionError, DirtyError from gunicorn.dirty.protocol import DirtyProtocol, make_response class TestDirtyClientInit: """Tests for DirtyClient initialization.""" def test_init_attributes(self): """Test that client is initialized with correct attributes.""" client = DirtyClient("/tmp/test.sock", timeout=60.0) assert client.socket_path == "/tmp/test.sock" assert client.timeout == 60.0 assert client._sock is None assert client._reader is None assert client._writer is None class TestDirtyClientSync: """Tests for sync API.""" def test_connect_nonexistent_socket(self): """Test connecting to non-existent socket.""" client = DirtyClient("/nonexistent/socket.sock") with pytest.raises(DirtyConnectionError) as exc_info: client.connect() assert "Failed to connect" in str(exc_info.value) def test_connect_success(self): """Test successful connection.""" with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "test.sock") # Create a listening socket server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) server_sock.bind(socket_path) server_sock.listen(1) try: client = DirtyClient(socket_path) client.connect() assert client._sock is not None client.close() finally: server_sock.close() def 
test_close_idempotent(self): """Test that close can be called multiple times.""" client = DirtyClient("/tmp/test.sock") client.close() client.close() # Should not raise class TestDirtyClientAsync: """Tests for async API.""" @pytest.mark.asyncio async def test_connect_async_nonexistent_socket(self): """Test async connecting to non-existent socket.""" client = DirtyClient("/nonexistent/socket.sock", timeout=1.0) with pytest.raises(DirtyConnectionError): await client.connect_async() @pytest.mark.asyncio async def test_close_async_idempotent(self): """Test that close_async can be called multiple times.""" client = DirtyClient("/tmp/test.sock") await client.close_async() await client.close_async() # Should not raise class TestDirtyClientContextManagers: """Tests for context manager functionality.""" def test_sync_context_manager_connection_error(self): """Test sync context manager with connection error.""" client = DirtyClient("/nonexistent/socket.sock") with pytest.raises(DirtyConnectionError): with client: pass @pytest.mark.asyncio async def test_async_context_manager_connection_error(self): """Test async context manager with connection error.""" client = DirtyClient("/nonexistent/socket.sock", timeout=1.0) with pytest.raises(DirtyConnectionError): async with client: pass class TestDirtyClientHelpers: """Tests for helper functions.""" def test_set_get_socket_path(self): """Test setting and getting socket path.""" original = os.environ.get('GUNICORN_DIRTY_SOCKET') try: set_dirty_socket_path("/tmp/dirty.sock") assert get_dirty_socket_path() == "/tmp/dirty.sock" finally: set_dirty_socket_path(None) if original: os.environ['GUNICORN_DIRTY_SOCKET'] = original def test_get_socket_path_from_env(self): """Test getting socket path from environment.""" original = os.environ.get('GUNICORN_DIRTY_SOCKET') try: set_dirty_socket_path(None) os.environ['GUNICORN_DIRTY_SOCKET'] = "/env/dirty.sock" assert get_dirty_socket_path() == "/env/dirty.sock" finally: set_dirty_socket_path(None) 
if original: os.environ['GUNICORN_DIRTY_SOCKET'] = original else: os.environ.pop('GUNICORN_DIRTY_SOCKET', None) def test_get_socket_path_not_configured(self): """Test error when socket path not configured.""" original = os.environ.get('GUNICORN_DIRTY_SOCKET') try: set_dirty_socket_path(None) os.environ.pop('GUNICORN_DIRTY_SOCKET', None) with pytest.raises(DirtyError) as exc_info: get_dirty_socket_path() assert "not configured" in str(exc_info.value) finally: if original: os.environ['GUNICORN_DIRTY_SOCKET'] = original def test_get_dirty_client_thread_local(self): """Test that get_dirty_client returns thread-local client.""" original = os.environ.get('GUNICORN_DIRTY_SOCKET') try: set_dirty_socket_path("/tmp/test.sock") # Clean up any existing client close_dirty_client() client1 = get_dirty_client() client2 = get_dirty_client() # Should return same instance in same thread assert client1 is client2 close_dirty_client() finally: set_dirty_socket_path(None) if original: os.environ['GUNICORN_DIRTY_SOCKET'] = original def test_get_dirty_client_different_threads(self): """Test that different threads get different clients.""" original = os.environ.get('GUNICORN_DIRTY_SOCKET') clients = [] try: set_dirty_socket_path("/tmp/test.sock") def get_client(): clients.append(get_dirty_client()) close_dirty_client() # Clean up main thread client close_dirty_client() t1 = threading.Thread(target=get_client) t2 = threading.Thread(target=get_client) t1.start() t2.start() t1.join() t2.join() # Different threads should get different clients assert len(clients) == 2 assert clients[0] is not clients[1] finally: set_dirty_socket_path(None) if original: os.environ['GUNICORN_DIRTY_SOCKET'] = original def test_close_dirty_client(self): """Test closing thread-local client.""" original = os.environ.get('GUNICORN_DIRTY_SOCKET') try: set_dirty_socket_path("/tmp/test.sock") client = get_dirty_client() close_dirty_client() # Should be able to get a new client client2 = get_dirty_client() assert client2 
is not client close_dirty_client() finally: set_dirty_socket_path(None) if original: os.environ['GUNICORN_DIRTY_SOCKET'] = original class TestDirtyClientResponseHandling: """Tests for response handling.""" def test_handle_response_success(self): """Test handling successful response.""" client = DirtyClient("/tmp/test.sock") response = make_response("test-id", {"data": "value"}) result = client._handle_response(response) assert result == {"data": "value"} def test_handle_response_error(self): """Test handling error response.""" client = DirtyClient("/tmp/test.sock") response = { "type": DirtyProtocol.MSG_TYPE_ERROR, "id": "test-id", "error": { "error_type": "DirtyError", "message": "Test error", "details": {}, }, } with pytest.raises(DirtyError) as exc_info: client._handle_response(response) assert "Test error" in str(exc_info.value) def test_handle_response_unknown_type(self): """Test handling unknown response type.""" client = DirtyClient("/tmp/test.sock") response = { "type": "unknown", "id": "test-id", } with pytest.raises(DirtyError) as exc_info: client._handle_response(response) assert "Unknown response type" in str(exc_info.value) class TestDirtyClientExecute: """Tests for execute functionality with mock sockets.""" def test_execute_with_socket_pair(self): """Test execute using a socket pair to simulate server.""" import threading with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "test.sock") # Create server socket server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) server_sock.bind(socket_path) server_sock.listen(1) response_sent = threading.Event() def server_handler(): conn, _ = server_sock.accept() try: # Read request msg = DirtyProtocol.read_message(conn) # Send response resp = make_response(msg["id"], {"result": "success"}) DirtyProtocol.write_message(conn, resp) response_sent.set() finally: conn.close() server_thread = threading.Thread(target=server_handler) server_thread.start() try: client = 
DirtyClient(socket_path, timeout=5.0) result = client.execute("test:App", "action", "arg1", key="value") assert result == {"result": "success"} client.close() finally: response_sent.wait(timeout=2.0) server_thread.join(timeout=2.0) server_sock.close() def test_close_socket_clears_sock(self): """Test that _close_socket clears the socket.""" client = DirtyClient("/tmp/test.sock") # Simulate having a socket client._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) client._close_socket() assert client._sock is None benoitc-gunicorn-f5fb19e/tests/test_dirty_config.py000066400000000000000000000110551514360242400227200ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Tests for dirty arbiter configuration settings.""" import pytest from gunicorn.config import Config class TestDirtyConfig: """Tests for dirty arbiter configuration settings.""" def test_dirty_apps_default(self): """Test dirty_apps default is empty list.""" cfg = Config() assert cfg.dirty_apps == [] def test_dirty_apps_single(self): """Test dirty_apps with single app.""" cfg = Config() cfg.set("dirty_apps", ["myapp.ml:MLApp"]) assert cfg.dirty_apps == ["myapp.ml:MLApp"] def test_dirty_apps_multiple(self): """Test dirty_apps with multiple apps.""" cfg = Config() cfg.set("dirty_apps", [ "myapp.ml:MLApp", "myapp.images:ImageApp", ]) assert len(cfg.dirty_apps) == 2 assert "myapp.ml:MLApp" in cfg.dirty_apps assert "myapp.images:ImageApp" in cfg.dirty_apps def test_dirty_workers_default(self): """Test dirty_workers default is 0 (disabled).""" cfg = Config() assert cfg.dirty_workers == 0 def test_dirty_workers_set(self): """Test setting dirty_workers.""" cfg = Config() cfg.set("dirty_workers", 2) assert cfg.dirty_workers == 2 def test_dirty_workers_invalid_negative(self): """Test dirty_workers rejects negative values.""" cfg = Config() with pytest.raises(ValueError): cfg.set("dirty_workers", -1) def 
test_dirty_timeout_default(self): """Test dirty_timeout default is 300 seconds.""" cfg = Config() assert cfg.dirty_timeout == 300 def test_dirty_timeout_set(self): """Test setting dirty_timeout.""" cfg = Config() cfg.set("dirty_timeout", 600) assert cfg.dirty_timeout == 600 def test_dirty_timeout_zero_disables(self): """Test dirty_timeout can be set to 0 to disable.""" cfg = Config() cfg.set("dirty_timeout", 0) assert cfg.dirty_timeout == 0 def test_dirty_threads_default(self): """Test dirty_threads default is 1.""" cfg = Config() assert cfg.dirty_threads == 1 def test_dirty_threads_set(self): """Test setting dirty_threads.""" cfg = Config() cfg.set("dirty_threads", 4) assert cfg.dirty_threads == 4 def test_dirty_graceful_timeout_default(self): """Test dirty_graceful_timeout default is 30 seconds.""" cfg = Config() assert cfg.dirty_graceful_timeout == 30 def test_dirty_graceful_timeout_set(self): """Test setting dirty_graceful_timeout.""" cfg = Config() cfg.set("dirty_graceful_timeout", 60) assert cfg.dirty_graceful_timeout == 60 def test_all_dirty_settings_accessible(self): """Test all dirty settings are accessible.""" cfg = Config() # These should not raise AttributeError _ = cfg.dirty_apps _ = cfg.dirty_workers _ = cfg.dirty_timeout _ = cfg.dirty_threads _ = cfg.dirty_graceful_timeout class TestDirtyConfigCLI: """Tests for dirty arbiter CLI argument parsing.""" def test_dirty_workers_cli(self): """Test --dirty-workers CLI argument.""" cfg = Config() parser = cfg.parser() args = parser.parse_args(["--dirty-workers", "3"]) assert args.dirty_workers == 3 def test_dirty_timeout_cli(self): """Test --dirty-timeout CLI argument.""" cfg = Config() parser = cfg.parser() args = parser.parse_args(["--dirty-timeout", "600"]) assert args.dirty_timeout == 600 def test_dirty_threads_cli(self): """Test --dirty-threads CLI argument.""" cfg = Config() parser = cfg.parser() args = parser.parse_args(["--dirty-threads", "8"]) assert args.dirty_threads == 8 def 
test_dirty_graceful_timeout_cli(self): """Test --dirty-graceful-timeout CLI argument.""" cfg = Config() parser = cfg.parser() args = parser.parse_args(["--dirty-graceful-timeout", "45"]) assert args.dirty_graceful_timeout == 45 def test_dirty_app_cli(self): """Test --dirty-app CLI argument (can be repeated).""" cfg = Config() parser = cfg.parser() args = parser.parse_args([ "--dirty-app", "myapp.ml:MLApp", "--dirty-app", "myapp.images:ImageApp", ]) assert args.dirty_apps == ["myapp.ml:MLApp", "myapp.images:ImageApp"] benoitc-gunicorn-f5fb19e/tests/test_dirty_errors.py000066400000000000000000000051671514360242400227760ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Tests for dirty errors module.""" import pytest from gunicorn.dirty.errors import ( DirtyError, DirtyNoWorkersAvailableError, ) class TestDirtyNoWorkersAvailableError: """Tests for DirtyNoWorkersAvailableError exception.""" def test_error_contains_app_path(self): """Error includes the app_path.""" error = DirtyNoWorkersAvailableError("myapp:Model") assert error.app_path == "myapp:Model" assert "myapp:Model" in str(error) assert "No workers available" in str(error) def test_error_with_custom_message(self): """Error can have a custom message.""" error = DirtyNoWorkersAvailableError( "myapp:Model", message="Custom: no workers for heavy model" ) assert error.app_path == "myapp:Model" assert "Custom: no workers" in str(error) def test_error_serialization_roundtrip(self): """Error survives to_dict/from_dict cycle.""" original = DirtyNoWorkersAvailableError("myapp.ml:HugeModel") # Serialize data = original.to_dict() assert data["error_type"] == "DirtyNoWorkersAvailableError" assert data["details"]["app_path"] == "myapp.ml:HugeModel" # Deserialize restored = DirtyError.from_dict(data) assert isinstance(restored, DirtyNoWorkersAvailableError) assert restored.app_path == "myapp.ml:HugeModel" assert "No workers available" in 
str(restored) def test_error_is_dirty_error_subclass(self): """DirtyNoWorkersAvailableError is a DirtyError subclass.""" error = DirtyNoWorkersAvailableError("app:Class") assert isinstance(error, DirtyError) def test_web_app_can_catch_specific_error(self): """Web app can catch DirtyNoWorkersAvailableError specifically.""" def simulate_execute(): raise DirtyNoWorkersAvailableError("myapp:HeavyModel") # Catch specific error try: simulate_execute() assert False, "Should have raised" except DirtyNoWorkersAvailableError as e: assert e.app_path == "myapp:HeavyModel" def test_can_catch_as_base_error(self): """Can catch DirtyNoWorkersAvailableError as DirtyError.""" def simulate_execute(): raise DirtyNoWorkersAvailableError("myapp:Model") try: simulate_execute() assert False, "Should have raised" except DirtyError as e: # Should catch it as the base class assert hasattr(e, "app_path") benoitc-gunicorn-f5fb19e/tests/test_dirty_hooks.py000066400000000000000000000061131514360242400225750ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""Tests for dirty arbiter hooks.""" import pytest from gunicorn.config import Config class TestDirtyHooksConfig: """Tests for dirty hook configuration settings.""" def test_on_dirty_starting_default(self): """Test on_dirty_starting default is a callable.""" cfg = Config() assert callable(cfg.on_dirty_starting) def test_on_dirty_starting_custom(self): """Test setting custom on_dirty_starting hook.""" hook_calls = [] def my_hook(arbiter): hook_calls.append(arbiter) cfg = Config() cfg.set("on_dirty_starting", my_hook) # Call the hook cfg.on_dirty_starting("test_arbiter") assert hook_calls == ["test_arbiter"] def test_dirty_post_fork_default(self): """Test dirty_post_fork default is a callable.""" cfg = Config() assert callable(cfg.dirty_post_fork) def test_dirty_post_fork_custom(self): """Test setting custom dirty_post_fork hook.""" hook_calls = [] def my_hook(arbiter, worker): hook_calls.append((arbiter, worker)) cfg = Config() cfg.set("dirty_post_fork", my_hook) # Call the hook cfg.dirty_post_fork("test_arbiter", "test_worker") assert hook_calls == [("test_arbiter", "test_worker")] def test_dirty_worker_init_default(self): """Test dirty_worker_init default is a callable.""" cfg = Config() assert callable(cfg.dirty_worker_init) def test_dirty_worker_init_custom(self): """Test setting custom dirty_worker_init hook.""" hook_calls = [] def my_hook(worker): hook_calls.append(worker) cfg = Config() cfg.set("dirty_worker_init", my_hook) # Call the hook cfg.dirty_worker_init("test_worker") assert hook_calls == ["test_worker"] def test_dirty_worker_exit_default(self): """Test dirty_worker_exit default is a callable.""" cfg = Config() assert callable(cfg.dirty_worker_exit) def test_dirty_worker_exit_custom(self): """Test setting custom dirty_worker_exit hook.""" hook_calls = [] def my_hook(arbiter, worker): hook_calls.append((arbiter, worker)) cfg = Config() cfg.set("dirty_worker_exit", my_hook) # Call the hook cfg.dirty_worker_exit("test_arbiter", "test_worker") assert 
hook_calls == [("test_arbiter", "test_worker")] class TestDirtyHooksValidation: """Tests for hook validation.""" def test_on_dirty_starting_requires_callable(self): """Test that on_dirty_starting requires a callable.""" cfg = Config() with pytest.raises(TypeError): cfg.set("on_dirty_starting", "not_a_callable") def test_dirty_post_fork_requires_callable(self): """Test that dirty_post_fork requires a callable.""" cfg = Config() with pytest.raises(TypeError): cfg.set("dirty_post_fork", 123) benoitc-gunicorn-f5fb19e/tests/test_dirty_integration.py000066400000000000000000000322041514360242400237750ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Integration tests for dirty arbiter with main arbiter.""" import os import struct import pytest from gunicorn.arbiter import Arbiter from gunicorn.config import Config from gunicorn.app.base import BaseApplication from gunicorn.dirty.protocol import DirtyProtocol, BinaryProtocol, HEADER_SIZE class MockStreamWriter: """Mock StreamWriter that captures written messages.""" def __init__(self): self.messages = [] self._buffer = b"" self.closed = False def write(self, data): self._buffer += data async def drain(self): # Decode the buffer to extract messages using binary protocol while len(self._buffer) >= HEADER_SIZE: # Decode header to get payload length _, _, length = BinaryProtocol.decode_header( self._buffer[:HEADER_SIZE] ) total_size = HEADER_SIZE + length if len(self._buffer) >= total_size: msg_data = self._buffer[:total_size] self._buffer = self._buffer[total_size:] # decode_message returns (msg_type_str, request_id, payload_dict) msg_type_str, request_id, payload_dict = BinaryProtocol.decode_message(msg_data) # Reconstruct the dict format for backwards compatibility result = {"type": msg_type_str, "id": request_id} result.update(payload_dict) self.messages.append(result) else: break def close(self): self.closed = True async def 
wait_closed(self): pass def get_extra_info(self, name): return None class SimpleDirtyTestApp(BaseApplication): """Simple test application for integration tests.""" def __init__(self, options=None): self.options = options or {} self.cfg = None super().__init__() def load_config(self): for key, value in self.options.items(): if key in self.cfg.settings: self.cfg.set(key.lower(), value) def load(self): def app(environ, start_response): status = '200 OK' output = b'Hello World!' response_headers = [('Content-type', 'text/plain'), ('Content-Length', str(len(output)))] start_response(status, response_headers) return [output] return app class TestArbiterDirtyIntegration: """Tests for arbiter integration with dirty arbiter.""" def test_arbiter_init_with_dirty_config(self): """Test arbiter initializes with dirty configuration.""" app = SimpleDirtyTestApp(options={ 'dirty_workers': 2, 'dirty_apps': ['tests.support_dirty_app:TestDirtyApp'], 'bind': '127.0.0.1:0', }) arbiter = Arbiter(app) assert arbiter.dirty_arbiter_pid == 0 assert arbiter.dirty_arbiter is None assert arbiter.cfg.dirty_workers == 2 assert arbiter.cfg.dirty_apps == ['tests.support_dirty_app:TestDirtyApp'] def test_arbiter_init_without_dirty_config(self): """Test arbiter initializes without dirty configuration.""" app = SimpleDirtyTestApp(options={ 'bind': '127.0.0.1:0', }) arbiter = Arbiter(app) assert arbiter.dirty_arbiter_pid == 0 assert arbiter.cfg.dirty_workers == 0 assert arbiter.cfg.dirty_apps == [] class TestDirtyIntegrationEnvironment: """Tests for environment setup.""" def test_dirty_socket_env_var_set(self): """Test that GUNICORN_DIRTY_SOCKET env var is set when dirty arbiter spawns.""" # This test would require actually spawning the dirty arbiter # which involves forking. We'll skip this for unit tests. 
pass class TestDirtyExecutionTimeout: """Tests for execution timeout handling.""" @pytest.mark.asyncio async def test_worker_to_worker_communication(self): """Test protocol communication between worker and arbiter.""" import asyncio import tempfile from gunicorn.dirty.worker import DirtyWorker from gunicorn.dirty.protocol import DirtyProtocol, make_request class MockLog: def debug(self, *a, **kw): pass def info(self, *a, **kw): pass def warning(self, *a, **kw): pass def error(self, *a, **kw): pass def close_on_exec(self): pass def reopen_files(self): pass cfg = Config() cfg.set("dirty_timeout", 300) log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=["tests.support_dirty_app:TestDirtyApp"], cfg=cfg, log=log, socket_path=socket_path ) worker.pid = os.getpid() worker.load_apps() # Start worker server server = await asyncio.start_unix_server( worker.handle_connection, path=socket_path ) # Connect as client reader, writer = await asyncio.open_unix_connection(socket_path) # Send a request request = make_request( request_id="timeout-test-1", app_path="tests.support_dirty_app:TestDirtyApp", action="compute", args=(10, 5), kwargs={"operation": "add"} ) await DirtyProtocol.write_message_async(writer, request) # Receive response response = await DirtyProtocol.read_message_async(reader) assert response["type"] == DirtyProtocol.MSG_TYPE_RESPONSE assert response["result"] == 15 # Cleanup writer.close() await writer.wait_closed() server.close() await server.wait_closed() worker._cleanup() @pytest.mark.asyncio async def test_arbiter_timeout_response(self): """Test that arbiter returns timeout error when worker doesn't respond.""" import asyncio import tempfile from gunicorn.dirty.arbiter import DirtyArbiter from gunicorn.dirty.protocol import DirtyProtocol, make_request class MockLog: def debug(self, *a, **kw): pass def info(self, *a, **kw): pass def warning(self, 
*a, **kw): pass def error(self, *a, **kw): pass def critical(self, *a, **kw): pass def exception(self, *a, **kw): pass def close_on_exec(self): pass def reopen_files(self): pass cfg = Config() cfg.set("dirty_workers", 0) cfg.set("dirty_timeout", 1) # 1 second timeout log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "arbiter.sock") worker_socket_path = os.path.join(tmpdir, "worker.sock") arbiter = DirtyArbiter(cfg=cfg, log=log, socket_path=socket_path) arbiter.pid = os.getpid() arbiter.alive = True slow_server = None try: # Register a fake worker that will never respond fake_pid = 99999 arbiter.workers[fake_pid] = "fake_worker" arbiter.worker_sockets[fake_pid] = worker_socket_path # Create a "slow" worker server that accepts but never responds async def slow_client_handler(reader, writer): # Read the request but don't respond (simulating timeout) try: await asyncio.sleep(10) # Longer than timeout except asyncio.CancelledError: pass finally: try: writer.close() await writer.wait_closed() except Exception: pass slow_server = await asyncio.start_unix_server( slow_client_handler, path=worker_socket_path ) request = make_request( request_id="timeout-test", app_path="test:App", action="slow_action" ) # Use MockStreamWriter to capture the response mock_writer = MockStreamWriter() await arbiter.route_request(request, mock_writer) assert len(mock_writer.messages) == 1 response = mock_writer.messages[0] assert response["type"] == DirtyProtocol.MSG_TYPE_ERROR assert "timeout" in response["error"]["error_type"].lower() finally: # Cancel any pending consumer tasks arbiter.alive = False for task in arbiter.worker_consumers.values(): task.cancel() try: await task except asyncio.CancelledError: pass # Close worker connections arbiter._close_worker_connection(fake_pid) # Cleanup server if slow_server: slow_server.close() await slow_server.wait_closed() arbiter._cleanup_sync() @pytest.mark.asyncio async def 
test_full_request_response_flow(self): """Test full request-response flow between arbiter and worker.""" import asyncio import tempfile from gunicorn.dirty.arbiter import DirtyArbiter from gunicorn.dirty.worker import DirtyWorker from gunicorn.dirty.protocol import DirtyProtocol, make_request class MockLog: def debug(self, *a, **kw): pass def info(self, *a, **kw): pass def warning(self, *a, **kw): pass def error(self, *a, **kw): pass def critical(self, *a, **kw): pass def exception(self, *a, **kw): pass def close_on_exec(self): pass def reopen_files(self): pass cfg = Config() cfg.set("dirty_workers", 0) cfg.set("dirty_timeout", 10) log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: arbiter_socket_path = os.path.join(tmpdir, "arbiter.sock") worker_socket_path = os.path.join(tmpdir, "worker.sock") worker = None arbiter = None worker_server = None fake_pid = 12345 try: # Create worker worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=["tests.support_dirty_app:TestDirtyApp"], cfg=cfg, log=log, socket_path=worker_socket_path ) worker.pid = os.getpid() worker.load_apps() # Start worker server worker_server = await asyncio.start_unix_server( worker.handle_connection, path=worker_socket_path ) # Create arbiter arbiter = DirtyArbiter(cfg=cfg, log=log, socket_path=arbiter_socket_path) arbiter.pid = os.getpid() arbiter.alive = True # Register worker arbiter.workers[fake_pid] = worker arbiter.worker_sockets[fake_pid] = worker_socket_path # Route a request using MockStreamWriter request = make_request( request_id="full-flow-test", app_path="tests.support_dirty_app:TestDirtyApp", action="compute", args=(7, 3), kwargs={"operation": "multiply"} ) mock_writer = MockStreamWriter() await arbiter.route_request(request, mock_writer) assert len(mock_writer.messages) == 1 response = mock_writer.messages[0] assert response["type"] == DirtyProtocol.MSG_TYPE_RESPONSE assert response["result"] == 21 finally: # Cancel any pending consumer tasks if arbiter: arbiter.alive = 
False for task in arbiter.worker_consumers.values(): task.cancel() try: await task except asyncio.CancelledError: pass # Close arbiter's connection first arbiter._close_worker_connection(fake_pid) arbiter._cleanup_sync() # Close worker server if worker_server: worker_server.close() await worker_server.wait_closed() if worker: worker._cleanup() benoitc-gunicorn-f5fb19e/tests/test_dirty_protocol.py000066400000000000000000000504221514360242400233150ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Tests for dirty worker binary protocol module.""" import asyncio import os import socket import struct import pytest from gunicorn.dirty.protocol import ( BinaryProtocol, DirtyProtocol, make_request, make_response, make_error_response, make_chunk_message, make_end_message, MAGIC, VERSION, HEADER_SIZE, HEADER_FORMAT, MSG_TYPE_REQUEST, MSG_TYPE_RESPONSE, MSG_TYPE_ERROR, MSG_TYPE_CHUNK, MSG_TYPE_END, MAX_MESSAGE_SIZE, ) from gunicorn.dirty.errors import ( DirtyError, DirtyProtocolError, DirtyTimeoutError, DirtyAppError, ) class TestBinaryProtocolHeader: """Tests for header encoding/decoding.""" def test_header_size(self): """Test header size is 16 bytes.""" assert HEADER_SIZE == 16 def test_encode_header(self): """Test header encoding.""" header = BinaryProtocol.encode_header(MSG_TYPE_REQUEST, 12345, 100) assert len(header) == HEADER_SIZE assert header[:2] == MAGIC assert header[2] == VERSION assert header[3] == MSG_TYPE_REQUEST def test_decode_header(self): """Test header decoding.""" header = BinaryProtocol.encode_header(MSG_TYPE_RESPONSE, 67890, 200) msg_type, request_id, length = BinaryProtocol.decode_header(header) assert msg_type == MSG_TYPE_RESPONSE assert request_id == 67890 assert length == 200 def test_decode_header_invalid_magic(self): """Test header decoding with invalid magic.""" header = b"XX" + b"\x01\x01" + b"\x00" * 12 with pytest.raises(DirtyProtocolError) as exc_info: 
BinaryProtocol.decode_header(header) assert "magic" in str(exc_info.value).lower() def test_decode_header_invalid_version(self): """Test header decoding with invalid version.""" header = MAGIC + b"\x99\x01" + b"\x00" * 12 with pytest.raises(DirtyProtocolError) as exc_info: BinaryProtocol.decode_header(header) assert "version" in str(exc_info.value).lower() def test_decode_header_invalid_type(self): """Test header decoding with invalid message type.""" header = MAGIC + bytes([VERSION, 0xFF]) + b"\x00" * 12 with pytest.raises(DirtyProtocolError) as exc_info: BinaryProtocol.decode_header(header) assert "type" in str(exc_info.value).lower() def test_decode_header_too_large(self): """Test header decoding rejects too-large messages.""" header = struct.pack(HEADER_FORMAT, MAGIC, VERSION, MSG_TYPE_REQUEST, MAX_MESSAGE_SIZE + 1, 0) with pytest.raises(DirtyProtocolError) as exc_info: BinaryProtocol.decode_header(header) assert "too large" in str(exc_info.value).lower() def test_decode_header_too_short(self): """Test header decoding with too-short data.""" header = MAGIC + b"\x01" with pytest.raises(DirtyProtocolError) as exc_info: BinaryProtocol.decode_header(header) assert "short" in str(exc_info.value).lower() class TestBinaryProtocolEncodeDecode: """Tests for message encoding/decoding.""" def test_encode_decode_request(self): """Test request encoding/decoding roundtrip.""" encoded = BinaryProtocol.encode_request( request_id=12345, app_path="myapp.ml:MLApp", action="predict", args=("data",), kwargs={"temperature": 0.7} ) assert len(encoded) > HEADER_SIZE msg_type_str, request_id, payload = BinaryProtocol.decode_message(encoded) assert msg_type_str == "request" assert request_id == 12345 assert payload["app_path"] == "myapp.ml:MLApp" assert payload["action"] == "predict" assert payload["args"] == ["data"] assert payload["kwargs"] == {"temperature": 0.7} def test_encode_decode_response(self): """Test response encoding/decoding roundtrip.""" result = {"predictions": [0.1, 
0.9], "metadata": {"model": "v1"}} encoded = BinaryProtocol.encode_response(request_id=67890, result=result) msg_type_str, request_id, payload = BinaryProtocol.decode_message(encoded) assert msg_type_str == "response" assert request_id == 67890 assert payload["result"] == result def test_encode_decode_error(self): """Test error encoding/decoding roundtrip.""" error = DirtyTimeoutError("Timed out", timeout=30) encoded = BinaryProtocol.encode_error(request_id=11111, error=error) msg_type_str, request_id, payload = BinaryProtocol.decode_message(encoded) assert msg_type_str == "error" assert request_id == 11111 assert payload["error"]["error_type"] == "DirtyTimeoutError" assert "Timed out" in payload["error"]["message"] def test_encode_decode_chunk(self): """Test chunk encoding/decoding roundtrip.""" chunk_data = {"token": "hello", "index": 5} encoded = BinaryProtocol.encode_chunk(request_id=22222, data=chunk_data) msg_type_str, request_id, payload = BinaryProtocol.decode_message(encoded) assert msg_type_str == "chunk" assert request_id == 22222 assert payload["data"] == chunk_data def test_encode_decode_end(self): """Test end message encoding/decoding roundtrip.""" encoded = BinaryProtocol.encode_end(request_id=33333) assert len(encoded) == HEADER_SIZE # End has no payload msg_type_str, request_id, payload = BinaryProtocol.decode_message(encoded) assert msg_type_str == "end" assert request_id == 33333 assert payload == {} def test_encode_decode_binary_data(self): """Test binary data passes through without base64 encoding.""" binary_data = bytes(range(256)) encoded = BinaryProtocol.encode_response( request_id=44444, result={"data": binary_data} ) msg_type_str, request_id, payload = BinaryProtocol.decode_message(encoded) assert payload["result"]["data"] == binary_data def test_encode_decode_large_message(self): """Test encoding a large message.""" large_data = b"x" * (1024 * 1024) # 1 MB encoded = BinaryProtocol.encode_response( request_id=55555, result={"data": 
large_data} ) msg_type_str, request_id, payload = BinaryProtocol.decode_message(encoded) assert payload["result"]["data"] == large_data class TestBinaryProtocolSync: """Tests for synchronous socket operations.""" def test_read_write_message(self): """Test read/write through socket pair.""" server_sock, client_sock = socket.socketpair() try: message = make_request( request_id=12345, app_path="test:App", action="run" ) BinaryProtocol.write_message(client_sock, message) received = BinaryProtocol.read_message(server_sock) assert received["type"] == "request" assert received["id"] == hash("12345") & 0xFFFFFFFFFFFFFFFF or \ received["id"] == 12345 assert received["app_path"] == "test:App" assert received["action"] == "run" finally: server_sock.close() client_sock.close() def test_read_write_with_int_id(self): """Test read/write with integer request ID.""" server_sock, client_sock = socket.socketpair() try: message = { "type": "request", "id": 999888777, "app_path": "test:App", "action": "run", "args": [], "kwargs": {} } BinaryProtocol.write_message(client_sock, message) received = BinaryProtocol.read_message(server_sock) assert received["id"] == 999888777 finally: server_sock.close() client_sock.close() def test_multiple_messages(self): """Test sending multiple messages.""" server_sock, client_sock = socket.socketpair() try: messages = [ make_request(i, f"app{i}:App", f"action{i}") for i in range(1, 4) ] for msg in messages: BinaryProtocol.write_message(client_sock, msg) for i, _ in enumerate(messages, 1): received = BinaryProtocol.read_message(server_sock) assert received["app_path"] == f"app{i}:App" assert received["action"] == f"action{i}" finally: server_sock.close() client_sock.close() def test_read_connection_closed(self): """Test reading from closed connection.""" server_sock, client_sock = socket.socketpair() client_sock.close() with pytest.raises(DirtyProtocolError) as exc_info: BinaryProtocol.read_message(server_sock) assert "closed" in 
str(exc_info.value).lower() server_sock.close() def test_binary_data_roundtrip(self): """Test binary data roundtrip through socket.""" server_sock, client_sock = socket.socketpair() try: binary_payload = b"\x00\x01\x02\xff\xfe\xfd" message = make_response(12345, {"binary": binary_payload}) BinaryProtocol.write_message(client_sock, message) received = BinaryProtocol.read_message(server_sock) assert received["result"]["binary"] == binary_payload finally: server_sock.close() client_sock.close() class TestBinaryProtocolAsync: """Tests for async stream operations.""" @pytest.mark.asyncio async def test_async_read_write(self): """Test async read/write with mock streams.""" message = make_request(12345, "test:App", "run") read_fd, write_fd = os.pipe() try: reader = asyncio.StreamReader() _ = asyncio.StreamReaderProtocol(reader) encoded = BinaryProtocol._encode_from_dict(message) os.write(write_fd, encoded) os.close(write_fd) write_fd = None data = os.read(read_fd, len(encoded)) reader.feed_data(data) reader.feed_eof() received = await BinaryProtocol.read_message_async(reader) assert received["type"] == "request" assert received["app_path"] == "test:App" finally: if write_fd is not None: os.close(write_fd) os.close(read_fd) @pytest.mark.asyncio async def test_async_read_incomplete_header(self): """Test async read with incomplete header.""" reader = asyncio.StreamReader() reader.feed_data(MAGIC + b"\x01") # Only 3 bytes reader.feed_eof() with pytest.raises((asyncio.IncompleteReadError, DirtyProtocolError)): await BinaryProtocol.read_message_async(reader) @pytest.mark.asyncio async def test_async_read_empty_connection(self): """Test async read on empty connection.""" reader = asyncio.StreamReader() reader.feed_eof() with pytest.raises(asyncio.IncompleteReadError): await BinaryProtocol.read_message_async(reader) @pytest.mark.asyncio async def test_async_read_invalid_magic(self): """Test async read rejects invalid magic.""" reader = asyncio.StreamReader() header = b"XX" + 
bytes([VERSION, MSG_TYPE_REQUEST]) + b"\x00" * 12 reader.feed_data(header) reader.feed_eof() with pytest.raises(DirtyProtocolError) as exc_info: await BinaryProtocol.read_message_async(reader) assert "magic" in str(exc_info.value).lower() @pytest.mark.asyncio async def test_async_read_message_too_large(self): """Test async read rejects too-large messages.""" reader = asyncio.StreamReader() header = struct.pack(HEADER_FORMAT, MAGIC, VERSION, MSG_TYPE_REQUEST, MAX_MESSAGE_SIZE + 1000, 0) reader.feed_data(header) reader.feed_eof() with pytest.raises(DirtyProtocolError) as exc_info: await BinaryProtocol.read_message_async(reader) assert "too large" in str(exc_info.value) class TestMessageBuilders: """Tests for message builder helper functions.""" def test_make_request(self): """Test request message builder.""" request = make_request( request_id="abc123", app_path="myapp.ml:MLApp", action="inference", args=("model1",), kwargs={"temperature": 0.7} ) assert request["type"] == DirtyProtocol.MSG_TYPE_REQUEST assert request["id"] == "abc123" assert request["app_path"] == "myapp.ml:MLApp" assert request["action"] == "inference" assert request["args"] == ["model1"] assert request["kwargs"] == {"temperature": 0.7} def test_make_request_minimal(self): """Test request with minimal arguments.""" request = make_request( request_id="abc", app_path="app:App", action="run" ) assert request["args"] == [] assert request["kwargs"] == {} def test_make_response(self): """Test response message builder.""" response = make_response( request_id="abc123", result={"status": "ok", "data": [1, 2, 3]} ) assert response["type"] == DirtyProtocol.MSG_TYPE_RESPONSE assert response["id"] == "abc123" assert response["result"] == {"status": "ok", "data": [1, 2, 3]} def test_make_error_response_with_exception(self): """Test error response with DirtyError.""" error = DirtyTimeoutError("Operation timed out", timeout=30) response = make_error_response("abc123", error) assert response["type"] == 
DirtyProtocol.MSG_TYPE_ERROR assert response["id"] == "abc123" assert response["error"]["error_type"] == "DirtyTimeoutError" assert response["error"]["message"] == "Operation timed out" assert response["error"]["details"]["timeout"] == 30 def test_make_error_response_with_dict(self): """Test error response with dict.""" error_dict = { "error_type": "CustomError", "message": "Something went wrong", "details": {"code": 500} } response = make_error_response("abc123", error_dict) assert response["error"] == error_dict def test_make_error_response_with_generic_exception(self): """Test error response with generic exception.""" error = ValueError("Invalid value") response = make_error_response("abc123", error) assert response["error"]["error_type"] == "ValueError" assert response["error"]["message"] == "Invalid value" def test_make_chunk_message(self): """Test chunk message builder.""" chunk = make_chunk_message("req-123", "Hello, ") assert chunk["type"] == DirtyProtocol.MSG_TYPE_CHUNK assert chunk["id"] == "req-123" assert chunk["data"] == "Hello, " def test_make_chunk_message_with_complex_data(self): """Test chunk message with complex data.""" data = {"token": "world", "score": 0.95, "index": 5} chunk = make_chunk_message("req-456", data) assert chunk["type"] == DirtyProtocol.MSG_TYPE_CHUNK assert chunk["id"] == "req-456" assert chunk["data"] == data def test_make_chunk_message_with_binary_data(self): """Test chunk message with binary data.""" data = b"\x00\x01\x02\xff" chunk = make_chunk_message("req-789", data) assert chunk["data"] == data def test_make_end_message(self): """Test end message builder.""" end = make_end_message("req-123") assert end["type"] == DirtyProtocol.MSG_TYPE_END assert end["id"] == "req-123" assert "data" not in end def test_chunk_and_end_roundtrip(self): """Test that chunk and end messages can be encoded/decoded.""" chunk = make_chunk_message(12345, {"token": "hello"}) end = make_end_message(12345) # Test chunk roundtrip encoded_chunk = 
BinaryProtocol._encode_from_dict(chunk) msg_type, req_id, payload = BinaryProtocol.decode_message(encoded_chunk) assert msg_type == "chunk" assert payload["data"] == {"token": "hello"} # Test end roundtrip encoded_end = BinaryProtocol._encode_from_dict(end) msg_type, req_id, payload = BinaryProtocol.decode_message(encoded_end) assert msg_type == "end" assert payload == {} class TestDirtyErrors: """Tests for error classes.""" def test_dirty_error_to_dict(self): """Test serializing error to dict.""" error = DirtyError("Test error", {"key": "value"}) d = error.to_dict() assert d["error_type"] == "DirtyError" assert d["message"] == "Test error" assert d["details"] == {"key": "value"} def test_dirty_error_from_dict(self): """Test deserializing error from dict.""" d = { "error_type": "DirtyTimeoutError", "message": "Timed out", "details": {"timeout": 30} } error = DirtyError.from_dict(d) assert isinstance(error, DirtyTimeoutError) assert error.message == "Timed out" assert error.details["timeout"] == 30 def test_dirty_error_from_dict_unknown_type(self): """Test deserializing unknown error type falls back to DirtyError.""" d = { "error_type": "UnknownError", "message": "Unknown", "details": {} } error = DirtyError.from_dict(d) assert isinstance(error, DirtyError) assert not isinstance(error, DirtyTimeoutError) def test_dirty_app_error(self): """Test DirtyAppError fields.""" error = DirtyAppError( "App failed", app_path="myapp:App", action="run", traceback="Traceback..." ) assert error.app_path == "myapp:App" assert error.action == "run" assert error.traceback == "Traceback..." 
assert "myapp:App" in str(error) class TestBackwardsCompatibility: """Tests for backwards compatibility with old JSON API.""" def test_dirty_protocol_alias(self): """Test that DirtyProtocol is an alias for BinaryProtocol.""" assert DirtyProtocol is BinaryProtocol def test_header_size_attribute(self): """Test HEADER_SIZE is accessible on class.""" assert DirtyProtocol.HEADER_SIZE == 16 def test_msg_type_constants(self): """Test message type constants are strings for compatibility.""" assert DirtyProtocol.MSG_TYPE_REQUEST == "request" assert DirtyProtocol.MSG_TYPE_RESPONSE == "response" assert DirtyProtocol.MSG_TYPE_ERROR == "error" assert DirtyProtocol.MSG_TYPE_CHUNK == "chunk" assert DirtyProtocol.MSG_TYPE_END == "end" def test_encode_decode_preserves_dict_format(self): """Test that read_message returns dict compatible with old API.""" server_sock, client_sock = socket.socketpair() try: message = { "type": "response", "id": 12345, "result": {"status": "ok"} } DirtyProtocol.write_message(client_sock, message) received = DirtyProtocol.read_message(server_sock) # Old API: access via dict keys assert received["type"] == "response" assert received["result"]["status"] == "ok" finally: server_sock.close() client_sock.close() def test_string_request_id_handled(self): """Test that string request IDs are handled (hashed to int).""" server_sock, client_sock = socket.socketpair() try: message = make_request("uuid-string-id", "test:App", "run") DirtyProtocol.write_message(client_sock, message) received = DirtyProtocol.read_message(server_sock) # Request ID should be converted to int assert isinstance(received["id"], int) finally: server_sock.close() client_sock.close() benoitc-gunicorn-f5fb19e/tests/test_dirty_stash.py000066400000000000000000000152201514360242400225730ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""Tests for dirty stash (shared state) functionality.""" import pytest from gunicorn.dirty.stash import ( StashClient, StashTable, StashError, StashTableNotFoundError, StashKeyNotFoundError, ) from gunicorn.dirty.protocol import ( BinaryProtocol, DirtyProtocol, MSG_TYPE_STASH, STASH_OP_PUT, STASH_OP_GET, STASH_OP_DELETE, STASH_OP_KEYS, STASH_OP_CLEAR, STASH_OP_INFO, STASH_OP_ENSURE, STASH_OP_DELETE_TABLE, STASH_OP_TABLES, STASH_OP_EXISTS, make_stash_message, ) class TestStashProtocol: """Test stash protocol encoding.""" def test_make_stash_message_basic(self): """Test basic stash message creation.""" msg = make_stash_message(123, STASH_OP_PUT, "test_table") assert msg["type"] == "stash" assert msg["id"] == 123 assert msg["op"] == STASH_OP_PUT assert msg["table"] == "test_table" def test_make_stash_message_with_key_value(self): """Test stash message with key and value.""" msg = make_stash_message( 456, STASH_OP_PUT, "sessions", key="user:1", value={"name": "Alice"} ) assert msg["key"] == "user:1" assert msg["value"] == {"name": "Alice"} def test_make_stash_message_with_pattern(self): """Test stash message with pattern.""" msg = make_stash_message( 789, STASH_OP_KEYS, "sessions", pattern="user:*" ) assert msg["pattern"] == "user:*" def test_encode_stash_message(self): """Test binary encoding of stash message.""" msg = make_stash_message( 123, STASH_OP_PUT, "test", key="k", value="v" ) encoded = BinaryProtocol._encode_from_dict(msg) assert isinstance(encoded, bytes) assert len(encoded) > 16 # Header + payload def test_stash_message_roundtrip(self): """Test encode/decode roundtrip for stash message.""" original = make_stash_message( 12345, STASH_OP_GET, "cache", key="my_key" ) encoded = BinaryProtocol._encode_from_dict(original) msg_type, request_id, payload = BinaryProtocol.decode_message(encoded) assert msg_type == "stash" assert payload["op"] == STASH_OP_GET assert payload["table"] == "cache" assert payload["key"] == "my_key" def 
test_stash_operations_have_unique_codes(self): """Test that all stash operations have unique codes.""" ops = [ STASH_OP_PUT, STASH_OP_GET, STASH_OP_DELETE, STASH_OP_KEYS, STASH_OP_CLEAR, STASH_OP_INFO, STASH_OP_ENSURE, STASH_OP_DELETE_TABLE, STASH_OP_TABLES, STASH_OP_EXISTS, ] assert len(ops) == len(set(ops)) class TestStashTable: """Test StashTable dict-like interface.""" def test_stash_table_name(self): """Test StashTable name property.""" # Create a mock client class MockClient: pass table = StashTable(MockClient(), "test_table") assert table.name == "test_table" class TestStashErrors: """Test stash error classes.""" def test_stash_error_base(self): """Test base StashError.""" error = StashError("test error") assert str(error) == "test error" assert isinstance(error, Exception) def test_stash_table_not_found_error(self): """Test StashTableNotFoundError.""" error = StashTableNotFoundError("my_table") assert error.table_name == "my_table" assert "my_table" in str(error) def test_stash_key_not_found_error(self): """Test StashKeyNotFoundError.""" error = StashKeyNotFoundError("my_table", "my_key") assert error.table_name == "my_table" assert error.key == "my_key" assert "my_key" in str(error) class TestStashProtocolConstants: """Test protocol constants for stash.""" def test_msg_type_stash_exists(self): """Test MSG_TYPE_STASH constant exists.""" assert MSG_TYPE_STASH == 0x10 def test_dirty_protocol_exports_stash_type(self): """Test DirtyProtocol exports stash type.""" assert DirtyProtocol.MSG_TYPE_STASH == "stash" def test_stash_op_codes(self): """Test stash operation codes are integers.""" assert isinstance(STASH_OP_PUT, int) assert isinstance(STASH_OP_GET, int) assert isinstance(STASH_OP_DELETE, int) assert isinstance(STASH_OP_KEYS, int) assert isinstance(STASH_OP_CLEAR, int) assert isinstance(STASH_OP_INFO, int) assert isinstance(STASH_OP_ENSURE, int) assert isinstance(STASH_OP_DELETE_TABLE, int) assert isinstance(STASH_OP_TABLES, int) assert 
isinstance(STASH_OP_EXISTS, int) class TestStashEncodingEdgeCases: """Test edge cases in stash encoding.""" def test_encode_empty_table_name(self): """Test encoding with empty table name.""" msg = make_stash_message(1, STASH_OP_TABLES, "") encoded = BinaryProtocol._encode_from_dict(msg) assert isinstance(encoded, bytes) def test_encode_unicode_table_name(self): """Test encoding with unicode table name.""" msg = make_stash_message(1, STASH_OP_PUT, "テスト", key="k", value="v") encoded = BinaryProtocol._encode_from_dict(msg) _, _, payload = BinaryProtocol.decode_message(encoded) assert payload["table"] == "テスト" def test_encode_complex_value(self): """Test encoding with complex nested value.""" value = { "name": "test", "count": 42, "nested": {"a": [1, 2, 3]}, "data": b"binary data", } msg = make_stash_message(1, STASH_OP_PUT, "test", key="k", value=value) encoded = BinaryProtocol._encode_from_dict(msg) _, _, payload = BinaryProtocol.decode_message(encoded) assert payload["value"] == value def test_encode_none_key(self): """Test encoding with None key (for table-level ops).""" msg = make_stash_message(1, STASH_OP_TABLES, "") assert "key" not in msg def test_encode_special_characters_in_pattern(self): """Test encoding with special characters in pattern.""" msg = make_stash_message( 1, STASH_OP_KEYS, "test", pattern="user:*:session:?" ) encoded = BinaryProtocol._encode_from_dict(msg) _, _, payload = BinaryProtocol.decode_message(encoded) assert payload["pattern"] == "user:*:session:?" benoitc-gunicorn-f5fb19e/tests/test_dirty_tlv.py000066400000000000000000000445671514360242400222760ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""Tests for dirty TLV binary encoder/decoder.""" import math import struct import pytest from gunicorn.dirty.tlv import ( TLVEncoder, TYPE_NONE, TYPE_BOOL, TYPE_INT64, TYPE_FLOAT64, TYPE_BYTES, TYPE_STRING, TYPE_LIST, TYPE_DICT, MAX_STRING_SIZE, MAX_BYTES_SIZE, MAX_LIST_SIZE, MAX_DICT_SIZE, ) from gunicorn.dirty.errors import DirtyProtocolError class TestTLVEncoderBasicTypes: """Tests for basic type encoding/decoding.""" def test_encode_decode_none(self): """Test None encoding/decoding.""" encoded = TLVEncoder.encode(None) assert encoded == bytes([TYPE_NONE]) value, offset = TLVEncoder.decode(encoded, 0) assert value is None assert offset == 1 def test_encode_decode_true(self): """Test True encoding/decoding.""" encoded = TLVEncoder.encode(True) assert encoded == bytes([TYPE_BOOL, 0x01]) value, offset = TLVEncoder.decode(encoded, 0) assert value is True assert offset == 2 def test_encode_decode_false(self): """Test False encoding/decoding.""" encoded = TLVEncoder.encode(False) assert encoded == bytes([TYPE_BOOL, 0x00]) value, offset = TLVEncoder.decode(encoded, 0) assert value is False assert offset == 2 def test_encode_decode_positive_int(self): """Test positive integer encoding/decoding.""" encoded = TLVEncoder.encode(42) assert encoded[0] == TYPE_INT64 assert len(encoded) == 9 # 1 type + 8 value value, offset = TLVEncoder.decode(encoded, 0) assert value == 42 assert offset == 9 def test_encode_decode_negative_int(self): """Test negative integer encoding/decoding.""" encoded = TLVEncoder.encode(-12345) value, offset = TLVEncoder.decode(encoded, 0) assert value == -12345 def test_encode_decode_large_int(self): """Test large integer encoding/decoding.""" large_val = 2**62 encoded = TLVEncoder.encode(large_val) value, offset = TLVEncoder.decode(encoded, 0) assert value == large_val def test_encode_decode_zero(self): """Test zero encoding/decoding.""" encoded = TLVEncoder.encode(0) value, offset = TLVEncoder.decode(encoded, 0) assert value == 0 def 
test_encode_decode_float(self): """Test float encoding/decoding.""" encoded = TLVEncoder.encode(3.14159) assert encoded[0] == TYPE_FLOAT64 assert len(encoded) == 9 # 1 type + 8 value value, offset = TLVEncoder.decode(encoded, 0) assert abs(value - 3.14159) < 1e-10 def test_encode_decode_negative_float(self): """Test negative float encoding/decoding.""" encoded = TLVEncoder.encode(-273.15) value, offset = TLVEncoder.decode(encoded, 0) assert abs(value - (-273.15)) < 1e-10 def test_encode_decode_float_infinity(self): """Test infinity encoding/decoding.""" encoded = TLVEncoder.encode(float('inf')) value, offset = TLVEncoder.decode(encoded, 0) assert value == float('inf') def test_encode_decode_float_nan(self): """Test NaN encoding/decoding.""" encoded = TLVEncoder.encode(float('nan')) value, offset = TLVEncoder.decode(encoded, 0) assert math.isnan(value) class TestTLVEncoderBytes: """Tests for bytes encoding/decoding.""" def test_encode_decode_empty_bytes(self): """Test empty bytes encoding/decoding.""" encoded = TLVEncoder.encode(b"") assert encoded[0] == TYPE_BYTES value, offset = TLVEncoder.decode(encoded, 0) assert value == b"" def test_encode_decode_bytes(self): """Test bytes encoding/decoding.""" data = b"\x00\x01\x02\xff\xfe\xfd" encoded = TLVEncoder.encode(data) value, offset = TLVEncoder.decode(encoded, 0) assert value == data def test_encode_decode_large_bytes(self): """Test large bytes encoding/decoding.""" data = b"x" * 10000 encoded = TLVEncoder.encode(data) value, offset = TLVEncoder.decode(encoded, 0) assert value == data def test_bytes_too_large(self): """Test that bytes exceeding max size raises error.""" # We won't actually allocate MAX_BYTES_SIZE, just check the encoding with pytest.raises(DirtyProtocolError) as exc_info: TLVEncoder.encode(b"x" * (MAX_BYTES_SIZE + 1)) assert "too large" in str(exc_info.value).lower() class TestTLVEncoderString: """Tests for string encoding/decoding.""" def test_encode_decode_empty_string(self): """Test empty string 
encoding/decoding.""" encoded = TLVEncoder.encode("") assert encoded[0] == TYPE_STRING value, offset = TLVEncoder.decode(encoded, 0) assert value == "" def test_encode_decode_ascii_string(self): """Test ASCII string encoding/decoding.""" encoded = TLVEncoder.encode("hello world") value, offset = TLVEncoder.decode(encoded, 0) assert value == "hello world" def test_encode_decode_unicode_string(self): """Test Unicode string encoding/decoding.""" text = "Hello, world! \u00a9 \u2603 \U0001F600" encoded = TLVEncoder.encode(text) value, offset = TLVEncoder.decode(encoded, 0) assert value == text def test_encode_decode_chinese(self): """Test Chinese characters encoding/decoding.""" text = "Hello, world!" encoded = TLVEncoder.encode(text) value, offset = TLVEncoder.decode(encoded, 0) assert value == text def test_encode_decode_emoji(self): """Test emoji encoding/decoding.""" text = "Test emoji" encoded = TLVEncoder.encode(text) value, offset = TLVEncoder.decode(encoded, 0) assert value == text def test_encode_decode_large_string(self): """Test large string encoding/decoding.""" text = "x" * 10000 encoded = TLVEncoder.encode(text) value, offset = TLVEncoder.decode(encoded, 0) assert value == text class TestTLVEncoderList: """Tests for list encoding/decoding.""" def test_encode_decode_empty_list(self): """Test empty list encoding/decoding.""" encoded = TLVEncoder.encode([]) assert encoded[0] == TYPE_LIST value, offset = TLVEncoder.decode(encoded, 0) assert value == [] def test_encode_decode_simple_list(self): """Test simple list encoding/decoding.""" data = [1, 2, 3] encoded = TLVEncoder.encode(data) value, offset = TLVEncoder.decode(encoded, 0) assert value == data def test_encode_decode_mixed_list(self): """Test mixed type list encoding/decoding.""" data = [1, "hello", 3.14, True, None, b"bytes"] encoded = TLVEncoder.encode(data) value, offset = TLVEncoder.decode(encoded, 0) assert value == data def test_encode_decode_nested_list(self): """Test nested list 
encoding/decoding.""" data = [[1, 2], [3, [4, 5]], ["a", "b"]] encoded = TLVEncoder.encode(data) value, offset = TLVEncoder.decode(encoded, 0) assert value == data def test_encode_decode_tuple_as_list(self): """Test that tuples are encoded as lists.""" data = (1, 2, 3) encoded = TLVEncoder.encode(data) value, offset = TLVEncoder.decode(encoded, 0) assert value == [1, 2, 3] # Decoded as list def test_encode_decode_large_list(self): """Test large list encoding/decoding.""" data = list(range(1000)) encoded = TLVEncoder.encode(data) value, offset = TLVEncoder.decode(encoded, 0) assert value == data class TestTLVEncoderDict: """Tests for dict encoding/decoding.""" def test_encode_decode_empty_dict(self): """Test empty dict encoding/decoding.""" encoded = TLVEncoder.encode({}) assert encoded[0] == TYPE_DICT value, offset = TLVEncoder.decode(encoded, 0) assert value == {} def test_encode_decode_simple_dict(self): """Test simple dict encoding/decoding.""" data = {"a": 1, "b": 2, "c": 3} encoded = TLVEncoder.encode(data) value, offset = TLVEncoder.decode(encoded, 0) assert value == data def test_encode_decode_mixed_values_dict(self): """Test dict with mixed value types.""" data = { "int": 42, "float": 3.14, "string": "hello", "bool": True, "none": None, "bytes": b"data", "list": [1, 2, 3], } encoded = TLVEncoder.encode(data) value, offset = TLVEncoder.decode(encoded, 0) assert value == data def test_encode_decode_nested_dict(self): """Test nested dict encoding/decoding.""" data = { "outer": { "inner": { "value": 42 }, "list": [{"a": 1}, {"b": 2}] } } encoded = TLVEncoder.encode(data) value, offset = TLVEncoder.decode(encoded, 0) assert value == data def test_encode_dict_non_string_key_converted(self): """Test that non-string keys are converted to strings (like JSON).""" data = {1: "value", 2: "other"} encoded = TLVEncoder.encode(data) decoded, _ = TLVEncoder.decode(encoded, 0) # Keys should be converted to strings assert decoded == {"1": "value", "2": "other"} class 
TestTLVEncoderComplexStructures: """Tests for complex nested structures.""" def test_encode_decode_request_like(self): """Test encoding/decoding a request-like structure.""" data = { "id": 12345, "app_path": "myapp.ml:MLApp", "action": "predict", "args": [b"input_data", 0.7], "kwargs": {"temperature": 0.7, "max_tokens": 1000}, } encoded = TLVEncoder.encode(data) value, offset = TLVEncoder.decode(encoded, 0) assert value == data def test_encode_decode_response_like(self): """Test encoding/decoding a response-like structure.""" data = { "id": 12345, "result": { "predictions": [0.1, 0.2, 0.7], "metadata": {"model": "v1.0", "latency_ms": 42}, } } encoded = TLVEncoder.encode(data) value, offset = TLVEncoder.decode(encoded, 0) assert value == data def test_encode_decode_deeply_nested(self): """Test deeply nested structures.""" data = {"a": {"b": {"c": {"d": {"e": {"f": "deep"}}}}}} encoded = TLVEncoder.encode(data) value, offset = TLVEncoder.decode(encoded, 0) assert value == data class TestTLVEncoderRoundtrip: """Tests for complete roundtrip using decode_full.""" def test_decode_full_simple(self): """Test decode_full with simple value.""" data = {"key": "value"} encoded = TLVEncoder.encode(data) value = TLVEncoder.decode_full(encoded) assert value == data def test_decode_full_trailing_data(self): """Test decode_full raises on trailing data.""" encoded = TLVEncoder.encode(42) + b"extra" with pytest.raises(DirtyProtocolError) as exc_info: TLVEncoder.decode_full(encoded) assert "trailing" in str(exc_info.value).lower() class TestTLVEncoderErrors: """Tests for error handling.""" def test_decode_empty_data(self): """Test decoding empty data raises error.""" with pytest.raises(DirtyProtocolError) as exc_info: TLVEncoder.decode(b"", 0) assert "truncated" in str(exc_info.value).lower() def test_decode_truncated_int(self): """Test decoding truncated int raises error.""" # TYPE_INT64 followed by only 4 bytes instead of 8 data = bytes([TYPE_INT64, 0, 0, 0, 0]) with 
pytest.raises(DirtyProtocolError) as exc_info: TLVEncoder.decode(data, 0) assert "truncated" in str(exc_info.value).lower() def test_decode_truncated_float(self): """Test decoding truncated float raises error.""" data = bytes([TYPE_FLOAT64, 0, 0, 0, 0]) with pytest.raises(DirtyProtocolError) as exc_info: TLVEncoder.decode(data, 0) assert "truncated" in str(exc_info.value).lower() def test_decode_truncated_bytes_length(self): """Test decoding truncated bytes length raises error.""" data = bytes([TYPE_BYTES, 0, 0]) # Only 2 bytes of length with pytest.raises(DirtyProtocolError) as exc_info: TLVEncoder.decode(data, 0) assert "truncated" in str(exc_info.value).lower() def test_decode_truncated_bytes_data(self): """Test decoding truncated bytes data raises error.""" # Says 10 bytes but only provides 5 data = bytes([TYPE_BYTES]) + struct.pack(">I", 10) + b"12345" with pytest.raises(DirtyProtocolError) as exc_info: TLVEncoder.decode(data, 0) assert "truncated" in str(exc_info.value).lower() def test_decode_truncated_string_length(self): """Test decoding truncated string length raises error.""" data = bytes([TYPE_STRING, 0]) with pytest.raises(DirtyProtocolError) as exc_info: TLVEncoder.decode(data, 0) assert "truncated" in str(exc_info.value).lower() def test_decode_truncated_string_data(self): """Test decoding truncated string data raises error.""" data = bytes([TYPE_STRING]) + struct.pack(">I", 10) + b"hello" with pytest.raises(DirtyProtocolError) as exc_info: TLVEncoder.decode(data, 0) assert "truncated" in str(exc_info.value).lower() def test_decode_invalid_utf8(self): """Test decoding invalid UTF-8 raises error.""" # Valid length, but invalid UTF-8 bytes data = bytes([TYPE_STRING]) + struct.pack(">I", 3) + b"\x80\x81\x82" with pytest.raises(DirtyProtocolError) as exc_info: TLVEncoder.decode(data, 0) assert "utf-8" in str(exc_info.value).lower() def test_decode_truncated_list_count(self): """Test decoding truncated list count raises error.""" data = bytes([TYPE_LIST, 
0]) with pytest.raises(DirtyProtocolError) as exc_info: TLVEncoder.decode(data, 0) assert "truncated" in str(exc_info.value).lower() def test_decode_truncated_dict_count(self): """Test decoding truncated dict count raises error.""" data = bytes([TYPE_DICT, 0]) with pytest.raises(DirtyProtocolError) as exc_info: TLVEncoder.decode(data, 0) assert "truncated" in str(exc_info.value).lower() def test_decode_unknown_type(self): """Test decoding unknown type raises error.""" data = bytes([0xFF]) # Unknown type with pytest.raises(DirtyProtocolError) as exc_info: TLVEncoder.decode(data, 0) assert "unknown" in str(exc_info.value).lower() def test_encode_unsupported_type(self): """Test encoding unsupported type raises error.""" with pytest.raises(DirtyProtocolError) as exc_info: TLVEncoder.encode(object()) assert "unsupported type" in str(exc_info.value).lower() def test_encode_function_raises_error(self): """Test encoding a function raises error.""" with pytest.raises(DirtyProtocolError) as exc_info: TLVEncoder.encode(lambda x: x) assert "unsupported type" in str(exc_info.value).lower() def test_decode_dict_non_string_key_in_data(self): """Test decoding dict with non-string key raises error.""" # Manually construct a dict with int key # TYPE_DICT, count=1, TYPE_INT64 key, TYPE_INT64 value data = ( bytes([TYPE_DICT]) + struct.pack(">I", 1) + bytes([TYPE_INT64]) + struct.pack(">q", 1) # Key (int, not string) + bytes([TYPE_INT64]) + struct.pack(">q", 2) # Value ) with pytest.raises(DirtyProtocolError) as exc_info: TLVEncoder.decode(data, 0) assert "string" in str(exc_info.value).lower() class TestTLVEncoderOffset: """Tests for offset handling.""" def test_decode_with_offset(self): """Test decoding from specific offset.""" # Create data with prefix prefix = b"garbage" encoded = TLVEncoder.encode(42) data = prefix + encoded value, offset = TLVEncoder.decode(data, len(prefix)) assert value == 42 assert offset == len(prefix) + len(encoded) def test_decode_multiple_values(self): 
"""Test decoding multiple consecutive values.""" v1 = TLVEncoder.encode("hello") v2 = TLVEncoder.encode(42) v3 = TLVEncoder.encode([1, 2, 3]) data = v1 + v2 + v3 offset = 0 val1, offset = TLVEncoder.decode(data, offset) assert val1 == "hello" val2, offset = TLVEncoder.decode(data, offset) assert val2 == 42 val3, offset = TLVEncoder.decode(data, offset) assert val3 == [1, 2, 3] assert offset == len(data) class TestTLVEncoderBinaryData: """Tests for binary data handling (the main motivation for this protocol).""" def test_binary_data_no_encoding(self): """Test that binary data is passed through without encoding.""" # This is the key advantage over JSON - binary data doesn't need base64 binary_data = bytes(range(256)) # All byte values encoded = TLVEncoder.encode(binary_data) value, offset = TLVEncoder.decode(encoded, 0) assert value == binary_data def test_binary_with_null_bytes(self): """Test binary data with embedded null bytes.""" binary_data = b"\x00\x00\xff\x00\x00" encoded = TLVEncoder.encode(binary_data) value, offset = TLVEncoder.decode(encoded, 0) assert value == binary_data def test_binary_in_nested_structure(self): """Test binary data inside nested structures.""" data = { "image": b"\x89PNG\r\n\x1a\n" + b"\x00" * 100, "metadata": {"width": 640, "height": 480}, "chunks": [b"chunk1", b"chunk2", b"chunk3"], } encoded = TLVEncoder.encode(data) value, offset = TLVEncoder.decode(encoded, 0) assert value == data benoitc-gunicorn-f5fb19e/tests/test_dirty_worker.py000066400000000000000000001020121514360242400227560ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""Tests for dirty worker module.""" import asyncio import os import signal import tempfile import pytest from gunicorn.config import Config from gunicorn.dirty.worker import DirtyWorker from gunicorn.dirty.protocol import ( DirtyProtocol, BinaryProtocol, make_request, HEADER_SIZE, HEADER_FORMAT, ) from gunicorn.dirty.errors import DirtyAppNotFoundError import struct class MockLog: """Mock logger for testing.""" def __init__(self): self.messages = [] def debug(self, msg, *args): self.messages.append(("debug", msg % args if args else msg)) def info(self, msg, *args): self.messages.append(("info", msg % args if args else msg)) def warning(self, msg, *args): self.messages.append(("warning", msg % args if args else msg)) def error(self, msg, *args): self.messages.append(("error", msg % args if args else msg)) def close_on_exec(self): pass def reopen_files(self): pass class MockStreamWriter: """Mock StreamWriter that captures written messages.""" def __init__(self): self.messages = [] self._buffer = b"" self.closed = False def write(self, data): self._buffer += data async def drain(self): # Decode the buffer to extract messages using binary protocol while len(self._buffer) >= HEADER_SIZE: # Decode header to get payload length _, _, length = BinaryProtocol.decode_header( self._buffer[:HEADER_SIZE] ) total_size = HEADER_SIZE + length if len(self._buffer) >= total_size: msg_data = self._buffer[:total_size] self._buffer = self._buffer[total_size:] # decode_message returns (msg_type_str, request_id, payload_dict) msg_type_str, request_id, payload_dict = BinaryProtocol.decode_message(msg_data) # Reconstruct the dict format for backwards compatibility result = {"type": msg_type_str, "id": request_id} result.update(payload_dict) self.messages.append(result) else: break def close(self): self.closed = True async def wait_closed(self): pass def get_extra_info(self, name): return None class TestDirtyWorkerInit: """Tests for DirtyWorker initialization.""" def 
test_init_attributes(self): """Test that worker is initialized with correct attributes.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=["tests.support_dirty_app:TestDirtyApp"], cfg=cfg, log=log, socket_path=socket_path ) assert worker.age == 1 assert worker.ppid == os.getpid() assert worker.app_paths == ["tests.support_dirty_app:TestDirtyApp"] assert worker.socket_path == socket_path assert worker.booted is False assert worker.alive is True assert worker.apps == {} def test_str_representation(self): """Test string representation.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[], cfg=cfg, log=log, socket_path=socket_path ) assert "DirtyWorker" in str(worker) class TestDirtyWorkerLoadApps: """Tests for app loading.""" def test_load_apps_success(self): """Test successful app loading.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=["tests.support_dirty_app:TestDirtyApp"], cfg=cfg, log=log, socket_path=socket_path ) worker.load_apps() assert "tests.support_dirty_app:TestDirtyApp" in worker.apps app = worker.apps["tests.support_dirty_app:TestDirtyApp"] assert app.initialized is True # init() was called def test_load_apps_failure(self): """Test failed app loading.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=["nonexistent:App"], cfg=cfg, log=log, socket_path=socket_path ) with pytest.raises(Exception): worker.load_apps() class TestDirtyWorkerExecute: """Tests for request execution.""" @pytest.mark.asyncio async def 
test_execute_success(self): """Test successful execution.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=["tests.support_dirty_app:TestDirtyApp"], cfg=cfg, log=log, socket_path=socket_path ) worker.load_apps() result = await worker.execute( "tests.support_dirty_app:TestDirtyApp", "compute", [2, 3], {"operation": "add"} ) assert result == 5 @pytest.mark.asyncio async def test_execute_app_not_found(self): """Test execution with unknown app.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[], cfg=cfg, log=log, socket_path=socket_path ) with pytest.raises(DirtyAppNotFoundError): await worker.execute("unknown:App", "action", [], {}) class TestDirtyWorkerHandleRequest: """Tests for request handling.""" @pytest.mark.asyncio async def test_handle_request_success(self): """Test handling a successful request.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=["tests.support_dirty_app:TestDirtyApp"], cfg=cfg, log=log, socket_path=socket_path ) worker.load_apps() request = make_request( request_id=123, app_path="tests.support_dirty_app:TestDirtyApp", action="compute", args=(2, 3), kwargs={"operation": "multiply"} ) writer = MockStreamWriter() await worker.handle_request(request, writer) assert len(writer.messages) == 1 response = writer.messages[0] assert response["type"] == DirtyProtocol.MSG_TYPE_RESPONSE assert response["id"] == 123 assert response["result"] == 6 @pytest.mark.asyncio async def test_handle_request_error(self): """Test handling a request that fails.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = 
os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=["tests.support_dirty_app:TestDirtyApp"], cfg=cfg, log=log, socket_path=socket_path ) worker.load_apps() request = make_request( request_id=456, app_path="tests.support_dirty_app:TestDirtyApp", action="compute", args=(2, 3), kwargs={"operation": "invalid"} ) writer = MockStreamWriter() await worker.handle_request(request, writer) assert len(writer.messages) == 1 response = writer.messages[0] assert response["type"] == DirtyProtocol.MSG_TYPE_ERROR assert response["id"] == 456 assert "Unknown operation" in response["error"]["message"] @pytest.mark.asyncio async def test_handle_request_unknown_type(self): """Test handling request with unknown type.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[], cfg=cfg, log=log, socket_path=socket_path ) request = {"type": "unknown", "id": 789} writer = MockStreamWriter() await worker.handle_request(request, writer) assert len(writer.messages) == 1 response = writer.messages[0] assert response["type"] == DirtyProtocol.MSG_TYPE_ERROR assert "Unknown message type" in response["error"]["message"] class TestDirtyWorkerCleanup: """Tests for worker cleanup.""" def test_cleanup_closes_apps(self): """Test that cleanup closes all apps.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=["tests.support_dirty_app:TestDirtyApp"], cfg=cfg, log=log, socket_path=socket_path ) worker.load_apps() app = worker.apps["tests.support_dirty_app:TestDirtyApp"] assert app.closed is False worker._cleanup() assert app.closed is True def test_cleanup_removes_socket(self): """Test that cleanup removes the socket file.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: 
socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[], cfg=cfg, log=log, socket_path=socket_path ) # Create the socket file with open(socket_path, 'w') as f: f.write('') assert os.path.exists(socket_path) worker._cleanup() assert not os.path.exists(socket_path) class TestDirtyWorkerNotify: """Tests for worker heartbeat.""" def test_notify_calls_tmp_notify(self): """Test that notify calls tmp.notify().""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[], cfg=cfg, log=log, socket_path=socket_path ) # Just verify notify doesn't raise worker.notify() worker.notify() worker.tmp.close() class TestDirtyWorkerSignals: """Tests for signal handling.""" def test_signal_handler_sets_alive_false(self): """Test that signal handler sets alive to False.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[], cfg=cfg, log=log, socket_path=socket_path ) assert worker.alive is True worker._signal_handler(signal.SIGTERM, None) assert worker.alive is False worker.tmp.close() def test_signal_handler_sigusr1_reopens_logs(self): """Test that SIGUSR1 calls reopen_files.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[], cfg=cfg, log=log, socket_path=socket_path ) # Should call reopen_files and NOT set alive to False assert worker.alive is True worker._signal_handler(signal.SIGUSR1, None) assert worker.alive is True worker.tmp.close() def test_signal_handler_with_loop_calls_shutdown(self): """Test that signal handler with loop calls shutdown.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: 
socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[], cfg=cfg, log=log, socket_path=socket_path ) # Create a mock loop loop = asyncio.new_event_loop() worker._loop = loop shutdown_called = [] def mock_call_soon_threadsafe(cb): shutdown_called.append(cb) loop.call_soon_threadsafe = mock_call_soon_threadsafe worker._signal_handler(signal.SIGTERM, None) assert worker.alive is False assert len(shutdown_called) == 1 loop.close() worker.tmp.close() def test_signal_handler_sigquit(self): """Test SIGQUIT handling.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[], cfg=cfg, log=log, socket_path=socket_path ) worker._signal_handler(signal.SIGQUIT, None) assert worker.alive is False worker.tmp.close() def test_signal_handler_sigint(self): """Test SIGINT handling.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[], cfg=cfg, log=log, socket_path=socket_path ) worker._signal_handler(signal.SIGINT, None) assert worker.alive is False worker.tmp.close() def test_signal_handler_sigabrt(self): """Test SIGABRT handling (timeout signal).""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[], cfg=cfg, log=log, socket_path=socket_path ) worker._signal_handler(signal.SIGABRT, None) assert worker.alive is False worker.tmp.close() class TestDirtyWorkerShutdown: """Tests for worker shutdown.""" def test_shutdown_closes_server(self): """Test that _shutdown closes the server.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( 
age=1, ppid=os.getpid(), app_paths=[], cfg=cfg, log=log, socket_path=socket_path ) # Mock server class MockServer: def __init__(self): self.closed = False def close(self): self.closed = True worker._server = MockServer() worker._shutdown() assert worker._server.closed is True worker.tmp.close() def test_shutdown_without_server(self): """Test that _shutdown works when server is None.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[], cfg=cfg, log=log, socket_path=socket_path ) # Should not raise worker._shutdown() worker.tmp.close() class TestDirtyWorkerRunAsync: """Tests for async run loop.""" @pytest.mark.asyncio async def test_run_async_creates_socket(self): """Test that _run_async creates Unix socket server.""" cfg = Config() cfg.set("dirty_timeout", 300) log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[], cfg=cfg, log=log, socket_path=socket_path ) worker.pid = os.getpid() # Start the server in background async def run_briefly(): # Remove existing socket if os.path.exists(socket_path): os.unlink(socket_path) worker._server = await asyncio.start_unix_server( worker.handle_connection, path=socket_path ) os.chmod(socket_path, 0o600) # Verify socket exists assert os.path.exists(socket_path) # Close immediately worker._server.close() await worker._server.wait_closed() await run_briefly() worker.tmp.close() @pytest.mark.asyncio async def test_heartbeat_loop(self): """Test heartbeat loop updates tmp.""" cfg = Config() cfg.set("dirty_timeout", 300) log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[], cfg=cfg, log=log, socket_path=socket_path ) # Test that notify method works worker.notify() 
worker.notify() worker.notify() # Verify no exceptions raised assert worker.tmp is not None worker.tmp.close() @pytest.mark.asyncio async def test_handle_connection_basic(self): """Test handle_connection reads and responds to messages.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=["tests.support_dirty_app:TestDirtyApp"], cfg=cfg, log=log, socket_path=socket_path ) worker.load_apps() worker.pid = os.getpid() # Create a simple test using stream reader/writer request = make_request( request_id=999, app_path="tests.support_dirty_app:TestDirtyApp", action="compute", args=(5, 3), kwargs={"operation": "add"} ) # Mock reader and writer reader = asyncio.StreamReader() encoded_request = BinaryProtocol._encode_from_dict(request) reader.feed_data(encoded_request) reader.feed_eof() writer = MockStreamWriter() # Handle one message then exit worker.alive = True try: message = await DirtyProtocol.read_message_async(reader) await worker.handle_request(message, writer) except asyncio.IncompleteReadError: pass # Check response from writer assert len(writer.messages) == 1 response = writer.messages[0] assert response["type"] == DirtyProtocol.MSG_TYPE_RESPONSE assert response["result"] == 8 worker._cleanup() class TestDirtyWorkerRun: """Tests for the run() method.""" def test_run_creates_and_runs_loop(self): """Test that run() creates and runs an event loop.""" cfg = Config() cfg.set("dirty_timeout", 300) log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[], cfg=cfg, log=log, socket_path=socket_path ) worker.pid = os.getpid() # Override _run_async to exit quickly run_async_called = [] async def mock_run_async(): run_async_called.append(True) # Exit immediately worker._run_async = mock_run_async worker.run() assert 
len(run_async_called) == 1 worker.tmp.close() def test_run_handles_exception(self): """Test that run() handles exceptions and cleans up.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[], cfg=cfg, log=log, socket_path=socket_path ) worker.pid = os.getpid() # Override _run_async to raise async def failing_run_async(): raise RuntimeError("Test error") worker._run_async = failing_run_async # Should not raise, should log error worker.run() # Check error was logged assert any("Worker error" in msg for level, msg in log.messages) class TestDirtyWorkerInitProcess: """Tests for init_process post-fork setup.""" def test_init_signals_setup(self): """Test that init_signals sets up signal handlers.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[], cfg=cfg, log=log, socket_path=socket_path ) # Store original handlers original_sigterm = signal.getsignal(signal.SIGTERM) try: worker.init_signals() # Verify handlers are set assert signal.getsignal(signal.SIGTERM) == worker._signal_handler assert signal.getsignal(signal.SIGQUIT) == worker._signal_handler assert signal.getsignal(signal.SIGINT) == worker._signal_handler assert signal.getsignal(signal.SIGABRT) == worker._signal_handler assert signal.getsignal(signal.SIGUSR1) == worker._signal_handler finally: # Restore original handler signal.signal(signal.SIGTERM, original_sigterm) worker.tmp.close() class TestDirtyWorkerCleanupErrors: """Tests for cleanup error handling.""" def test_cleanup_handles_app_close_error(self): """Test that cleanup handles errors when closing apps.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), 
app_paths=["tests.support_dirty_app:TestDirtyApp"], cfg=cfg, log=log, socket_path=socket_path ) worker.load_apps() app = worker.apps["tests.support_dirty_app:TestDirtyApp"] # Make close() raise an error def failing_close(): raise RuntimeError("Close failed") app.close = failing_close # Should not raise, should log error worker._cleanup() assert any("Error closing dirty app" in msg for level, msg in log.messages) def test_cleanup_handles_missing_socket(self): """Test that cleanup handles non-existent socket file.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "nonexistent.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[], cfg=cfg, log=log, socket_path=socket_path ) # Should not raise even if socket doesn't exist worker._cleanup() def test_cleanup_handles_tmp_close_error(self): """Test that cleanup handles tmp.close() errors.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=[], cfg=cfg, log=log, socket_path=socket_path ) # Close tmp so second close might fail worker.tmp.close() # Should not raise worker._cleanup() class TestDirtyWorkerLoadAppsInit: """Tests for app loading with init failure.""" def test_load_apps_init_failure(self): """Test that load_apps handles init() failure.""" cfg = Config() log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=["tests.support_dirty_app:BrokenInitApp"], cfg=cfg, log=log, socket_path=socket_path ) with pytest.raises(RuntimeError, match="Init failed"): worker.load_apps() # Error should be logged assert any("Failed to initialize" in msg for level, msg in log.messages) class TestDirtyWorkerExecutionTimeout: """Tests for execution timeout control.""" @pytest.mark.asyncio async def 
test_execute_with_timeout(self): """Test that execute enforces timeout.""" from concurrent.futures import ThreadPoolExecutor cfg = Config() cfg.set("dirty_timeout", 1) # 1 second timeout cfg.set("dirty_threads", 1) log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=["tests.support_dirty_app:SlowDirtyApp"], cfg=cfg, log=log, socket_path=socket_path ) worker.pid = os.getpid() # Create executor manually for test worker._executor = ThreadPoolExecutor(max_workers=1) try: worker.load_apps() # Execute slow action that exceeds timeout from gunicorn.dirty.errors import DirtyTimeoutError with pytest.raises(DirtyTimeoutError): await worker.execute( "tests.support_dirty_app:SlowDirtyApp", "slow_action", [], {"delay": 5.0} # 5 second delay, 1 second timeout ) finally: worker._cleanup() @pytest.mark.asyncio async def test_execute_within_timeout(self): """Test that execute succeeds within timeout.""" from concurrent.futures import ThreadPoolExecutor cfg = Config() cfg.set("dirty_timeout", 10) # 10 second timeout cfg.set("dirty_threads", 1) log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=["tests.support_dirty_app:SlowDirtyApp"], cfg=cfg, log=log, socket_path=socket_path ) worker.pid = os.getpid() # Create executor manually for test worker._executor = ThreadPoolExecutor(max_workers=1) try: worker.load_apps() # Execute fast action that completes within timeout result = await worker.execute( "tests.support_dirty_app:SlowDirtyApp", "fast_action", [], {} ) assert result == {"fast": True} finally: worker._cleanup() @pytest.mark.asyncio async def test_execute_no_timeout_when_zero(self): """Test that timeout is disabled when dirty_timeout is 0.""" from concurrent.futures import ThreadPoolExecutor cfg = Config() cfg.set("dirty_timeout", 0) # Disabled 
cfg.set("dirty_threads", 1) log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=["tests.support_dirty_app:TestDirtyApp"], cfg=cfg, log=log, socket_path=socket_path ) worker.pid = os.getpid() # Create executor manually for test worker._executor = ThreadPoolExecutor(max_workers=1) try: worker.load_apps() # Should work with no timeout result = await worker.execute( "tests.support_dirty_app:TestDirtyApp", "compute", [2, 3], {"operation": "add"} ) assert result == 5 finally: worker._cleanup() def test_run_creates_executor_with_threads(self): """Test that run() creates executor with dirty_threads config.""" cfg = Config() cfg.set("dirty_timeout", 300) cfg.set("dirty_threads", 4) log = MockLog() with tempfile.TemporaryDirectory() as tmpdir: socket_path = os.path.join(tmpdir, "worker.sock") worker = DirtyWorker( age=1, ppid=os.getpid(), app_paths=["tests.support_dirty_app:TestDirtyApp"], cfg=cfg, log=log, socket_path=socket_path ) worker.pid = os.getpid() worker.load_apps() # Simulate what run() does from concurrent.futures import ThreadPoolExecutor worker._executor = ThreadPoolExecutor( max_workers=cfg.dirty_threads, thread_name_prefix=f"dirty-worker-{worker.pid}-" ) assert worker._executor._max_workers == 4 worker._cleanup() assert worker._executor is None benoitc-gunicorn-f5fb19e/tests/test_early_hints.py000066400000000000000000000317461514360242400225720ustar00rootroot00000000000000# -*- coding: utf-8 - # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""Tests for HTTP 103 Early Hints support (RFC 8297).""" import pytest from unittest import mock from io import BytesIO # Check if h2 is available for HTTP/2 tests try: import h2.connection import h2.config import h2.events H2_AVAILABLE = True except ImportError: H2_AVAILABLE = False from gunicorn.http import wsgi class MockConfig: """Mock gunicorn configuration.""" def __init__(self): self.is_ssl = False self.workers = 1 self.limit_request_fields = 100 self.limit_request_field_size = 8190 self.limit_request_line = 8190 self.secure_scheme_headers = {} self.forwarded_allow_ips = ['127.0.0.1'] self.forwarder_headers = [] self.strip_header_spaces = False self.permit_obsolete_folding = False self.header_map = "refuse" self.sendfile = True self.errorlog = "-" # HTTP/2 settings self.http2_max_concurrent_streams = 100 self.http2_initial_window_size = 65535 self.http2_max_frame_size = 16384 self.http2_max_header_list_size = 65536 def forwarded_allow_networks(self): return [] class MockRequest: """Mock HTTP request for testing.""" def __init__(self, version=(1, 1)): self.version = version self.method = "GET" self.uri = "/" self.path = "/" self.query = "" self.fragment = "" self.scheme = "http" self.headers = [] self.body = BytesIO(b"") self.proxy_protocol_info = None self._expected_100_continue = False def should_close(self): return False class MockSocket: """Mock socket for testing.""" def __init__(self): self._sent = bytearray() self._closed = False def sendall(self, data): if self._closed: raise OSError("Socket is closed") self._sent.extend(data) def send(self, data): if self._closed: raise OSError("Socket is closed") self._sent.extend(data) return len(data) def get_sent_data(self): return bytes(self._sent) def clear(self): self._sent = bytearray() def close(self): self._closed = True class TestWSGIEarlyHints: """Test WSGI wsgi.early_hints callback.""" def test_early_hints_callback_in_environ(self): """Verify wsgi.early_hints is added to environ.""" cfg = MockConfig() 
req = MockRequest() sock = MockSocket() resp, environ = wsgi.create(req, sock, ('127.0.0.1', 12345), ('127.0.0.1', 8000), cfg) assert 'wsgi.early_hints' in environ assert callable(environ['wsgi.early_hints']) def test_send_single_early_hint(self): """Test sending one Link header as early hint.""" cfg = MockConfig() req = MockRequest(version=(1, 1)) sock = MockSocket() resp, environ = wsgi.create(req, sock, ('127.0.0.1', 12345), ('127.0.0.1', 8000), cfg) # Send early hints environ['wsgi.early_hints']([ ('Link', '; rel=preload; as=style'), ]) sent_data = sock.get_sent_data() assert b"HTTP/1.1 103 Early Hints\r\n" in sent_data assert b"Link: ; rel=preload; as=style\r\n" in sent_data assert sent_data.endswith(b"\r\n\r\n") def test_send_multiple_early_hints(self): """Test sending multiple Link headers.""" cfg = MockConfig() req = MockRequest(version=(1, 1)) sock = MockSocket() resp, environ = wsgi.create(req, sock, ('127.0.0.1', 12345), ('127.0.0.1', 8000), cfg) environ['wsgi.early_hints']([ ('Link', '; rel=preload; as=style'), ('Link', '; rel=preload; as=script'), ]) sent_data = sock.get_sent_data() assert b"HTTP/1.1 103 Early Hints\r\n" in sent_data assert b"Link: ; rel=preload; as=style\r\n" in sent_data assert b"Link: ; rel=preload; as=script\r\n" in sent_data def test_early_hints_not_sent_for_http10(self): """Test that early hints are not sent for HTTP/1.0 clients.""" cfg = MockConfig() req = MockRequest(version=(1, 0)) # HTTP/1.0 sock = MockSocket() resp, environ = wsgi.create(req, sock, ('127.0.0.1', 12345), ('127.0.0.1', 8000), cfg) # Try to send early hints environ['wsgi.early_hints']([ ('Link', '; rel=preload; as=style'), ]) # Nothing should be sent for HTTP/1.0 sent_data = sock.get_sent_data() assert sent_data == b"" def test_multiple_early_hints_calls(self): """Test multiple calls to wsgi.early_hints (multiple 103 responses).""" cfg = MockConfig() req = MockRequest(version=(1, 1)) sock = MockSocket() resp, environ = wsgi.create(req, sock, ('127.0.0.1', 
12345), ('127.0.0.1', 8000), cfg) # First early hints call environ['wsgi.early_hints']([ ('Link', '; rel=preload; as=style'), ]) # Second early hints call environ['wsgi.early_hints']([ ('Link', '; rel=preload; as=script'), ]) sent_data = sock.get_sent_data() # Should have two separate 103 responses assert sent_data.count(b"HTTP/1.1 103 Early Hints\r\n") == 2 def test_early_hints_with_bytes_headers(self): """Test early hints with bytes header values.""" cfg = MockConfig() req = MockRequest(version=(1, 1)) sock = MockSocket() resp, environ = wsgi.create(req, sock, ('127.0.0.1', 12345), ('127.0.0.1', 8000), cfg) # Send with bytes values environ['wsgi.early_hints']([ (b'Link', b'; rel=preload; as=style'), ]) sent_data = sock.get_sent_data() assert b"HTTP/1.1 103 Early Hints\r\n" in sent_data assert b"Link: ; rel=preload; as=style\r\n" in sent_data def test_empty_early_hints(self): """Test early hints with empty headers list.""" cfg = MockConfig() req = MockRequest(version=(1, 1)) sock = MockSocket() resp, environ = wsgi.create(req, sock, ('127.0.0.1', 12345), ('127.0.0.1', 8000), cfg) # Send empty headers environ['wsgi.early_hints']([]) sent_data = sock.get_sent_data() # Should still send 103 response with no headers assert sent_data == b"HTTP/1.1 103 Early Hints\r\n\r\n" @pytest.mark.skipif(not H2_AVAILABLE, reason="h2 library not available") class TestHTTP2EarlyHints: """Test HTTP/2 early hints (send_informational method).""" def _create_mock_http2_config(self): """Create mock config for HTTP/2.""" cfg = MockConfig() return cfg def _create_mock_socket(self): """Create mock socket for HTTP/2.""" return MockSocket() def test_send_informational_method_exists(self): """Test that send_informational method exists on HTTP2ServerConnection.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = self._create_mock_http2_config() sock = self._create_mock_socket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) assert hasattr(conn, 
'send_informational') assert callable(conn.send_informational) def test_send_informational_invalid_status(self): """Test send_informational raises for non-1xx status.""" from gunicorn.http2.connection import HTTP2ServerConnection from gunicorn.http2.errors import HTTP2Error cfg = self._create_mock_http2_config() sock = self._create_mock_socket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() # Need to create a stream first client_conn = h2.connection.H2Connection( config=h2.config.H2Configuration(client_side=True) ) client_conn.initiate_connection() # Get client's initial data client_data = client_conn.data_to_send() conn.receive_data(client_data) # Create a request on the client client_conn.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) request_data = client_conn.data_to_send() conn.receive_data(request_data) # Try to send 200 as informational (should fail) with pytest.raises(HTTP2Error) as excinfo: conn.send_informational(1, 200, [('link', '')]) assert "Invalid informational status" in str(excinfo.value) def test_send_informational_103(self): """Test sending 103 Early Hints over HTTP/2.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = self._create_mock_http2_config() sock = self._create_mock_socket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() # Create a client connection client_conn = h2.connection.H2Connection( config=h2.config.H2Configuration(client_side=True) ) client_conn.initiate_connection() client_data = client_conn.data_to_send() conn.receive_data(client_data) # Create a request on the client client_conn.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) request_data = client_conn.data_to_send() conn.receive_data(request_data) # Clear sent data to isolate the informational response sock.clear() # 
Send 103 Early Hints conn.send_informational(1, 103, [ ('link', '; rel=preload; as=style'), ]) # Verify data was sent sent_data = sock.get_sent_data() assert len(sent_data) > 0 # Feed the data back to client to verify it's valid HTTP/2 client_conn.receive_data(sent_data) # Client should receive an informational response def test_send_informational_stream_not_found(self): """Test send_informational raises for non-existent stream.""" from gunicorn.http2.connection import HTTP2ServerConnection from gunicorn.http2.errors import HTTP2Error cfg = self._create_mock_http2_config() sock = self._create_mock_socket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() # Try to send on non-existent stream with pytest.raises(HTTP2Error) as excinfo: conn.send_informational(999, 103, [('link', '')]) assert "not found" in str(excinfo.value) @pytest.mark.skipif(not H2_AVAILABLE, reason="h2 library not available") class TestAsyncHTTP2EarlyHints: """Test async HTTP/2 early hints.""" def test_async_send_informational_method_exists(self): """Test that send_informational method exists on AsyncHTTP2Connection.""" from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = mock.MagicMock() writer = mock.MagicMock() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) assert hasattr(conn, 'send_informational') assert callable(conn.send_informational) class TestASGIEarlyHints: """Test ASGI http.response.informational handling.""" def test_reason_phrase_103(self): """Test that 103 has correct reason phrase.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.MagicMock() worker.cfg = MockConfig() worker.log = mock.MagicMock() protocol = ASGIProtocol(worker) reason = protocol._get_reason_phrase(103) assert reason == "Early Hints" def test_reason_phrase_100(self): """Test that 100 Continue has correct reason phrase.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.MagicMock() 
worker.cfg = MockConfig() worker.log = mock.MagicMock() protocol = ASGIProtocol(worker) reason = protocol._get_reason_phrase(100) assert reason == "Continue" def test_reason_phrase_101(self): """Test that 101 Switching Protocols has correct reason phrase.""" from gunicorn.asgi.protocol import ASGIProtocol worker = mock.MagicMock() worker.cfg = MockConfig() worker.log = mock.MagicMock() protocol = ASGIProtocol(worker) reason = protocol._get_reason_phrase(101) assert reason == "Switching Protocols" benoitc-gunicorn-f5fb19e/tests/test_gthread.py000066400000000000000000001372771514360242400216750ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Tests for the gthread worker.""" import errno import fcntl import os import selectors import threading import time from collections import deque from concurrent import futures from unittest import mock import pytest from gunicorn.config import Config from gunicorn.workers import gthread class FakeSocket: """Mock socket for testing.""" def __init__(self, data=b''): self.data = data self.closed = False self.blocking = True self._fileno = id(self) % 65536 def fileno(self): return self._fileno def setblocking(self, blocking): self.blocking = blocking def recv(self, size): if self.closed: raise OSError(errno.EBADF, "Bad file descriptor") result = self.data[:size] self.data = self.data[size:] return result def send(self, data): if self.closed: raise OSError(errno.EPIPE, "Broken pipe") return len(data) def close(self): self.closed = True def getsockname(self): return ('127.0.0.1', 8000) def getpeername(self): return ('127.0.0.1', 12345) class TestTConn: """Tests for TConn connection wrapper.""" def test_tconn_init(self): """Test TConn initialization.""" cfg = Config() sock = FakeSocket() client = ('127.0.0.1', 12345) server = ('127.0.0.1', 8000) conn = gthread.TConn(cfg, sock, client, server) assert conn.cfg is cfg assert conn.sock is sock assert 
conn.client == client assert conn.server == server assert conn.timeout is None assert conn.parser is None assert conn.initialized is False def test_tconn_init_sets_blocking_false(self): """Test that TConn sets socket to non-blocking initially.""" cfg = Config() sock = FakeSocket() sock.setblocking(True) gthread.TConn(cfg, sock, ('127.0.0.1', 12345), ('127.0.0.1', 8000)) # TConn sets socket to non-blocking in __init__ assert sock.blocking is False def test_tconn_init_method_sets_blocking_true(self): """Test that conn.init() sets socket back to blocking.""" cfg = Config() sock = FakeSocket() conn = gthread.TConn(cfg, sock, ('127.0.0.1', 12345), ('127.0.0.1', 8000)) conn.init() assert sock.blocking is True assert conn.initialized is True assert conn.parser is not None def test_tconn_set_timeout(self): """Test timeout setting using monotonic clock.""" cfg = Config() cfg.set('keepalive', 5) sock = FakeSocket() conn = gthread.TConn(cfg, sock, ('127.0.0.1', 12345), ('127.0.0.1', 8000)) before = time.monotonic() conn.set_timeout() after = time.monotonic() assert conn.timeout is not None assert before + 5 <= conn.timeout <= after + 5 def test_tconn_close(self): """Test connection closing.""" cfg = Config() sock = FakeSocket() conn = gthread.TConn(cfg, sock, ('127.0.0.1', 12345), ('127.0.0.1', 8000)) conn.close() assert sock.closed is True class TestPollableMethodQueue: """Tests for PollableMethodQueue.""" def test_queue_init_and_close(self): """Test queue initialization and cleanup.""" q = gthread.PollableMethodQueue() q.init() assert q._read_fd is not None assert q._write_fd is not None assert q._queue is not None q.close() def test_queue_defer_and_run(self): """Test deferring and running callbacks.""" q = gthread.PollableMethodQueue() q.init() results = [] q.defer(results.append, 42) # Simulate the selector reading from the pipe q.run_callbacks(None) assert results == [42] q.close() def test_queue_multiple_callbacks(self): """Test multiple callbacks are executed in 
order.""" q = gthread.PollableMethodQueue() q.init() results = [] for i in range(5): q.defer(results.append, i) q.run_callbacks(None) assert results == [0, 1, 2, 3, 4] q.close() def test_queue_fileno_for_selector(self): """Test that fileno returns a valid fd for selector registration.""" q = gthread.PollableMethodQueue() q.init() fd = q.fileno() assert isinstance(fd, int) assert fd >= 0 # Verify it can be used with a selector sel = selectors.DefaultSelector() sel.register(fd, selectors.EVENT_READ) sel.unregister(fd) sel.close() q.close() def test_queue_thread_safety(self): """Test that defer can be called from multiple threads.""" q = gthread.PollableMethodQueue() q.init() results = [] lock = threading.Lock() def add_callback(n): def callback(): with lock: results.append(n) q.defer(callback) threads = [] for i in range(10): t = threading.Thread(target=add_callback, args=(i,)) threads.append(t) t.start() for t in threads: t.join() # Drain all callbacks (pipe is non-blocking, may take multiple calls) for _ in range(20): q.run_callbacks(None) if len(results) >= 10: break assert len(results) == 10 assert set(results) == set(range(10)) q.close() def test_queue_nonblocking_pipe(self): """Test that pipe is non-blocking (BSD compatibility).""" q = gthread.PollableMethodQueue() q.init() # Verify both ends are non-blocking read_flags = fcntl.fcntl(q._read_fd, fcntl.F_GETFL) write_flags = fcntl.fcntl(q._write_fd, fcntl.F_GETFL) assert read_flags & os.O_NONBLOCK assert write_flags & os.O_NONBLOCK q.close() class TestThreadWorker: """Tests for ThreadWorker.""" def create_worker(self, cfg=None): """Create a worker instance for testing.""" if cfg is None: cfg = Config() cfg.set('workers', 1) cfg.set('threads', 4) cfg.set('worker_connections', 1000) cfg.set('keepalive', 2) worker = gthread.ThreadWorker( age=1, ppid=os.getpid(), sockets=[], app=mock.Mock(), timeout=30, cfg=cfg, log=mock.Mock(), ) return worker def test_worker_init(self): """Test worker initialization.""" worker = 
self.create_worker() assert worker.worker_connections == 1000 assert worker.max_keepalived == 1000 - 4 # connections - threads assert worker.tpool is None assert worker.poller is None assert worker.nr_conns == 0 assert worker._accepting is False assert isinstance(worker.keepalived_conns, deque) assert isinstance(worker.method_queue, gthread.PollableMethodQueue) def test_worker_check_config_warning(self): """Test that check_config warns when keepalive impossible.""" cfg = Config() cfg.set('worker_connections', 4) cfg.set('threads', 4) cfg.set('keepalive', 2) log = mock.Mock() gthread.ThreadWorker.check_config(cfg, log) log.warning.assert_called() def test_worker_check_config_no_warning(self): """Test that check_config doesn't warn with valid config.""" cfg = Config() cfg.set('worker_connections', 100) cfg.set('threads', 4) cfg.set('keepalive', 2) log = mock.Mock() gthread.ThreadWorker.check_config(cfg, log) log.warning.assert_not_called() def test_worker_init_process(self): """Test worker process initialization.""" worker = self.create_worker() worker.tmp = mock.Mock() worker.log = mock.Mock() # Mock super().init_process() to avoid full initialization with mock.patch.object(gthread.base.Worker, 'init_process'): worker.init_process() assert worker.tpool is not None assert worker.poller is not None assert worker.method_queue._queue is not None # Cleanup worker.tpool.shutdown(wait=False) worker.poller.close() worker.method_queue.close() def test_worker_get_thread_pool(self): """Test thread pool creation.""" worker = self.create_worker() pool = worker.get_thread_pool() assert isinstance(pool, futures.ThreadPoolExecutor) pool.shutdown(wait=False) def test_worker_murder_keepalived(self): """Test that expired keepalive connections are cleaned up.""" worker = self.create_worker() worker.poller = selectors.DefaultSelector() # Create an expired connection (using monotonic to match implementation) cfg = Config() sock = FakeSocket() conn = gthread.TConn(cfg, sock, ('127.0.0.1', 
12345), ('127.0.0.1', 8000)) conn.timeout = time.monotonic() - 10 # Expired 10 seconds ago worker.keepalived_conns.append(conn) worker.nr_conns = 1 # Register with poller (so it can be unregistered) try: with mock.patch.object(worker.poller, 'unregister'): worker.murder_keepalived() except (OSError, ValueError): pass # Expected with fake socket # Connection should have been removed assert len(worker.keepalived_conns) == 0 assert sock.closed is True worker.poller.close() def test_worker_is_parent_alive(self): """Test parent process check.""" worker = self.create_worker() # With correct ppid worker.ppid = os.getppid() assert worker.is_parent_alive() is True # With wrong ppid worker.ppid = -1 assert worker.is_parent_alive() is False def test_worker_set_accept_enabled(self): """Test enabling and disabling connection acceptance.""" worker = self.create_worker() worker.poller = mock.Mock() # Create a mock socket mock_sock = mock.Mock() mock_sock.getsockname.return_value = ('127.0.0.1', 8000) worker.sockets = [mock_sock] # Initially not accepting assert worker._accepting is False # Enable accepting worker.set_accept_enabled(True) assert worker._accepting is True mock_sock.setblocking.assert_called_with(False) worker.poller.register.assert_called_once() # Disable accepting worker.set_accept_enabled(False) assert worker._accepting is False worker.poller.unregister.assert_called_once() def test_worker_handle_exit(self): """Test graceful shutdown signal handling.""" worker = self.create_worker() worker.method_queue.init() worker.alive = True worker.handle_exit(None, None) assert worker.alive is False worker.method_queue.close() def test_worker_wait_for_events(self): """Test event waiting with dispatch.""" worker = self.create_worker() worker.poller = mock.Mock() # Simulate an event mock_key = mock.Mock() callback = mock.Mock() mock_key.data = callback mock_key.fileobj = mock.Mock() worker.poller.select.return_value = [(mock_key, None)] worker.wait_for_and_dispatch_events(1.0) 
worker.poller.select.assert_called_once_with(1.0) callback.assert_called_once_with(mock_key.fileobj) class TestFinishRequest: """Tests for finish_request handling.""" def create_worker(self): """Create a worker for testing.""" cfg = Config() cfg.set('workers', 1) cfg.set('threads', 4) cfg.set('worker_connections', 1000) worker = gthread.ThreadWorker( age=1, ppid=os.getpid(), sockets=[], app=mock.Mock(), timeout=30, cfg=cfg, log=mock.Mock(), ) worker.poller = mock.Mock() worker.alive = True return worker def test_finish_request_cancelled(self): """Test handling of cancelled future.""" worker = self.create_worker() worker.nr_conns = 1 conn = mock.Mock() fs = mock.Mock() fs.cancelled.return_value = True worker.finish_request(conn, fs) assert worker.nr_conns == 0 conn.close.assert_called_once() def test_finish_request_keepalive(self): """Test handling of keepalive response.""" worker = self.create_worker() worker.nr_conns = 1 conn = mock.Mock() conn.sock = mock.Mock() fs = mock.Mock() fs.cancelled.return_value = False fs.result.return_value = True # keepalive=True worker.finish_request(conn, fs) assert worker.nr_conns == 1 # Connection kept assert conn in worker.keepalived_conns conn.set_timeout.assert_called_once() worker.poller.register.assert_called_once() def test_finish_request_close(self): """Test handling of non-keepalive response.""" worker = self.create_worker() worker.nr_conns = 1 conn = mock.Mock() fs = mock.Mock() fs.cancelled.return_value = False fs.result.return_value = False # keepalive=False worker.finish_request(conn, fs) assert worker.nr_conns == 0 conn.close.assert_called_once() def test_finish_request_exception(self): """Test handling of exception in request.""" worker = self.create_worker() worker.nr_conns = 1 conn = mock.Mock() fs = mock.Mock() fs.cancelled.return_value = False fs.result.side_effect = Exception("Test error") worker.finish_request(conn, fs) assert worker.nr_conns == 0 conn.close.assert_called_once() class TestAccept: """Tests for 
connection acceptance.""" def create_worker(self): """Create a worker for testing.""" cfg = Config() cfg.set('workers', 1) cfg.set('threads', 4) cfg.set('worker_connections', 1000) worker = gthread.ThreadWorker( age=1, ppid=os.getpid(), sockets=[], app=mock.Mock(), timeout=30, cfg=cfg, log=mock.Mock(), ) worker.poller = mock.Mock() worker.tpool = mock.Mock() worker.method_queue = mock.Mock() return worker def test_accept_success(self): """Test successful connection acceptance.""" worker = self.create_worker() worker.nr_conns = 0 client_sock = FakeSocket() client_addr = ('127.0.0.1', 12345) listener = mock.Mock() listener.accept.return_value = (client_sock, client_addr) listener.getsockname.return_value = ('127.0.0.1', 8000) worker.accept(listener) assert worker.nr_conns == 1 worker.tpool.submit.assert_called_once() def test_accept_eagain(self): """Test handling of EAGAIN during accept.""" worker = self.create_worker() worker.nr_conns = 0 listener = mock.Mock() listener.accept.side_effect = OSError(errno.EAGAIN, "Try again") # Should not raise worker.accept(listener) assert worker.nr_conns == 0 def test_accept_econnaborted(self): """Test handling of ECONNABORTED during accept.""" worker = self.create_worker() worker.nr_conns = 0 listener = mock.Mock() listener.accept.side_effect = OSError(errno.ECONNABORTED, "Connection aborted") # Should not raise worker.accept(listener) assert worker.nr_conns == 0 class TestGracefulShutdown: """Tests for graceful shutdown behavior.""" def create_worker(self): """Create a worker for testing.""" cfg = Config() cfg.set('workers', 1) cfg.set('threads', 4) cfg.set('worker_connections', 1000) cfg.set('graceful_timeout', 5) worker = gthread.ThreadWorker( age=1, ppid=os.getpid(), sockets=[], app=mock.Mock(), timeout=30, cfg=cfg, log=mock.Mock(), ) return worker def test_handle_exit_sets_alive_false(self): """Test that handle_exit begins graceful shutdown.""" worker = self.create_worker() worker.method_queue.init() worker.alive = True 
worker.handle_exit(None, None) assert worker.alive is False worker.method_queue.close() def test_connection_tracking(self): """Test that connection count is properly tracked.""" worker = self.create_worker() worker.poller = mock.Mock() worker.tpool = mock.Mock() worker.method_queue = mock.Mock() assert worker.nr_conns == 0 # Simulate accept client_sock = FakeSocket() listener = mock.Mock() listener.accept.return_value = (client_sock, ('127.0.0.1', 12345)) listener.getsockname.return_value = ('127.0.0.1', 8000) worker.accept(listener) assert worker.nr_conns == 1 # Simulate finish_request with close conn = mock.Mock() fs = mock.Mock() fs.cancelled.return_value = False fs.result.return_value = False # Not keepalive worker.finish_request(conn, fs) assert worker.nr_conns == 0 class TestKeepaliveManagement: """Tests for keepalive connection management.""" def create_worker(self): """Create a worker for testing.""" cfg = Config() cfg.set('workers', 1) cfg.set('threads', 4) cfg.set('worker_connections', 10) cfg.set('keepalive', 2) worker = gthread.ThreadWorker( age=1, ppid=os.getpid(), sockets=[], app=mock.Mock(), timeout=30, cfg=cfg, log=mock.Mock(), ) worker.poller = mock.Mock() return worker def test_max_keepalived_calculation(self): """Test that max_keepalived is correctly calculated.""" worker = self.create_worker() # max_keepalived = worker_connections - threads = 10 - 4 = 6 assert worker.max_keepalived == 6 def test_keepalive_timeout_ordering(self): """Test that connections are ordered by timeout for efficient murder.""" worker = self.create_worker() # Add connections with different timeouts cfg = Config() for i in range(3): sock = FakeSocket() conn = gthread.TConn(cfg, sock, ('127.0.0.1', 12345 + i), ('127.0.0.1', 8000)) conn.timeout = time.monotonic() + (i * 10) # Staggered timeouts worker.keepalived_conns.append(conn) worker.nr_conns += 1 # First connection should have earliest timeout first = worker.keepalived_conns[0] last = worker.keepalived_conns[-1] assert 
first.timeout < last.timeout def test_murder_only_expired(self): """Test that only expired connections are closed.""" worker = self.create_worker() worker.poller = selectors.DefaultSelector() cfg = Config() # Add one expired and one valid connection expired_sock = FakeSocket() expired_conn = gthread.TConn(cfg, expired_sock, ('127.0.0.1', 12345), ('127.0.0.1', 8000)) expired_conn.timeout = time.monotonic() - 10 # Expired valid_sock = FakeSocket() valid_conn = gthread.TConn(cfg, valid_sock, ('127.0.0.1', 12346), ('127.0.0.1', 8000)) valid_conn.timeout = time.monotonic() + 100 # Still valid worker.keepalived_conns.append(expired_conn) worker.keepalived_conns.append(valid_conn) worker.nr_conns = 2 with mock.patch.object(worker.poller, 'unregister'): worker.murder_keepalived() # Expired should be closed, valid should remain assert expired_sock.closed is True assert valid_sock.closed is False assert len(worker.keepalived_conns) == 1 assert worker.keepalived_conns[0] is valid_conn assert worker.nr_conns == 1 worker.poller.close() class TestErrorHandling: """Tests for error handling in various scenarios.""" def create_worker(self): """Create a worker for testing.""" cfg = Config() cfg.set('workers', 1) cfg.set('threads', 4) cfg.set('worker_connections', 1000) worker = gthread.ThreadWorker( age=1, ppid=os.getpid(), sockets=[], app=mock.Mock(), timeout=30, cfg=cfg, log=mock.Mock(), ) worker.poller = mock.Mock() return worker def test_finish_request_handles_future_exception(self): """Test that finish_request handles exceptions from futures.""" worker = self.create_worker() worker.nr_conns = 1 conn = mock.Mock() fs = mock.Mock() fs.cancelled.return_value = False fs.result.side_effect = RuntimeError("Worker crashed") # Should not raise, should close connection worker.finish_request(conn, fs) assert worker.nr_conns == 0 conn.close.assert_called_once() def test_enqueue_req_submits_to_pool(self): """Test that enqueue_req properly submits to thread pool.""" worker = 
self.create_worker() worker.tpool = mock.Mock() worker.method_queue = mock.Mock() conn = mock.Mock() worker.enqueue_req(conn) worker.tpool.submit.assert_called_once() def test_wait_for_events_handles_eintr(self): """Test that EINTR is handled gracefully.""" worker = self.create_worker() worker.poller = mock.Mock() worker.poller.select.side_effect = OSError(errno.EINTR, "Interrupted") # Should not raise worker.wait_for_and_dispatch_events(1.0) def test_wait_for_events_raises_other_errors(self): """Test that non-EINTR errors are propagated.""" worker = self.create_worker() worker.poller = mock.Mock() worker.poller.select.side_effect = OSError(errno.EBADF, "Bad file descriptor") with pytest.raises(OSError): worker.wait_for_and_dispatch_events(1.0) class TestConnectionState: """Tests for connection state management.""" def test_tconn_double_init_is_safe(self): """Test that calling init() twice is safe (idempotent).""" cfg = Config() sock = FakeSocket() conn = gthread.TConn(cfg, sock, ('127.0.0.1', 12345), ('127.0.0.1', 8000)) conn.init() parser1 = conn.parser conn.init() # Should not reinitialize parser2 = conn.parser assert parser1 is parser2 def test_tconn_close_is_safe(self): """Test that closing a connection is safe.""" cfg = Config() sock = FakeSocket() conn = gthread.TConn(cfg, sock, ('127.0.0.1', 12345), ('127.0.0.1', 8000)) conn.close() assert sock.closed is True # Second close should not raise conn.close() def test_keepalive_timeout_uses_monotonic(self): """Test that timeout uses monotonic clock.""" cfg = Config() cfg.set('keepalive', 5) sock = FakeSocket() conn = gthread.TConn(cfg, sock, ('127.0.0.1', 12345), ('127.0.0.1', 8000)) before = time.monotonic() conn.set_timeout() after = time.monotonic() # Timeout should be approximately 5 seconds in the future assert before + 4.9 <= conn.timeout <= after + 5.1 class TestWorkerLiveness: """Tests for worker liveness reporting to the arbiter.""" def create_worker(self): """Create a worker for testing.""" cfg = 
Config() cfg.set('workers', 1) cfg.set('threads', 4) cfg.set('worker_connections', 1000) worker = gthread.ThreadWorker( age=1, ppid=os.getpid(), sockets=[], app=mock.Mock(), timeout=30, cfg=cfg, log=mock.Mock(), ) return worker def test_notify_calls_tmp_notify(self): """Test that worker.notify() calls tmp.notify() for arbiter monitoring.""" worker = self.create_worker() worker.tmp = mock.Mock() worker.notify() worker.tmp.notify.assert_called_once() def test_notify_updates_tmp_mtime(self): """Test that notify updates the temp file mtime for arbiter heartbeat. WorkerTmp.notify() sets mtime using time.monotonic(), and the arbiter checks liveness by comparing (time.monotonic() - last_update()) to timeout. """ from gunicorn.workers.workertmp import WorkerTmp cfg = Config() tmp = WorkerTmp(cfg) # Call notify to set mtime to current monotonic time tmp.notify() # The arbiter checks: time.monotonic() - last_update() <= timeout # After notify(), this difference should be very small diff = time.monotonic() - tmp.last_update() assert diff < 1.0 # Should be nearly zero # Wait and verify the difference grows time.sleep(0.1) diff_later = time.monotonic() - tmp.last_update() assert diff_later > diff # Time has passed tmp.close() def test_worker_notifies_in_run_loop(self): """Test that worker calls notify() during the run loop.""" worker = self.create_worker() worker.tmp = mock.Mock() worker.method_queue.init() worker.poller = mock.Mock() worker.tpool = mock.Mock() worker.sockets = [] worker.alive = True # Track notify calls notify_calls = [] original_notify = worker.notify def tracking_notify(): notify_calls.append(time.monotonic()) original_notify() worker.notify = tracking_notify # Mock poller.select to exit after first iteration call_count = [0] def mock_select(timeout): call_count[0] += 1 if call_count[0] > 1: worker.alive = False return [] worker.poller.select.side_effect = mock_select # Mock is_parent_alive to return True worker.is_parent_alive = mock.Mock(return_value=True) 
worker.run() # Worker should have called notify at least once assert len(notify_calls) >= 1 worker.method_queue.close() class TestSignalHandling: """Tests for signal handling in gthread worker.""" def create_worker(self): """Create a worker for testing.""" cfg = Config() cfg.set('workers', 1) cfg.set('threads', 4) cfg.set('worker_connections', 1000) cfg.set('graceful_timeout', 5) worker = gthread.ThreadWorker( age=1, ppid=os.getpid(), sockets=[], app=mock.Mock(), timeout=30, cfg=cfg, log=mock.Mock(), ) return worker def test_handle_exit_sigterm_sets_alive_false(self): """Test that SIGTERM handler sets alive=False for graceful shutdown.""" worker = self.create_worker() worker.method_queue.init() worker.alive = True # Simulate SIGTERM worker.handle_exit(None, None) assert worker.alive is False worker.method_queue.close() def test_handle_exit_wakes_up_poller(self): """Test that SIGTERM handler wakes up the poller via method_queue.""" worker = self.create_worker() worker.method_queue.init() worker.alive = True # After handle_exit, the method_queue should have a callback queued worker.handle_exit(None, None) # Check that something was written to the pipe (to wake poller) # Read from the pipe - should have data import select readable, _, _ = select.select([worker.method_queue.fileno()], [], [], 0) assert len(readable) > 0 worker.method_queue.close() def test_handle_quit_sigquit_immediate_shutdown(self): """Test that SIGQUIT handler triggers immediate shutdown.""" worker = self.create_worker() worker.tpool = mock.Mock() with pytest.raises(SystemExit) as exc_info: worker.handle_quit(None, None) assert exc_info.value.code == 0 worker.tpool.shutdown.assert_called_once_with(wait=False) def test_graceful_shutdown_stops_accepting(self): """Test that graceful shutdown stops accepting new connections.""" worker = self.create_worker() worker.method_queue.init() worker.poller = mock.Mock() worker.tpool = mock.Mock() worker.sockets = [mock.Mock()] worker._accepting = True # Start 
accepting worker.set_accept_enabled(True) # Simulate SIGTERM worker.handle_exit(None, None) assert worker.alive is False # During run loop, accepting should be disabled worker.set_accept_enabled(False) assert worker._accepting is False worker.method_queue.close() def test_graceful_shutdown_drains_connections(self): """Test that graceful shutdown waits for connections to drain.""" worker = self.create_worker() worker.method_queue.init() worker.poller = mock.Mock() worker.poller.select.return_value = [] worker.tpool = mock.Mock() worker.sockets = [] worker.nr_conns = 1 # One active connection worker.alive = True # Track iterations iterations = [0] def mock_select(timeout): iterations[0] += 1 if iterations[0] == 1: # First iteration: trigger shutdown worker.alive = False elif iterations[0] == 2: # Second iteration: during grace period pass elif iterations[0] >= 3: # Connection finishes worker.nr_conns = 0 return [] worker.poller.select.side_effect = mock_select worker.is_parent_alive = mock.Mock(return_value=True) worker.run() # Should have waited for connections assert iterations[0] >= 2 worker.method_queue.close() def test_sigterm_does_not_interrupt_active_request(self): """Test that SIGTERM doesn't immediately interrupt active requests.""" import signal worker = self.create_worker() worker.method_queue.init() # The base worker sets siginterrupt(SIGTERM, False) in init_signals # This ensures system calls aren't interrupted by SIGTERM # Verify handle_exit just sets alive=False, doesn't raise worker.alive = True worker.handle_exit(signal.SIGTERM, None) assert worker.alive is False # No exception raised, request can continue worker.method_queue.close() class TestWorkerArbiterIntegration: """Integration tests for worker-arbiter communication.""" def create_worker(self): """Create a worker for testing.""" cfg = Config() cfg.set('workers', 1) cfg.set('threads', 4) cfg.set('worker_connections', 1000) cfg.set('graceful_timeout', 2) worker = gthread.ThreadWorker( age=1, 
ppid=os.getpid(), sockets=[], app=mock.Mock(), timeout=30, cfg=cfg, log=mock.Mock(), ) return worker def test_worker_detects_parent_death(self): """Test that worker detects when parent process dies.""" worker = self.create_worker() # Valid ppid worker.ppid = os.getppid() assert worker.is_parent_alive() is True # Invalid ppid (simulating parent death) worker.ppid = 99999999 assert worker.is_parent_alive() is False def test_worker_exits_on_parent_death(self): """Test that worker exits when parent dies.""" worker = self.create_worker() worker.method_queue.init() worker.poller = mock.Mock() worker.poller.select.return_value = [] worker.tpool = mock.Mock() worker.sockets = [] worker.alive = True worker.ppid = 99999999 # Invalid ppid iterations = [0] def mock_select(timeout): iterations[0] += 1 return [] worker.poller.select.side_effect = mock_select worker.run() # Should exit immediately due to parent check assert iterations[0] == 1 worker.method_queue.close() def test_worker_tmp_file_can_be_monitored(self): """Test that worker tmp file can be used by arbiter for monitoring. 
The arbiter monitors workers by checking: time.monotonic() - last_update() <= timeout """ from gunicorn.workers.workertmp import WorkerTmp cfg = Config() tmp = WorkerTmp(cfg) # Worker notifies - sets mtime to current monotonic time tmp.notify() # Arbiter check: time.monotonic() - last_update() should be small diff = time.monotonic() - tmp.last_update() assert diff < 1.0 # Worker just notified, should be nearly zero # If worker stops notifying, the difference grows time.sleep(0.1) diff_later = time.monotonic() - tmp.last_update() assert diff_later > diff # Arbiter would notice worker isn't responding tmp.close() def test_graceful_timeout_honored(self): """Test that graceful_timeout is honored during shutdown.""" worker = self.create_worker() worker.cfg.set('graceful_timeout', 1) # 1 second for testing worker.method_queue.init() worker.poller = mock.Mock() worker.tpool = mock.Mock() worker.sockets = [] worker.nr_conns = 1 # Active connection that won't finish worker.alive = True # Track iterations iterations = [0] start_time = [None] def mock_select(timeout): iterations[0] += 1 if iterations[0] == 1: # First iteration: trigger shutdown worker.alive = False start_time[0] = time.monotonic() return [] else: # Grace period iterations - simulate time passing via select timeout # The timeout should be the remaining time if timeout > 0: # Simulate some time passing time.sleep(min(timeout, 0.2)) # Connection never finishes (nr_conns stays 1) return [] worker.poller.select.side_effect = mock_select worker.is_parent_alive = mock.Mock(return_value=True) worker.run() # Should have completed (grace timeout expired with connection still active) assert iterations[0] >= 2 # At least one grace period iteration worker.method_queue.close() def test_run_completes_cleanup(self): """Test that run() properly cleans up resources on exit.""" worker = self.create_worker() worker.method_queue.init() worker.poller = selectors.DefaultSelector() worker.tpool = 
futures.ThreadPoolExecutor(max_workers=2) worker.sockets = [] worker.alive = False # Immediately exit worker.is_parent_alive = mock.Mock(return_value=True) # Don't pre-register method_queue - run() will do it worker.run() # All resources should be cleaned up # (No assertion needed - if run() completes without error, cleanup worked) class TestSignalInteraction: """Tests for signal interactions and edge cases.""" def create_worker(self): """Create a worker for testing.""" cfg = Config() cfg.set('workers', 1) cfg.set('threads', 4) cfg.set('worker_connections', 1000) worker = gthread.ThreadWorker( age=1, ppid=os.getpid(), sockets=[], app=mock.Mock(), timeout=30, cfg=cfg, log=mock.Mock(), ) return worker def test_multiple_sigterm_is_safe(self): """Test that receiving multiple SIGTERM is safe.""" worker = self.create_worker() worker.method_queue.init() worker.alive = True # Multiple SIGTERM calls should be idempotent worker.handle_exit(None, None) assert worker.alive is False worker.handle_exit(None, None) assert worker.alive is False worker.method_queue.close() def test_sigterm_then_sigquit(self): """Test SIGQUIT after SIGTERM for force kill.""" worker = self.create_worker() worker.method_queue.init() worker.tpool = mock.Mock() worker.alive = True # First SIGTERM for graceful worker.handle_exit(None, None) assert worker.alive is False # Then SIGQUIT for immediate with pytest.raises(SystemExit): worker.handle_quit(None, None) worker.tpool.shutdown.assert_called_once_with(wait=False) worker.method_queue.close() def test_sigquit_does_not_wait_for_threads(self): """Test that SIGQUIT calls tpool.shutdown(wait=False).""" worker = self.create_worker() worker.tpool = mock.Mock() with pytest.raises(SystemExit): worker.handle_quit(None, None) # Verify wait=False was passed worker.tpool.shutdown.assert_called_once_with(wait=False) def test_handle_exit_when_already_dead(self): """Test handle_exit when worker is already shutting down.""" worker = self.create_worker() 
worker.method_queue.init() worker.alive = False # Should not raise, should be idempotent worker.handle_exit(None, None) assert worker.alive is False worker.method_queue.close() def test_connections_tracked_during_signal(self): """Test that connection count is correct during signal handling.""" worker = self.create_worker() worker.method_queue.init() worker.poller = mock.Mock() worker.tpool = mock.Mock() worker.nr_conns = 5 worker.alive = True # SIGTERM should not affect connection count worker.handle_exit(None, None) assert worker.nr_conns == 5 # Still 5 connections assert worker.alive is False # But shutting down worker.method_queue.close() class TestKeepaliveBlockingMode: """Tests for socket blocking mode on keepalive connections (issue #3448).""" def create_worker(self): """Create a worker for testing.""" cfg = Config() cfg.set('workers', 1) cfg.set('threads', 4) cfg.set('worker_connections', 1000) cfg.set('keepalive', 2) worker = gthread.ThreadWorker( age=1, ppid=os.getpid(), sockets=[], app=mock.Mock(), timeout=30, cfg=cfg, log=mock.Mock(), ) return worker def test_handle_sets_blocking_on_keepalive_connection(self): """Test that handle() sets socket to blocking mode on keepalive connections. On keepalive connections, the socket is in non-blocking mode (set by finish_request() for the selector). handle() must set it back to blocking before reading request/body to avoid SSLWantReadError on SSL connections. 
""" worker = self.create_worker() worker.wsgi = mock.Mock(return_value=[b'response']) # Create a connection that simulates a keepalive reuse cfg = Config() sock = FakeSocket() conn = gthread.TConn(cfg, sock, ('127.0.0.1', 12345), ('127.0.0.1', 8000)) # Simulate the state after finish_request() for keepalive: # - socket is non-blocking (for selector registration) # - connection is already initialized conn.init() # First request initialized the connection sock.setblocking(False) # finish_request() set non-blocking for selector assert sock.blocking is False assert conn.initialized is True # Verify that handle() sets the socket to blocking mode # Mock the parser to avoid actually parsing mock_parser = mock.Mock() mock_parser.__next__ = mock.Mock(return_value=None) # No request conn.parser = mock_parser worker.handle(conn) # Socket should be set to blocking mode by handle() assert sock.blocking is True def test_handle_sets_blocking_before_body_read(self): """Test that socket is blocking before WSGI app reads request body. This is the core fix for issue #3448: Flask's request.get_json() reads the body, which triggers socket.recv(). If the socket is non-blocking, this raises SSLWantReadError on SSL connections. 
""" worker = self.create_worker() cfg = Config() sock = FakeSocket() conn = gthread.TConn(cfg, sock, ('127.0.0.1', 12345), ('127.0.0.1', 8000)) # Simulate keepalive state conn.init() sock.setblocking(False) # Track when blocking is set vs when body would be read blocking_state_at_body_read = [None] def mock_wsgi(environ, start_response): # This simulates Flask's request.get_json() reading the body # The socket must be blocking at this point blocking_state_at_body_read[0] = sock.blocking start_response('200 OK', []) return [b'response'] worker.wsgi = mock_wsgi # Mock parser to return a request mock_request = mock.Mock() mock_request.headers = [] mock_request.unreader = mock.Mock() mock_request.body = mock.Mock() mock_request.body.read.return_value = b'' mock_parser = mock.Mock() mock_parser.__next__ = mock.Mock(return_value=mock_request) mock_parser.finish_body = mock.Mock() conn.parser = mock_parser # Mock handle_request to invoke wsgi _ = worker.handle_request # save reference before overwriting def mock_handle_request(req, conn): # Simplified version that just calls wsgi worker.wsgi({}, lambda s, h: None) return True worker.handle_request = mock_handle_request worker.handle(conn) # Socket must be blocking when WSGI app reads body assert blocking_state_at_body_read[0] is True class TestFinishBodySSL: """Tests for SSL error handling in finish_body().""" def test_finish_body_handles_ssl_want_read_error(self): """Test that finish_body() handles SSLWantReadError gracefully. When discarding unread body data on SSL connections, the socket may raise SSLWantReadError if there's no application data available. This should be treated as "no more data" rather than an error. 
""" import ssl from gunicorn.http.parser import RequestParser # Create a mock SSL socket that raises SSLWantReadError on recv class MockSSLSocket: def __init__(self): self._fileno = 123 def fileno(self): return self._fileno def recv(self, size): raise ssl.SSLWantReadError("The operation did not complete") def setblocking(self, blocking): pass cfg = Config() sock = MockSSLSocket() parser = RequestParser(cfg, sock, ('127.0.0.1', 12345)) # Create a mock message with a body that will trigger socket read mock_body = mock.Mock() mock_body.read.side_effect = ssl.SSLWantReadError("The operation did not complete") mock_mesg = mock.Mock() mock_mesg.body = mock_body parser.mesg = mock_mesg # finish_body() should handle SSLWantReadError without raising parser.finish_body() # Should not raise # Verify body.read was called mock_body.read.assert_called_once_with(1024) def test_finish_body_reads_all_data_before_ssl_error(self): """Test that finish_body() reads all available data before SSLWantReadError.""" import ssl from gunicorn.http.parser import RequestParser cfg = Config() # Create a mock socket class MockSocket: def recv(self, size): return b'' def setblocking(self, blocking): pass sock = MockSocket() parser = RequestParser(cfg, sock, ('127.0.0.1', 12345)) # Create a mock message body that returns data then raises SSLWantReadError call_count = [0] def mock_read(size): call_count[0] += 1 if call_count[0] <= 2: return b'x' * size # Return data first two times raise ssl.SSLWantReadError("The operation did not complete") mock_body = mock.Mock() mock_body.read.side_effect = mock_read mock_mesg = mock.Mock() mock_mesg.body = mock_body parser.mesg = mock_mesg # finish_body() should read all data and handle SSLWantReadError parser.finish_body() # Should not raise # Verify body.read was called multiple times (2 data reads + 1 error) assert call_count[0] == 3 def test_finish_body_normal_operation(self): """Test that finish_body() works normally when no SSL error occurs.""" from 
gunicorn.http.parser import RequestParser cfg = Config() class MockSocket: def recv(self, size): return b'' def setblocking(self, blocking): pass sock = MockSocket() parser = RequestParser(cfg, sock, ('127.0.0.1', 12345)) # Create a mock message body that returns empty (end of data) mock_body = mock.Mock() mock_body.read.return_value = b'' mock_mesg = mock.Mock() mock_mesg.body = mock_body parser.mesg = mock_mesg # finish_body() should work normally parser.finish_body() # Verify body.read was called once and returned empty mock_body.read.assert_called_once_with(1024) class TestHTTP2TrailerCallback: """Tests for HTTP/2 response trailer callback.""" def test_trailer_callback_stores_trailers(self): """Test that the trailer callback stores trailers for later sending.""" # Simulate the trailer callback pattern used in handle_http2_request pending_trailers = [] def send_trailers_h2(trailers): """Queue trailers to be sent after response body.""" pending_trailers.extend(trailers) # Call the callback with trailers send_trailers_h2([('grpc-status', '0'), ('grpc-message', 'OK')]) assert len(pending_trailers) == 2 assert pending_trailers[0] == ('grpc-status', '0') assert pending_trailers[1] == ('grpc-message', 'OK') def test_trailer_callback_multiple_calls(self): """Test that multiple calls to trailer callback accumulate trailers.""" pending_trailers = [] def send_trailers_h2(trailers): pending_trailers.extend(trailers) # Call multiple times send_trailers_h2([('grpc-status', '0')]) send_trailers_h2([('grpc-message', 'OK')]) send_trailers_h2([('server-timing', 'total;dur=100')]) assert len(pending_trailers) == 3 assert pending_trailers == [ ('grpc-status', '0'), ('grpc-message', 'OK'), ('server-timing', 'total;dur=100'), ] def test_trailer_callback_empty_list(self): """Test that empty trailer list is handled correctly.""" pending_trailers = [] def send_trailers_h2(trailers): pending_trailers.extend(trailers) send_trailers_h2([]) assert len(pending_trailers) == 0 
benoitc-gunicorn-f5fb19e/tests/test_gtornado.py000066400000000000000000000355561514360242400220710ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Tests for the tornado worker.""" import os from unittest import mock import pytest tornado = pytest.importorskip("tornado") from gunicorn.config import Config from gunicorn.workers import gtornado class FakeSocket: """Mock socket for testing.""" def __init__(self, data=b''): self.data = data self.closed = False self.blocking = True self._fileno = id(self) % 65536 def fileno(self): return self._fileno def setblocking(self, blocking): self.blocking = blocking def recv(self, size): result = self.data[:size] self.data = self.data[size:] return result def send(self, data): return len(data) def close(self): self.closed = True def getsockname(self): return ('127.0.0.1', 8000) def getpeername(self): return ('127.0.0.1', 12345) class TestTornadoWorkerInit: """Tests for TornadoWorker initialization.""" def create_worker(self, cfg=None): """Create a worker instance for testing.""" if cfg is None: cfg = Config() cfg.set('workers', 1) cfg.set('max_requests', 0) worker = gtornado.TornadoWorker( age=1, ppid=os.getpid(), sockets=[], app=mock.Mock(), timeout=30, cfg=cfg, log=mock.Mock(), ) return worker def test_worker_init(self): """Test worker initialization.""" worker = self.create_worker() assert worker.nr == 0 def test_init_process_clears_ioloop(self): """Test that init_process clears the current IOLoop.""" worker = self.create_worker() worker.tmp = mock.Mock() worker.log = mock.Mock() with mock.patch.object(gtornado.IOLoop, 'clear_current') as mock_clear: with mock.patch.object(gtornado.Worker, 'init_process'): worker.init_process() mock_clear.assert_called_once() class TestRequestCounting: """Tests for request counting and max_requests behavior.""" def create_worker(self, cfg=None): """Create a worker instance for testing.""" if cfg is None: 
cfg = Config() cfg.set('workers', 1) worker = gtornado.TornadoWorker( age=1, ppid=os.getpid(), sockets=[], app=mock.Mock(), timeout=30, cfg=cfg, log=mock.Mock(), ) return worker def test_handle_request_increments_counter(self): """Test that handle_request increments the request counter.""" worker = self.create_worker() worker.nr = 0 worker.max_requests = 100 worker.alive = True worker.handle_request() assert worker.nr == 1 assert worker.alive is True def test_max_requests_triggers_shutdown(self): """Test that reaching max_requests triggers shutdown.""" cfg = Config() cfg.set('max_requests', 5) worker = self.create_worker(cfg) worker.nr = 4 worker.alive = True worker.max_requests = 5 worker.handle_request() assert worker.nr == 5 assert worker.alive is False class TestSignalHandling: """Tests for signal handling in tornado worker.""" def create_worker(self): """Create a worker for testing.""" cfg = Config() cfg.set('workers', 1) worker = gtornado.TornadoWorker( age=1, ppid=os.getpid(), sockets=[], app=mock.Mock(), timeout=30, cfg=cfg, log=mock.Mock(), ) return worker def test_handle_exit_sets_alive_false(self): """Test that handle_exit sets alive=False through parent.""" worker = self.create_worker() worker.alive = True # The parent's handle_exit is what sets alive=False worker.handle_exit(None, None) assert worker.alive is False def test_handle_exit_only_once(self): """Test that handle_exit only triggers once when alive.""" worker = self.create_worker() worker.alive = True # First call should set alive=False worker.handle_exit(None, None) assert worker.alive is False # Second call should do nothing (alive is already False) # Track that super().handle_exit is not called again with mock.patch.object(gtornado.Worker, 'handle_exit') as mock_exit: worker.handle_exit(None, None) mock_exit.assert_not_called() class TestWatchdog: """Tests for watchdog functionality.""" def create_worker(self): """Create a worker for testing.""" cfg = Config() cfg.set('workers', 1) worker = 
gtornado.TornadoWorker( age=1, ppid=os.getpid(), sockets=[], app=mock.Mock(), timeout=30, cfg=cfg, log=mock.Mock(), ) return worker def test_watchdog_notifies_when_alive(self): """Test that watchdog calls notify when alive.""" worker = self.create_worker() worker.alive = True worker.ppid = os.getppid() worker.tmp = mock.Mock() worker.watchdog() worker.tmp.notify.assert_called_once() def test_watchdog_detects_parent_death(self): """Test that watchdog detects parent death.""" worker = self.create_worker() worker.alive = True worker.ppid = 99999999 # Invalid ppid worker.tmp = mock.Mock() worker.watchdog() assert worker.alive is False class TestHeartbeat: """Tests for heartbeat functionality.""" def create_worker(self): """Create a worker for testing.""" cfg = Config() cfg.set('workers', 1) worker = gtornado.TornadoWorker( age=1, ppid=os.getpid(), sockets=[], app=mock.Mock(), timeout=30, cfg=cfg, log=mock.Mock(), ) return worker def test_heartbeat_stops_server_when_not_alive(self): """Test that heartbeat stops the server when not alive.""" worker = self.create_worker() worker.alive = False worker.server_alive = True worker.server = mock.Mock() worker.heartbeat() worker.server.stop.assert_called_once() assert worker.server_alive is False def test_heartbeat_stops_ioloop_after_server(self): """Test that heartbeat stops IOLoop after server is stopped.""" worker = self.create_worker() worker.alive = False worker.server_alive = False worker.callbacks = [mock.Mock(), mock.Mock()] worker.ioloop = mock.Mock() worker.heartbeat() for callback in worker.callbacks: callback.stop.assert_called_once() worker.ioloop.stop.assert_called_once() class TestAppWrapping: """Tests for app wrapping logic.""" def create_worker(self): """Create a worker for testing.""" cfg = Config() cfg.set('workers', 1) worker = gtornado.TornadoWorker( age=1, ppid=os.getpid(), sockets=[], app=mock.Mock(), timeout=30, cfg=cfg, log=mock.Mock(), ) return worker def test_wsgi_callable_wrapped_in_container(self): 
"""Test that a plain WSGI callable gets wrapped in WSGIContainer.""" from tornado.wsgi import WSGIContainer def wsgi_app(environ, start_response): pass # Test that WSGIContainer is used for plain WSGI apps app = wsgi_app if not isinstance(app, WSGIContainer) and \ not isinstance(app, tornado.web.Application): app = WSGIContainer(app) assert isinstance(app, WSGIContainer) def test_tornado_application_not_wrapped(self): """Test that tornado.web.Application is not wrapped.""" from tornado.wsgi import WSGIContainer tornado_app = tornado.web.Application([]) # Test the wrapping logic app = tornado_app if not isinstance(app, WSGIContainer) and \ not isinstance(app, tornado.web.Application): app = WSGIContainer(app) # Should NOT be wrapped assert isinstance(app, tornado.web.Application) assert not isinstance(app, WSGIContainer) class TestSetup: """Tests for the setup class method.""" def test_setup_patches_request_handler(self): """Test that setup patches RequestHandler.clear.""" # Save original original_clear = tornado.web.RequestHandler.clear try: gtornado.TornadoWorker.setup() # Create a mock handler to test the patched clear method mock_handler = mock.Mock() mock_handler._headers = {"Server": "TornadoServer/1.0"} # Call the patched clear new_clear = tornado.web.RequestHandler.clear assert new_clear is not original_clear finally: # Restore original tornado.web.RequestHandler.clear = original_clear class TestRunMethod: """Tests for the run method.""" def create_worker(self): """Create a worker for testing.""" cfg = Config() cfg.set('workers', 1) cfg.set('keepalive', 2) worker = gtornado.TornadoWorker( age=1, ppid=os.getpid(), sockets=[], app=mock.Mock(), timeout=30, cfg=cfg, log=mock.Mock(), ) return worker def test_run_sets_up_callbacks(self): """Test that run sets up periodic callbacks.""" worker = self.create_worker() worker.wsgi = tornado.web.Application([]) worker.sockets = [] mock_ioloop = mock.Mock() mock_callback = mock.Mock() with 
mock.patch.object(gtornado.IOLoop, 'instance', return_value=mock_ioloop): with mock.patch.object(gtornado, 'PeriodicCallback', return_value=mock_callback) as mock_pc: # Start the run method but stop it immediately mock_ioloop.start.side_effect = lambda: None worker.run() # Should create two callbacks (watchdog and heartbeat) assert mock_pc.call_count == 2 assert mock_callback.start.call_count == 2 def test_run_creates_http_server(self): """Test that run creates an HTTP server.""" worker = self.create_worker() worker.wsgi = tornado.web.Application([]) worker.sockets = [] mock_ioloop = mock.Mock() mock_ioloop.start.side_effect = lambda: None with mock.patch.object(gtornado.IOLoop, 'instance', return_value=mock_ioloop): with mock.patch.object(gtornado, 'PeriodicCallback', return_value=mock.Mock()): worker.run() assert worker.server is not None assert worker.server_alive is True def test_run_adds_sockets_to_server(self): """Test that run adds sockets to the server.""" worker = self.create_worker() worker.wsgi = tornado.web.Application([]) mock_socket = FakeSocket() worker.sockets = [mock_socket] mock_ioloop = mock.Mock() mock_ioloop.start.side_effect = lambda: None with mock.patch.object(gtornado.IOLoop, 'instance', return_value=mock_ioloop): with mock.patch.object(gtornado, 'PeriodicCallback', return_value=mock.Mock()): with mock.patch.object(tornado.httpserver.HTTPServer, 'add_socket'): worker.run() # Socket should be set to non-blocking (setblocking(0)) assert not mock_socket.blocking class TestSSLSupport: """Tests for SSL support.""" def create_worker(self): """Create a worker for testing.""" cfg = Config() cfg.set('workers', 1) cfg.set('keepalive', 2) worker = gtornado.TornadoWorker( age=1, ppid=os.getpid(), sockets=[], app=mock.Mock(), timeout=30, cfg=cfg, log=mock.Mock(), ) return worker def test_ssl_server_creation(self): """Test that SSL server is created when is_ssl is True.""" worker = self.create_worker() worker.wsgi = tornado.web.Application([]) 
worker.sockets = [] mock_ioloop = mock.Mock() mock_ioloop.start.side_effect = lambda: None mock_ssl_context = mock.Mock() # Mock cfg.is_ssl property to return True with mock.patch.object(type(worker.cfg), 'is_ssl', new_callable=mock.PropertyMock, return_value=True): with mock.patch.object(gtornado.IOLoop, 'instance', return_value=mock_ioloop): with mock.patch.object(gtornado, 'PeriodicCallback', return_value=mock.Mock()): with mock.patch.object(gtornado, 'ssl_context', return_value=mock_ssl_context): worker.run() # Server should be created with ssl_options assert worker.server is not None class TestKeepAlive: """Tests for keep-alive configuration.""" def create_worker(self): """Create a worker for testing.""" cfg = Config() cfg.set('workers', 1) worker = gtornado.TornadoWorker( age=1, ppid=os.getpid(), sockets=[], app=mock.Mock(), timeout=30, cfg=cfg, log=mock.Mock(), ) return worker def test_keep_alive_enabled(self): """Test that keep-alive is enabled when keepalive > 0.""" worker = self.create_worker() worker.wsgi = tornado.web.Application([]) worker.cfg.set('keepalive', 2) worker.sockets = [] mock_ioloop = mock.Mock() mock_ioloop.start.side_effect = lambda: None with mock.patch.object(gtornado.IOLoop, 'instance', return_value=mock_ioloop): with mock.patch.object(gtornado, 'PeriodicCallback', return_value=mock.Mock()): worker.run() assert worker.server.no_keep_alive is False def test_keep_alive_disabled(self): """Test that keep-alive is disabled when keepalive <= 0.""" worker = self.create_worker() worker.wsgi = tornado.web.Application([]) worker.cfg.set('keepalive', 0) worker.sockets = [] mock_ioloop = mock.Mock() mock_ioloop.start.side_effect = lambda: None with mock.patch.object(gtornado.IOLoop, 'instance', return_value=mock_ioloop): with mock.patch.object(gtornado, 'PeriodicCallback', return_value=mock.Mock()): worker.run() assert worker.server.no_keep_alive is True 
benoitc-gunicorn-f5fb19e/tests/test_http.py000066400000000000000000000170771514360242400212310ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import io import t import pytest from unittest import mock from gunicorn import util from gunicorn.http.body import Body, LengthReader, EOFReader from gunicorn.http.wsgi import Response from gunicorn.http.unreader import Unreader, IterUnreader, SocketUnreader from gunicorn.http.errors import InvalidHeader, InvalidHeaderName, InvalidHTTPVersion from gunicorn.http.message import TOKEN_RE def test_method_pattern(): assert TOKEN_RE.fullmatch("GET") assert TOKEN_RE.fullmatch("MKCALENDAR") assert not TOKEN_RE.fullmatch("GET:") assert not TOKEN_RE.fullmatch("GET;") RFC9110_5_6_2_TOKEN_DELIM = r'"(),/:;<=>?@[\]{}' for bad_char in RFC9110_5_6_2_TOKEN_DELIM: assert not TOKEN_RE.match(bad_char) def assert_readline(payload, size, expected): body = Body(io.BytesIO(payload)) assert body.readline(size) == expected def test_readline_empty_body(): assert_readline(b"", None, b"") assert_readline(b"", 1, b"") def test_readline_zero_size(): assert_readline(b"abc", 0, b"") assert_readline(b"\n", 0, b"") def test_readline_new_line_before_size(): body = Body(io.BytesIO(b"abc\ndef")) assert body.readline(4) == b"abc\n" assert body.readline() == b"def" def test_readline_new_line_after_size(): body = Body(io.BytesIO(b"abc\ndef")) assert body.readline(2) == b"ab" assert body.readline() == b"c\n" def test_readline_no_new_line(): body = Body(io.BytesIO(b"abcdef")) assert body.readline() == b"abcdef" body = Body(io.BytesIO(b"abcdef")) assert body.readline(2) == b"ab" assert body.readline(2) == b"cd" assert body.readline(2) == b"ef" def test_readline_buffer_loaded(): reader = io.BytesIO(b"abc\ndef") body = Body(reader) body.read(1) # load internal buffer reader.write(b"g\nhi") reader.seek(7) assert body.readline() == b"bc\n" assert body.readline() == b"defg\n" assert 
body.readline() == b"hi" def test_readline_buffer_loaded_with_size(): body = Body(io.BytesIO(b"abc\ndef")) body.read(1) # load internal buffer assert body.readline(2) == b"bc" assert body.readline(2) == b"\n" assert body.readline(2) == b"de" assert body.readline(2) == b"f" def test_http_header_encoding(): """ tests whether http response headers are USASCII encoded """ mocked_socket = mock.MagicMock() mocked_socket.sendall = mock.MagicMock() mocked_request = mock.MagicMock() response = Response(mocked_request, mocked_socket, None) # set umlaut header value - latin-1 is OK response.headers.append(('foo', 'häder')) response.send_headers() # set a-breve header value - unicode, non-latin-1 fails response = Response(mocked_request, mocked_socket, None) response.headers.append(('apple', 'măr')) with pytest.raises(UnicodeEncodeError): response.send_headers() # build our own header_str to compare against tosend = response.default_headers() tosend.extend(["%s: %s\r\n" % (k, v) for k, v in response.headers]) header_str = "%s\r\n" % "".join(tosend) with pytest.raises(UnicodeEncodeError): mocked_socket.sendall(util.to_bytestring(header_str, "ascii")) def test_http_invalid_response_header(): """ tests whether http response headers are contains control chars """ mocked_socket = mock.MagicMock() mocked_socket.sendall = mock.MagicMock() mocked_request = mock.MagicMock() response = Response(mocked_request, mocked_socket, None) with pytest.raises(InvalidHeader): response.start_response("200 OK", [('foo', 'essai\r\n')]) response = Response(mocked_request, mocked_socket, None) with pytest.raises(InvalidHeaderName): response.start_response("200 OK", [('foo\r\n', 'essai')]) def test_unreader_read_when_size_is_none(): unreader = Unreader() unreader.chunk = mock.MagicMock(side_effect=[b'qwerty', b'123456', b'']) assert unreader.read(size=None) == b'qwerty' assert unreader.read(size=None) == b'123456' assert unreader.read(size=None) == b'' def test_unreader_unread(): unreader = Unreader() 
unreader.unread(b'hi there') assert b'hi there' in unreader.read() def test_unreader_unread_should_place_data_at_the_beginning_of_the_buffer(): unreader = IterUnreader([b"abc", b"def"]) ab = unreader.read(2) unreader.unread(ab) assert unreader.read(None) == b"abc" def test_unreader_read_zero_size(): unreader = Unreader() unreader.chunk = mock.MagicMock(side_effect=[b'qwerty', b'asdfgh']) assert unreader.read(size=0) == b'' def test_unreader_read_with_nonzero_size(): unreader = Unreader() unreader.chunk = mock.MagicMock(side_effect=[ b'qwerty', b'asdfgh', b'zxcvbn', b'123456', b'', b'' ]) assert unreader.read(size=5) == b'qwert' assert unreader.read(size=5) == b'yasdf' assert unreader.read(size=5) == b'ghzxc' assert unreader.read(size=5) == b'vbn12' assert unreader.read(size=5) == b'3456' assert unreader.read(size=5) == b'' def test_unreader_raises_excpetion_on_invalid_size(): unreader = Unreader() with pytest.raises(TypeError): unreader.read(size='foobar') with pytest.raises(TypeError): unreader.read(size=3.14) with pytest.raises(TypeError): unreader.read(size=[]) def test_iter_unreader_chunk(): iter_unreader = IterUnreader((b'ab', b'cd', b'ef')) assert iter_unreader.chunk() == b'ab' assert iter_unreader.chunk() == b'cd' assert iter_unreader.chunk() == b'ef' assert iter_unreader.chunk() == b'' assert iter_unreader.chunk() == b'' def test_socket_unreader_chunk(): fake_sock = t.FakeSocket(io.BytesIO(b'Lorem ipsum dolor')) sock_unreader = SocketUnreader(fake_sock, max_chunk=5) assert sock_unreader.chunk() == b'Lorem' assert sock_unreader.chunk() == b' ipsu' assert sock_unreader.chunk() == b'm dol' assert sock_unreader.chunk() == b'or' assert sock_unreader.chunk() == b'' def test_length_reader_read(): unreader = IterUnreader((b'Lorem', b'ipsum', b'dolor', b'sit', b'amet')) reader = LengthReader(unreader, 13) assert reader.read(0) == b'' assert reader.read(5) == b'Lorem' assert reader.read(6) == b'ipsumd' assert reader.read(4) == b'ol' assert reader.read(100) == b'' 
reader = LengthReader(unreader, 10) assert reader.read(0) == b'' assert reader.read(5) == b'orsit' assert reader.read(5) == b'amet' assert reader.read(100) == b'' def test_length_reader_read_invalid_size(): reader = LengthReader(None, 5) with pytest.raises(TypeError): reader.read('100') with pytest.raises(TypeError): reader.read([100]) with pytest.raises(ValueError): reader.read(-100) def test_eof_reader_read(): unreader = IterUnreader((b'Lorem', b'ipsum', b'dolor', b'sit', b'amet')) reader = EOFReader(unreader) assert reader.read(0) == b'' assert reader.read(5) == b'Lorem' assert reader.read(5) == b'ipsum' assert reader.read(3) == b'dol' assert reader.read(3) == b'ors' assert reader.read(100) == b'itamet' assert reader.read(100) == b'' def test_eof_reader_read_invalid_size(): reader = EOFReader(None) with pytest.raises(TypeError): reader.read('100') with pytest.raises(TypeError): reader.read([100]) with pytest.raises(ValueError): reader.read(-100) def test_invalid_http_version_error(): assert str(InvalidHTTPVersion('foo')) == "Invalid HTTP Version: 'foo'" assert str(InvalidHTTPVersion((2, 1))) == 'Invalid HTTP Version: (2, 1)' benoitc-gunicorn-f5fb19e/tests/test_http2_alpn.py000066400000000000000000000371241514360242400223200ustar00rootroot00000000000000# -*- coding: utf-8 - # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""Tests for HTTP/2 ALPN negotiation.""" import ssl import pytest from unittest import mock from gunicorn import sock def create_mock_ssl_socket(alpn_protocol=None): """Create a mock SSL socket for testing ALPN negotiation.""" mock_socket = mock.Mock(spec=ssl.SSLSocket) mock_socket.selected_alpn_protocol.return_value = alpn_protocol return mock_socket class TestGetAlpnProtocols: """Test _get_alpn_protocols function.""" def test_h1_only_returns_empty(self): """No ALPN needed for HTTP/1.1 only.""" conf = mock.Mock() conf.http_protocols = ["h1"] result = sock._get_alpn_protocols(conf) assert result == [] def test_h2_enabled_returns_alpn_list(self): """Should return ALPN protocols when h2 is enabled.""" conf = mock.Mock() conf.http_protocols = ["h2", "h1"] with mock.patch('gunicorn.http2.is_http2_available', return_value=True): result = sock._get_alpn_protocols(conf) assert "h2" in result assert "http/1.1" in result def test_h2_without_library_returns_empty(self): """Should return empty if h2 library not available.""" conf = mock.Mock() conf.http_protocols = ["h2", "h1"] with mock.patch('gunicorn.http2.is_http2_available', return_value=False): result = sock._get_alpn_protocols(conf) assert result == [] def test_empty_protocols_returns_empty(self): conf = mock.Mock() conf.http_protocols = [] result = sock._get_alpn_protocols(conf) assert result == [] def test_none_protocols_returns_empty(self): conf = mock.Mock() conf.http_protocols = None result = sock._get_alpn_protocols(conf) assert result == [] def test_h2_only(self): """Should work with h2 only.""" conf = mock.Mock() conf.http_protocols = ["h2"] with mock.patch('gunicorn.http2.is_http2_available', return_value=True): result = sock._get_alpn_protocols(conf) assert "h2" in result class TestGetNegotiatedProtocol: """Test get_negotiated_protocol function.""" def test_returns_alpn_protocol(self): ssl_socket = create_mock_ssl_socket(alpn_protocol="h2") result = sock.get_negotiated_protocol(ssl_socket) assert result == 
"h2" def test_returns_http11(self): ssl_socket = create_mock_ssl_socket(alpn_protocol="http/1.1") result = sock.get_negotiated_protocol(ssl_socket) assert result == "http/1.1" def test_returns_none_when_not_negotiated(self): ssl_socket = create_mock_ssl_socket(alpn_protocol=None) result = sock.get_negotiated_protocol(ssl_socket) assert result is None def test_returns_none_for_non_ssl_socket(self): regular_socket = mock.Mock(spec=[]) # No SSL methods result = sock.get_negotiated_protocol(regular_socket) assert result is None def test_handles_attribute_error(self): """Handle old SSL without selected_alpn_protocol.""" ssl_socket = mock.Mock(spec=ssl.SSLSocket) del ssl_socket.selected_alpn_protocol # Remove the method result = sock.get_negotiated_protocol(ssl_socket) assert result is None def test_handles_ssl_error(self): """Handle SSLError when checking protocol.""" ssl_socket = mock.Mock(spec=ssl.SSLSocket) ssl_socket.selected_alpn_protocol.side_effect = ssl.SSLError() result = sock.get_negotiated_protocol(ssl_socket) assert result is None class TestIsHttp2Negotiated: """Test is_http2_negotiated function.""" def test_returns_true_for_h2(self): ssl_socket = create_mock_ssl_socket(alpn_protocol="h2") result = sock.is_http2_negotiated(ssl_socket) assert result is True def test_returns_false_for_http11(self): ssl_socket = create_mock_ssl_socket(alpn_protocol="http/1.1") result = sock.is_http2_negotiated(ssl_socket) assert result is False def test_returns_false_for_none(self): ssl_socket = create_mock_ssl_socket(alpn_protocol=None) result = sock.is_http2_negotiated(ssl_socket) assert result is False def test_returns_false_for_non_ssl(self): regular_socket = mock.Mock(spec=[]) result = sock.is_http2_negotiated(regular_socket) assert result is False class TestSSLContextAlpnConfiguration: """Test that SSL context configures ALPN properly.""" @pytest.fixture def ssl_config(self, tmp_path): """Create a config with SSL settings.""" # Create dummy cert/key files certfile = 
tmp_path / "cert.pem" keyfile = tmp_path / "key.pem" certfile.touch() keyfile.touch() conf = mock.Mock() conf.certfile = str(certfile) conf.keyfile = str(keyfile) conf.ca_certs = None conf.cert_reqs = ssl.CERT_NONE conf.ciphers = None conf.http_protocols = ["h2", "h1"] conf.ssl_context = lambda conf, factory: factory() return conf def test_ssl_context_sets_alpn_when_h2_available(self, ssl_config): """SSL context should set ALPN protocols when h2 is available.""" with mock.patch('gunicorn.http2.is_http2_available', return_value=True): with mock.patch('ssl.create_default_context') as mock_ctx: mock_context = mock.Mock() mock_ctx.return_value = mock_context mock_context.load_cert_chain = mock.Mock() try: sock.ssl_context(ssl_config) except Exception: pass # May fail due to dummy certs # Check that set_alpn_protocols was called if mock_context.set_alpn_protocols.called: call_args = mock_context.set_alpn_protocols.call_args[0][0] assert 'h2' in call_args def test_ssl_context_no_alpn_when_h1_only(self): """SSL context should not set ALPN for HTTP/1.1 only.""" conf = mock.Mock() conf.http_protocols = ["h1"] conf.ca_certs = None conf.certfile = "cert.pem" conf.keyfile = "key.pem" conf.cert_reqs = ssl.CERT_NONE conf.ciphers = None conf.ssl_context = lambda conf, factory: factory() with mock.patch('ssl.create_default_context') as mock_ctx: mock_context = mock.Mock() mock_ctx.return_value = mock_context # ALPN should not be set for h1 only alpn_protocols = sock._get_alpn_protocols(conf) assert alpn_protocols == [] class TestAlpnProtocolMap: """Test ALPN protocol mapping.""" def test_h1_maps_to_http11(self): from gunicorn.config import ALPN_PROTOCOL_MAP assert ALPN_PROTOCOL_MAP.get("h1") == "http/1.1" def test_h2_maps_to_h2(self): from gunicorn.config import ALPN_PROTOCOL_MAP assert ALPN_PROTOCOL_MAP.get("h2") == "h2" class TestAsyncWorkerAlpnHandshake: """Test that AsyncWorker performs handshake before ALPN check. 
This is critical for gevent and eventlet workers where do_handshake_on_connect may be False, causing ALPN negotiation to not complete until first I/O. """ @pytest.fixture def async_worker(self): """Create an AsyncWorker instance for testing.""" from gunicorn.workers.base_async import AsyncWorker worker = AsyncWorker.__new__(AsyncWorker) worker.cfg = mock.MagicMock() worker.cfg.keepalive = 2 worker.cfg.do_handshake_on_connect = False worker.cfg.http_protocols = ["h2", "h1"] worker.alive = True worker.log = mock.MagicMock() worker.wsgi = mock.MagicMock() worker.nr = 0 worker.max_requests = 1000 return worker def test_handshake_called_when_do_handshake_on_connect_false(self, async_worker): """Test that do_handshake() is called when do_handshake_on_connect is False.""" mock_ssl_socket = mock.Mock(spec=ssl.SSLSocket) mock_ssl_socket.selected_alpn_protocol.return_value = None mock_listener = mock.MagicMock() # Mock the rest of handle() to prevent full execution with mock.patch('gunicorn.sock.is_http2_negotiated', return_value=False): with mock.patch('gunicorn.http.get_parser') as mock_parser: mock_parser.return_value = iter([]) try: async_worker.handle(mock_listener, mock_ssl_socket, ('127.0.0.1', 8000)) except StopIteration: pass # Verify handshake was called mock_ssl_socket.do_handshake.assert_called_once() def test_no_handshake_when_do_handshake_on_connect_true(self, async_worker): """Test that do_handshake() is NOT called when do_handshake_on_connect is True.""" async_worker.cfg.do_handshake_on_connect = True mock_ssl_socket = mock.Mock(spec=ssl.SSLSocket) mock_ssl_socket.selected_alpn_protocol.return_value = None mock_listener = mock.MagicMock() with mock.patch('gunicorn.sock.is_http2_negotiated', return_value=False): with mock.patch('gunicorn.http.get_parser') as mock_parser: mock_parser.return_value = iter([]) try: async_worker.handle(mock_listener, mock_ssl_socket, ('127.0.0.1', 8000)) except StopIteration: pass # Verify handshake was NOT called (already done on 
connect) mock_ssl_socket.do_handshake.assert_not_called() def test_no_handshake_for_non_ssl_socket(self, async_worker): """Test that no handshake is attempted for non-SSL sockets.""" mock_socket = mock.MagicMock() # Regular socket, not ssl.SSLSocket mock_listener = mock.MagicMock() with mock.patch('gunicorn.sock.is_http2_negotiated', return_value=False): with mock.patch('gunicorn.http.get_parser') as mock_parser: mock_parser.return_value = iter([]) try: async_worker.handle(mock_listener, mock_socket, ('127.0.0.1', 8000)) except StopIteration: pass # Non-SSL sockets don't have do_handshake, so it shouldn't be called assert not hasattr(mock_socket, 'do_handshake') or \ not mock_socket.do_handshake.called def test_http2_detected_after_handshake(self, async_worker): """Test that HTTP/2 is properly detected after explicit handshake.""" mock_ssl_socket = mock.Mock(spec=ssl.SSLSocket) mock_ssl_socket.selected_alpn_protocol.return_value = "h2" mock_listener = mock.MagicMock() with mock.patch.object(async_worker, 'handle_http2') as mock_h2: async_worker.handle(mock_listener, mock_ssl_socket, ('127.0.0.1', 8000)) # Verify handshake was called first mock_ssl_socket.do_handshake.assert_called_once() # Verify HTTP/2 handler was invoked mock_h2.assert_called_once() class TestGeventWorkerAlpn: """Test ALPN handling in GeventWorker.""" @pytest.fixture def gevent_worker(self): """Create a GeventWorker instance for testing.""" try: import gevent except ImportError: pytest.skip("gevent not available") from gunicorn.workers.ggevent import GeventWorker worker = GeventWorker.__new__(GeventWorker) worker.cfg = mock.MagicMock() worker.cfg.keepalive = 2 worker.cfg.do_handshake_on_connect = False worker.cfg.http_protocols = ["h2", "h1"] worker.cfg.is_ssl = True worker.alive = True worker.log = mock.MagicMock() worker.wsgi = mock.MagicMock() worker.nr = 0 worker.max_requests = 1000 worker.worker_connections = 1000 return worker def test_gevent_inherits_async_worker(self): """Test that 
GeventWorker inherits from AsyncWorker.""" try: import gevent except ImportError: pytest.skip("gevent not available") from gunicorn.workers.ggevent import GeventWorker from gunicorn.workers.base_async import AsyncWorker assert issubclass(GeventWorker, AsyncWorker) def test_gevent_handle_calls_super(self, gevent_worker): """Test that GeventWorker.handle() calls super().handle().""" mock_client = mock.MagicMock() mock_listener = mock.MagicMock() with mock.patch('gunicorn.workers.base_async.AsyncWorker.handle') as mock_super: gevent_worker.handle(mock_listener, mock_client, ('127.0.0.1', 8000)) mock_super.assert_called_once() class TestEventletWorkerAlpn: """Test ALPN handling in EventletWorker.""" @pytest.fixture def eventlet_worker(self): """Create an EventletWorker instance for testing.""" try: import eventlet except (ImportError, AttributeError): pytest.skip("eventlet not available") from gunicorn.workers.geventlet import EventletWorker worker = EventletWorker.__new__(EventletWorker) worker.cfg = mock.MagicMock() worker.cfg.keepalive = 2 worker.cfg.do_handshake_on_connect = False worker.cfg.http_protocols = ["h2", "h1"] worker.cfg.is_ssl = True worker.alive = True worker.log = mock.MagicMock() worker.wsgi = mock.MagicMock() worker.nr = 0 worker.max_requests = 1000 worker.worker_connections = 1000 return worker def test_eventlet_inherits_async_worker(self): """Test that EventletWorker inherits from AsyncWorker.""" try: import eventlet except (ImportError, AttributeError): pytest.skip("eventlet not available") from gunicorn.workers.geventlet import EventletWorker from gunicorn.workers.base_async import AsyncWorker assert issubclass(EventletWorker, AsyncWorker) def test_eventlet_handle_wraps_ssl_then_calls_super(self, eventlet_worker): """Test that EventletWorker.handle() wraps SSL then calls super().""" from gunicorn.workers import geventlet mock_client = mock.MagicMock() mock_wrapped = mock.MagicMock() mock_listener = mock.MagicMock() with 
mock.patch.object(geventlet, 'ssl_wrap_socket', return_value=mock_wrapped): with mock.patch('gunicorn.workers.base_async.AsyncWorker.handle') as mock_super: eventlet_worker.handle(mock_listener, mock_client, ('127.0.0.1', 8000)) # Verify super().handle() was called with the wrapped socket mock_super.assert_called_once() call_args = mock_super.call_args[0] assert call_args[1] == mock_wrapped # Second arg is the client socket def test_eventlet_alpn_works_with_handshake_fix(self, eventlet_worker): """Test that ALPN detection works after handshake fix for eventlet.""" from gunicorn.workers import geventlet mock_ssl_socket = mock.Mock(spec=ssl.SSLSocket) mock_ssl_socket.selected_alpn_protocol.return_value = "h2" mock_listener = mock.MagicMock() with mock.patch.object(geventlet, 'ssl_wrap_socket', return_value=mock_ssl_socket): with mock.patch.object(eventlet_worker, 'handle_http2') as mock_h2: eventlet_worker.handle(mock_listener, mock.MagicMock(), ('127.0.0.1', 8000)) # Verify handshake was called (by base_async.handle) mock_ssl_socket.do_handshake.assert_called_once() # Verify HTTP/2 handler was invoked mock_h2.assert_called_once() benoitc-gunicorn-f5fb19e/tests/test_http2_async_connection.py000066400000000000000000001026021514360242400247140ustar00rootroot00000000000000# -*- coding: utf-8 - # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""Tests for async HTTP/2 server connection.""" import asyncio import pytest from unittest import mock from io import BytesIO # Check if h2 is available for integration tests try: import h2.connection import h2.config import h2.events H2_AVAILABLE = True except ImportError: H2_AVAILABLE = False from gunicorn.http2.errors import ( HTTP2Error, HTTP2ConnectionError ) pytestmark = pytest.mark.skipif(not H2_AVAILABLE, reason="h2 library not available") class MockConfig: """Mock gunicorn configuration for HTTP/2.""" def __init__(self): self.http2_max_concurrent_streams = 100 self.http2_initial_window_size = 65535 self.http2_max_frame_size = 16384 self.http2_max_header_list_size = 65536 class MockAsyncReader: """Mock asyncio StreamReader for testing.""" def __init__(self, data=b''): self._buffer = BytesIO(data) self._eof = False async def read(self, n=-1): data = self._buffer.read(n) if not data and self._eof: return b'' return data def set_data(self, data): self._buffer = BytesIO(data) def set_eof(self): self._eof = True self._buffer = BytesIO(b'') class MockAsyncWriter: """Mock asyncio StreamWriter for testing.""" def __init__(self): self._buffer = bytearray() self._closed = False self._drained = False def write(self, data): if self._closed: raise OSError("Writer is closed") self._buffer.extend(data) async def drain(self): self._drained = True def close(self): self._closed = True async def wait_closed(self): pass def get_written_data(self): return bytes(self._buffer) def clear(self): self._buffer.clear() def create_client_connection(): """Create an h2 client connection for generating test frames.""" config = h2.config.H2Configuration(client_side=True) conn = h2.connection.H2Connection(config=config) conn.initiate_connection() return conn class TestAsyncHTTP2ConnectionInit: """Test AsyncHTTP2Connection initialization.""" def test_basic_initialization(self): from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = MockAsyncReader() 
writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) assert conn.cfg is cfg assert conn.reader is reader assert conn.writer is writer assert conn.client_addr == ('127.0.0.1', 12345) assert conn.streams == {} assert conn.is_closed is False assert conn._initialized is False def test_settings_from_config(self): from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() cfg.http2_max_concurrent_streams = 50 reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) assert conn.max_concurrent_streams == 50 class TestAsyncHTTP2ConnectionInitiate: """Test async connection initiation.""" @pytest.mark.asyncio async def test_initiate_connection(self): from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) await conn.initiate_connection() assert conn._initialized is True written_data = writer.get_written_data() assert len(written_data) > 0 @pytest.mark.asyncio async def test_initiate_connection_idempotent(self): from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) await conn.initiate_connection() first_len = len(writer.get_written_data()) await conn.initiate_connection() second_len = len(writer.get_written_data()) assert first_len == second_len class TestAsyncHTTP2ConnectionReceiveData: """Test async receiving and processing data.""" @pytest.mark.asyncio async def test_receive_empty_data_closes_connection(self): from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = MockAsyncReader() reader.set_eof() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) 
await conn.initiate_connection() requests = await conn.receive_data() assert conn.is_closed is True assert requests == [] @pytest.mark.asyncio async def test_receive_simple_get_request(self): from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) await conn.initiate_connection() # Create client and exchange settings client = create_client_connection() client_preface = client.data_to_send() reader.set_data(client_preface) await conn.receive_data() server_data = writer.get_written_data() if server_data: client.receive_data(server_data) # Client sends GET request client.send_headers( stream_id=1, headers=[ (':method', 'GET'), (':path', '/async-test'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True ) reader.set_data(client.data_to_send()) requests = await conn.receive_data() assert len(requests) == 1 assert requests[0].method == 'GET' assert requests[0].path == '/async-test' @pytest.mark.asyncio async def test_receive_with_timeout(self): from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) await conn.initiate_connection() client = create_client_connection() reader.set_data(client.data_to_send()) # Should complete without timeout await conn.receive_data(timeout=5.0) @pytest.mark.asyncio async def test_receive_timeout_raises(self): from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() # Create a reader that blocks forever async def blocking_read(n): await asyncio.sleep(10) return b'' reader = mock.Mock() reader.read = mock.AsyncMock(side_effect=blocking_read) writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) await conn.initiate_connection() # Timeout is converted to 
HTTP2ConnectionError by the implementation with pytest.raises((asyncio.TimeoutError, HTTP2ConnectionError)): await conn.receive_data(timeout=0.01) class TestAsyncHTTP2ConnectionSendResponse: """Test async sending responses.""" @pytest.mark.asyncio async def test_send_simple_response(self): from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) await conn.initiate_connection() # Setup stream via request client = create_client_connection() reader.set_data(client.data_to_send()) await conn.receive_data() client.receive_data(writer.get_written_data()) client.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) reader.set_data(client.data_to_send()) await conn.receive_data() writer.clear() await conn.send_response( stream_id=1, status=200, headers=[('content-type', 'text/plain')], body=b'Async Hello!' ) events = client.receive_data(writer.get_written_data()) data_events = [e for e in events if isinstance(e, h2.events.DataReceived)] assert len(data_events) == 1 assert data_events[0].data == b'Async Hello!' 
@pytest.mark.asyncio async def test_send_response_invalid_stream(self): """Test that sending response on invalid stream returns False.""" from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) await conn.initiate_connection() # Sending to a non-existent stream should return False gracefully result = await conn.send_response(stream_id=999, status=200, headers=[], body=None) assert result is False class TestAsyncHTTP2ConnectionSendData: """Test async send_data method.""" @pytest.mark.asyncio async def test_send_data(self): from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) await conn.initiate_connection() # Setup stream client = create_client_connection() reader.set_data(client.data_to_send()) await conn.receive_data() client.receive_data(writer.get_written_data()) client.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) reader.set_data(client.data_to_send()) await conn.receive_data() # Send full response using send_response writer.clear() await conn.send_response( stream_id=1, status=200, headers=[('content-type', 'text/plain')], body=b'chunk1chunk2' ) events = client.receive_data(writer.get_written_data()) data_events = [e for e in events if isinstance(e, h2.events.DataReceived)] assert len(data_events) >= 1 all_data = b''.join(e.data for e in data_events) assert all_data == b'chunk1chunk2' def get_h2_header_value(headers_list, name): """Extract a header value from h2 headers list.""" for header_name, header_value in headers_list: name_str = header_name.decode() if isinstance(header_name, bytes) else header_name if name_str == name: return header_value.decode() if 
isinstance(header_value, bytes) else header_value return None class TestAsyncHTTP2ConnectionSendError: """Test async error response sending.""" @pytest.mark.asyncio async def test_send_error(self): from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) await conn.initiate_connection() client = create_client_connection() reader.set_data(client.data_to_send()) await conn.receive_data() client.receive_data(writer.get_written_data()) client.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) reader.set_data(client.data_to_send()) await conn.receive_data() writer.clear() await conn.send_error(stream_id=1, status_code=500, message="Internal Error") events = client.receive_data(writer.get_written_data()) response_events = [e for e in events if isinstance(e, h2.events.ResponseReceived)] assert len(response_events) == 1 headers_list = response_events[0].headers assert get_h2_header_value(headers_list, ':status') == '500' class TestAsyncHTTP2ConnectionResetStream: """Test async stream reset.""" @pytest.mark.asyncio async def test_reset_stream(self): from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) await conn.initiate_connection() client = create_client_connection() reader.set_data(client.data_to_send()) await conn.receive_data() client.receive_data(writer.get_written_data()) client.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=False) reader.set_data(client.data_to_send()) await conn.receive_data() writer.clear() await conn.reset_stream(stream_id=1, error_code=0x8) events = client.receive_data(writer.get_written_data()) 
reset_events = [e for e in events if isinstance(e, h2.events.StreamReset)] assert len(reset_events) == 1 class TestAsyncHTTP2ConnectionClose: """Test async connection close.""" @pytest.mark.asyncio async def test_close_connection(self): from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) await conn.initiate_connection() client = create_client_connection() reader.set_data(client.data_to_send()) await conn.receive_data() writer.clear() await conn.close() assert conn.is_closed is True assert writer._closed is True @pytest.mark.asyncio async def test_close_idempotent(self): from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) await conn.initiate_connection() await conn.close() await conn.close() # Should not raise class TestAsyncHTTP2ConnectionCleanup: """Test async stream cleanup.""" @pytest.mark.asyncio async def test_cleanup_stream(self): from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) await conn.initiate_connection() client = create_client_connection() reader.set_data(client.data_to_send()) await conn.receive_data() client.receive_data(writer.get_written_data()) client.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) reader.set_data(client.data_to_send()) await conn.receive_data() assert 1 in conn.streams conn.cleanup_stream(1) assert 1 not in conn.streams class TestAsyncHTTP2ConnectionRepr: """Test async connection representation.""" def test_repr(self): from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = 
MockConfig() reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) repr_str = repr(conn) assert "AsyncHTTP2Connection" in repr_str assert "streams=" in repr_str class TestAsyncHTTP2ConnectionSocketErrors: """Test socket error handling in async connection.""" @pytest.mark.asyncio async def test_read_error_raises_connection_error(self): from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = mock.Mock() reader.read = mock.AsyncMock(side_effect=OSError("Connection reset")) writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) await conn.initiate_connection() with pytest.raises(HTTP2ConnectionError): await conn.receive_data() @pytest.mark.asyncio async def test_write_error_raises_connection_error(self): from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = MockAsyncReader() writer = mock.Mock() writer.write = mock.Mock(side_effect=OSError("Broken pipe")) writer.drain = mock.AsyncMock() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) with pytest.raises(HTTP2ConnectionError): await conn.initiate_connection() class TestAsyncHTTP2ConnectionPriority: """Test async HTTP/2 priority handling.""" @pytest.mark.asyncio async def test_handle_priority_updated_existing_stream(self): """Test handling priority update for existing stream.""" from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) # Create a client connection to generate frames client_conn = create_client_connection() client_data = client_conn.data_to_send() # Set up reader with client preface reader.set_data(client_data) await conn.initiate_connection() await conn.receive_data() writer.clear() # Send a request to create a stream client_conn.send_headers(1, [ 
(':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ]) request_data = client_conn.data_to_send() reader.set_data(request_data) await conn.receive_data() # Verify stream was created assert 1 in conn.streams stream = conn.streams[1] # Default priority values assert stream.priority_weight == 16 assert stream.priority_depends_on == 0 # Send a PRIORITY frame client_conn.prioritize(1, weight=128, depends_on=0, exclusive=False) priority_data = client_conn.data_to_send() reader.set_data(priority_data) await conn.receive_data() # Verify priority was updated assert stream.priority_weight == 128 @pytest.mark.asyncio async def test_handle_priority_updated_nonexistent_stream(self): """Test that priority update for nonexistent stream is ignored.""" from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) # Create a client connection client_conn = create_client_connection() client_data = client_conn.data_to_send() reader.set_data(client_data) await conn.initiate_connection() await conn.receive_data() # Send a PRIORITY frame for a stream that doesn't exist client_conn.prioritize(99, weight=64, depends_on=0, exclusive=False) priority_data = client_conn.data_to_send() reader.set_data(priority_data) # Should not raise await conn.receive_data() class TestAsyncHTTP2ConnectionTrailers: """Test async HTTP/2 response trailer support.""" @pytest.mark.asyncio async def test_send_trailers_after_headers_and_body(self): """Test sending trailers after response headers and body.""" from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) # Create a client connection client_conn = create_client_connection() client_data = client_conn.data_to_send() 
reader.set_data(client_data) await conn.initiate_connection() await conn.receive_data() writer.clear() # Send a request client_conn.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) reader.set_data(client_conn.data_to_send()) await conn.receive_data() # Manually send headers without ending stream (for trailer support) stream = conn.streams[1] response_headers = [(':status', '200'), ('content-type', 'text/plain')] conn.h2_conn.send_headers(1, response_headers, end_stream=False) stream.send_headers(response_headers, end_stream=False) await conn._send_pending_data() # Send body without ending stream conn.h2_conn.send_data(1, b'Hello World', end_stream=False) stream.send_data(b'Hello World', end_stream=False) await conn._send_pending_data() # Send trailers trailers = [('grpc-status', '0'), ('grpc-message', 'OK')] await conn.send_trailers(1, trailers) # Verify stream is closed assert stream.response_complete is True assert stream.response_trailers == [('grpc-status', '0'), ('grpc-message', 'OK')] @pytest.mark.asyncio async def test_send_trailers_pseudo_header_raises(self): """Test that pseudo-headers in trailers raise error.""" from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) client_conn = create_client_connection() reader.set_data(client_conn.data_to_send()) await conn.initiate_connection() await conn.receive_data() # Send a request client_conn.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) reader.set_data(client_conn.data_to_send()) await conn.receive_data() # Send response await conn.send_response(1, 200, [('content-type', 'text/plain')], None) # Try to send trailers with pseudo-header with pytest.raises(HTTP2Error) as exc_info: await conn.send_trailers(1, 
[(':status', '200')]) assert "Pseudo-header" in str(exc_info.value) @pytest.mark.asyncio async def test_send_trailers_without_headers_returns_false(self): """Test that sending trailers without headers returns False.""" from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) client_conn = create_client_connection() reader.set_data(client_conn.data_to_send()) await conn.initiate_connection() await conn.receive_data() # Send a request client_conn.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) reader.set_data(client_conn.data_to_send()) await conn.receive_data() # Try to send trailers without sending headers first - should return False result = await conn.send_trailers(1, [('trailer', 'value')]) assert result is False class TestAsyncHTTP2FlowControl: """Test async HTTP/2 flow control handling.""" @pytest.mark.asyncio async def test_send_data_respects_zero_window(self): """Test that send_data returns False when flow control window is 0.""" from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) # Create client and send preface client_conn = create_client_connection() reader.set_data(client_conn.data_to_send()) await conn.initiate_connection() await conn.receive_data() # Send a request client_conn.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) reader.set_data(client_conn.data_to_send()) await conn.receive_data() # Send response headers without ending stream conn.h2_conn.send_headers(1, [ (':status', '200'), ('content-type', 'text/plain'), ], end_stream=False) await conn._send_pending_data() 
conn.streams[1].send_headers([(':status', '200')], end_stream=False) # Mock the flow control window to return 0 original_window = conn.h2_conn.local_flow_control_window conn.h2_conn.local_flow_control_window = lambda stream_id: 0 # Try to send data - should return False (not raise) result = await conn.send_data(1, b'Hello, World!') assert result is False # Restore conn.h2_conn.local_flow_control_window = original_window @pytest.mark.asyncio async def test_send_data_respects_flow_control(self): """Test that send_data chunks data according to flow control window.""" from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) # Create client and send preface client_conn = create_client_connection() reader.set_data(client_conn.data_to_send()) await conn.initiate_connection() await conn.receive_data() # Send a request client_conn.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) reader.set_data(client_conn.data_to_send()) await conn.receive_data() # Send response headers without ending stream conn.h2_conn.send_headers(1, [ (':status', '200'), ('content-type', 'text/plain'), ], end_stream=False) await conn._send_pending_data() conn.streams[1].send_headers([(':status', '200')], end_stream=False) # Send small data - should succeed within window small_data = b'Hello' await conn.send_data(1, small_data, end_stream=True) # Verify data was sent sent_data = writer.get_written_data() assert len(sent_data) > 0 class TestAsyncHTTP2StreamClosedHandling: """Test graceful handling of StreamClosedError in async connection.""" @pytest.mark.asyncio async def test_send_response_on_closed_stream(self): """Test that send_response gracefully handles closed stream.""" from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = 
MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) # Create client and send preface client_conn = create_client_connection() reader.set_data(client_conn.data_to_send()) await conn.initiate_connection() await conn.receive_data() # Send a request client_conn.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) reader.set_data(client_conn.data_to_send()) await conn.receive_data() # Simulate client resetting the stream client_conn.reset_stream(1) reader.set_data(client_conn.data_to_send()) await conn.receive_data() # Try to send response - should return False, not raise result = await conn.send_response(1, 200, [('content-type', 'text/plain')], b'Hello') assert result is False @pytest.mark.asyncio async def test_send_data_on_reset_stream(self): """Test that send_data gracefully handles reset stream.""" from gunicorn.http2.async_connection import AsyncHTTP2Connection cfg = MockConfig() reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) # Create client and send preface client_conn = create_client_connection() reader.set_data(client_conn.data_to_send()) await conn.initiate_connection() await conn.receive_data() # Send a request client_conn.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) reader.set_data(client_conn.data_to_send()) await conn.receive_data() # Send response headers without ending stream conn.h2_conn.send_headers(1, [ (':status', '200'), ('content-type', 'text/plain'), ], end_stream=False) await conn._send_pending_data() conn.streams[1].send_headers([(':status', '200')], end_stream=False) # Simulate client resetting the stream client_conn.reset_stream(1) reader.set_data(client_conn.data_to_send()) await conn.receive_data() # Try to send data - should return False, not raise 
result = await conn.send_data(1, b'Hello, World!', end_stream=True) assert result is False class TestAsyncHTTP2WindowOverflowHandling: """Test window overflow handling in async connection.""" @pytest.mark.asyncio async def test_window_overflow_sends_goaway(self): """Test that window overflow results in connection close.""" from gunicorn.http2.async_connection import AsyncHTTP2Connection from gunicorn.http2.errors import HTTP2ErrorCode cfg = MockConfig() reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) # Create client and send preface client_conn = create_client_connection() reader.set_data(client_conn.data_to_send()) await conn.initiate_connection() await conn.receive_data() # Mock increment_flow_control_window to raise ValueError (overflow) def raise_overflow(increment, stream_id=None): raise ValueError("Flow control window too large") conn.h2_conn.increment_flow_control_window = raise_overflow # Send a request with data to trigger the overflow client_conn.send_headers(1, [ (':method', 'POST'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=False) client_conn.send_data(1, b'test data', end_stream=True) reader.set_data(client_conn.data_to_send()) await conn.receive_data() # Connection should be closed with FLOW_CONTROL_ERROR assert conn.is_closed is True class TestAsyncHTTP2ProtocolErrorHandling: """Test protocol error handling sends proper GOAWAY.""" @pytest.mark.asyncio async def test_protocol_error_sends_goaway(self): """Test that protocol errors result in GOAWAY being sent.""" from gunicorn.http2.async_connection import AsyncHTTP2Connection from gunicorn.http2.errors import HTTP2ProtocolError, HTTP2ErrorCode cfg = MockConfig() reader = MockAsyncReader() writer = MockAsyncWriter() conn = AsyncHTTP2Connection(cfg, reader, writer, ('127.0.0.1', 12345)) # Create client and send preface client_conn = create_client_connection() 
reader.set_data(client_conn.data_to_send()) await conn.initiate_connection() await conn.receive_data() # Clear sent data to only capture new frames writer.clear() # Mock h2_conn.receive_data to raise ProtocolError def raise_protocol_error(data): raise h2.exceptions.ProtocolError("Test protocol error") conn.h2_conn.receive_data = raise_protocol_error # Set some dummy data for the reader reader.set_data(b'dummy data') # This should send GOAWAY and raise ProtocolError with pytest.raises(HTTP2ProtocolError) as exc_info: await conn.receive_data() assert "Test protocol error" in str(exc_info.value) # Verify something was sent (GOAWAY frame) sent_data = writer.get_written_data() assert len(sent_data) > 0 # Connection should be marked as closed assert conn.is_closed is True benoitc-gunicorn-f5fb19e/tests/test_http2_config.py000066400000000000000000000256741514360242400226420ustar00rootroot00000000000000# -*- coding: utf-8 - # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""Tests for HTTP/2 configuration settings.""" import pytest from gunicorn import config from gunicorn.config import Config class TestHttpProtocolsConfig: """Test http_protocols configuration setting.""" def test_default_is_h1(self): c = Config() assert c.http_protocols == ["h1"] def test_set_h1_only(self): c = Config() c.set("http_protocols", "h1") assert c.http_protocols == ["h1"] def test_set_h2_only(self): c = Config() c.set("http_protocols", "h2") assert c.http_protocols == ["h2"] def test_set_h1_and_h2(self): c = Config() c.set("http_protocols", "h2,h1") assert c.http_protocols == ["h2", "h1"] def test_set_h1_h2_order_preserved(self): c = Config() c.set("http_protocols", "h1,h2") assert c.http_protocols == ["h1", "h2"] def test_whitespace_handling(self): c = Config() c.set("http_protocols", " h1 , h2 ") assert c.http_protocols == ["h1", "h2"] def test_case_insensitive(self): c = Config() c.set("http_protocols", "H1,H2") assert c.http_protocols == ["h1", "h2"] def test_empty_string_defaults_to_h1(self): c = Config() c.set("http_protocols", "") assert c.http_protocols == ["h1"] def test_none_defaults_to_h1(self): c = Config() c.set("http_protocols", None) assert c.http_protocols == ["h1"] def test_invalid_protocol(self): c = Config() with pytest.raises(ValueError) as exc_info: c.set("http_protocols", "h4") assert "Invalid protocol" in str(exc_info.value) assert "h4" in str(exc_info.value) def test_invalid_type(self): c = Config() with pytest.raises(TypeError) as exc_info: c.set("http_protocols", 123) assert "must be a string" in str(exc_info.value) def test_invalid_type_list(self): c = Config() with pytest.raises(TypeError): c.set("http_protocols", ["h1", "h2"]) def test_mixed_valid_invalid(self): c = Config() with pytest.raises(ValueError): c.set("http_protocols", "h1,invalid,h2") class TestHttp2MaxConcurrentStreams: """Test http2_max_concurrent_streams configuration setting.""" def test_default_value(self): c = Config() assert c.http2_max_concurrent_streams 
== 100 def test_set_custom_value(self): c = Config() c.set("http2_max_concurrent_streams", 50) assert c.http2_max_concurrent_streams == 50 def test_set_from_string(self): c = Config() c.set("http2_max_concurrent_streams", "200") assert c.http2_max_concurrent_streams == 200 def test_set_high_value(self): c = Config() c.set("http2_max_concurrent_streams", 1000) assert c.http2_max_concurrent_streams == 1000 def test_negative_value_raises(self): c = Config() with pytest.raises(ValueError): c.set("http2_max_concurrent_streams", -1) def test_zero_value(self): # Zero is technically valid for positive int validator # It may have special meaning (use h2 default) c = Config() c.set("http2_max_concurrent_streams", 0) assert c.http2_max_concurrent_streams == 0 class TestHttp2InitialWindowSize: """Test http2_initial_window_size configuration setting.""" def test_default_value(self): c = Config() # Default per RFC 7540 is 65535 assert c.http2_initial_window_size == 65535 def test_set_custom_value(self): c = Config() c.set("http2_initial_window_size", 131072) assert c.http2_initial_window_size == 131072 def test_set_from_string(self): c = Config() c.set("http2_initial_window_size", "32768") assert c.http2_initial_window_size == 32768 def test_negative_value_raises(self): c = Config() with pytest.raises(ValueError): c.set("http2_initial_window_size", -1) class TestHttp2MaxFrameSize: """Test http2_max_frame_size configuration setting.""" def test_default_value(self): c = Config() # Default per RFC 7540 is 16384 assert c.http2_max_frame_size == 16384 def test_set_custom_value(self): c = Config() c.set("http2_max_frame_size", 32768) assert c.http2_max_frame_size == 32768 def test_set_from_string(self): c = Config() c.set("http2_max_frame_size", "65536") assert c.http2_max_frame_size == 65536 def test_valid_min_value(self): """RFC 7540 minimum is 16384 (2^14).""" c = Config() c.set("http2_max_frame_size", 16384) assert c.http2_max_frame_size == 16384 def test_valid_max_value(self): 
"""RFC 7540 maximum is 16777215 (2^24 - 1).""" c = Config() c.set("http2_max_frame_size", 16777215) assert c.http2_max_frame_size == 16777215 def test_valid_mid_range_value(self): """Test a value in the middle of the valid range.""" c = Config() c.set("http2_max_frame_size", 1048576) # 1MB assert c.http2_max_frame_size == 1048576 def test_below_min_raises(self): """Values below 16384 should raise ValueError per RFC 7540.""" c = Config() with pytest.raises(ValueError) as exc_info: c.set("http2_max_frame_size", 16383) assert "must be between 16384 and 16777215" in str(exc_info.value) def test_above_max_raises(self): """Values above 16777215 should raise ValueError per RFC 7540.""" c = Config() with pytest.raises(ValueError) as exc_info: c.set("http2_max_frame_size", 16777216) assert "must be between 16384 and 16777215" in str(exc_info.value) def test_negative_value_raises(self): c = Config() with pytest.raises(ValueError): c.set("http2_max_frame_size", -1) class TestHttp2MaxHeaderListSize: """Test http2_max_header_list_size configuration setting.""" def test_default_value(self): c = Config() assert c.http2_max_header_list_size == 65536 def test_set_custom_value(self): c = Config() c.set("http2_max_header_list_size", 131072) assert c.http2_max_header_list_size == 131072 def test_set_from_string(self): c = Config() c.set("http2_max_header_list_size", "262144") assert c.http2_max_header_list_size == 262144 def test_negative_value_raises(self): c = Config() with pytest.raises(ValueError): c.set("http2_max_header_list_size", -1) class TestHttp2ConfigPropertyAccess: """Test property access for HTTP/2 settings.""" def test_all_http2_settings_accessible(self): c = Config() # These should not raise _ = c.http_protocols _ = c.http2_max_concurrent_streams _ = c.http2_initial_window_size _ = c.http2_max_frame_size _ = c.http2_max_header_list_size class TestHttp2ConfigDefaults: """Test that defaults match HTTP/2 specification values.""" def test_window_size_matches_rfc(self): 
"""RFC 7540 default is 2^16-1 (65535).""" c = Config() assert c.http2_initial_window_size == 65535 def test_max_frame_size_matches_rfc_minimum(self): """RFC 7540 minimum is 2^14 (16384).""" c = Config() assert c.http2_max_frame_size == 16384 def test_concurrent_streams_reasonable_default(self): """Default should be reasonable for production use.""" c = Config() assert 1 <= c.http2_max_concurrent_streams <= 1000 class TestValidateHttpProtocols: """Test the validate_http_protocols function directly.""" def test_validate_none(self): result = config.validate_http_protocols(None) assert result == ["h1"] def test_validate_empty_string(self): result = config.validate_http_protocols("") assert result == ["h1"] def test_validate_whitespace_only(self): result = config.validate_http_protocols(" ") assert result == ["h1"] def test_validate_single_protocol(self): result = config.validate_http_protocols("h2") assert result == ["h2"] def test_validate_multiple_protocols(self): result = config.validate_http_protocols("h2,h1") assert result == ["h2", "h1"] def test_validate_with_spaces(self): result = config.validate_http_protocols("h2 , h1") assert result == ["h2", "h1"] def test_validate_uppercase(self): result = config.validate_http_protocols("H2,H1") assert result == ["h1", "h2"] or result == ["h2", "h1"] def test_validate_invalid_raises(self): with pytest.raises(ValueError): config.validate_http_protocols("http2") def test_validate_type_error(self): with pytest.raises(TypeError): config.validate_http_protocols(42) class TestValidateHttp2FrameSize: """Test the validate_http2_frame_size function directly.""" def test_validate_min_value(self): """RFC 7540 minimum is 16384 (2^14).""" result = config.validate_http2_frame_size(16384) assert result == 16384 def test_validate_max_value(self): """RFC 7540 maximum is 16777215 (2^24 - 1).""" result = config.validate_http2_frame_size(16777215) assert result == 16777215 def test_validate_mid_range(self): """Test a value in the middle of 
the valid range.""" result = config.validate_http2_frame_size(1000000) assert result == 1000000 def test_validate_from_string(self): """Test that string values are converted properly.""" result = config.validate_http2_frame_size("32768") assert result == 32768 def test_validate_hex_string(self): """Test hex string conversion.""" result = config.validate_http2_frame_size("0x10000") # 65536 assert result == 65536 def test_validate_below_min_raises(self): """Values below 16384 should raise ValueError.""" with pytest.raises(ValueError) as exc_info: config.validate_http2_frame_size(16383) assert "must be between 16384 and 16777215" in str(exc_info.value) def test_validate_above_max_raises(self): """Values above 16777215 should raise ValueError.""" with pytest.raises(ValueError) as exc_info: config.validate_http2_frame_size(16777216) assert "must be between 16384 and 16777215" in str(exc_info.value) def test_validate_zero_raises(self): """Zero is below minimum and should raise ValueError.""" with pytest.raises(ValueError): config.validate_http2_frame_size(0) def test_validate_negative_raises(self): """Negative values should raise ValueError.""" with pytest.raises(ValueError): config.validate_http2_frame_size(-1) benoitc-gunicorn-f5fb19e/tests/test_http2_connection.py000066400000000000000000001014621514360242400235220ustar00rootroot00000000000000# -*- coding: utf-8 - # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""Tests for HTTP/2 server connection.""" import pytest from unittest import mock from io import BytesIO # Check if h2 is available for integration tests try: import h2.connection import h2.config import h2.events import h2.exceptions H2_AVAILABLE = True except ImportError: H2_AVAILABLE = False from gunicorn.http2.errors import ( HTTP2Error, HTTP2ConnectionError ) pytestmark = pytest.mark.skipif(not H2_AVAILABLE, reason="h2 library not available") class MockConfig: """Mock gunicorn configuration for HTTP/2.""" def __init__(self): self.http2_max_concurrent_streams = 100 self.http2_initial_window_size = 65535 self.http2_max_frame_size = 16384 self.http2_max_header_list_size = 65536 class MockSocket: """Mock socket for testing connection without real network I/O.""" def __init__(self, data=b''): self._recv_buffer = BytesIO(data) self._sent = bytearray() self._closed = False def recv(self, size): return self._recv_buffer.read(size) def sendall(self, data): if self._closed: raise OSError("Socket is closed") self._sent.extend(data) def close(self): self._closed = True def get_sent_data(self): return bytes(self._sent) def set_recv_data(self, data): self._recv_buffer = BytesIO(data) def create_client_connection(): """Create an h2 client connection for generating test frames.""" config = h2.config.H2Configuration(client_side=True) conn = h2.connection.H2Connection(config=config) conn.initiate_connection() return conn class TestHTTP2ServerConnectionInit: """Test HTTP2ServerConnection initialization.""" def test_basic_initialization(self): from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) assert conn.cfg is cfg assert conn.sock is sock assert conn.client_addr == ('127.0.0.1', 12345) assert conn.streams == {} assert conn.is_closed is False assert conn._initialized is False def test_settings_from_config(self): from gunicorn.http2.connection import HTTP2ServerConnection 
cfg = MockConfig() cfg.http2_max_concurrent_streams = 50 cfg.http2_initial_window_size = 32768 sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) assert conn.max_concurrent_streams == 50 assert conn.initial_window_size == 32768 class TestHTTP2ServerConnectionInitiate: """Test connection initiation.""" def test_initiate_connection(self): from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() assert conn._initialized is True # Should have sent settings frame sent_data = sock.get_sent_data() assert len(sent_data) > 0 def test_initiate_connection_idempotent(self): from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() first_sent = len(sock.get_sent_data()) conn.initiate_connection() # Second call second_sent = len(sock.get_sent_data()) # Should not send additional data assert first_sent == second_sent class TestHTTP2ServerConnectionReceiveData: """Test receiving and processing data.""" def test_receive_empty_data_closes_connection(self): from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket(b'') conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() requests = conn.receive_data() assert conn.is_closed is True assert requests == [] def test_receive_client_preface_and_headers(self): from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() # Generate client data client = create_client_connection() client_preface = client.data_to_send() # Simulate server receiving client settings # Feed client preface to server requests = conn.receive_data(client_preface) # No requests yet, 
just settings exchange assert requests == [] def test_receive_simple_get_request(self): from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() # Create client and send request client = create_client_connection() client_preface = client.data_to_send() # Process client preface on server conn.receive_data(client_preface) # Server may have sent settings, feed them to client server_data = sock.get_sent_data() if server_data: client.receive_data(server_data) # Client sends GET request client.send_headers( stream_id=1, headers=[ (':method', 'GET'), (':path', '/test'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True ) request_data = client.data_to_send() # Server receives request requests = conn.receive_data(request_data) assert len(requests) == 1 req = requests[0] assert req.method == 'GET' assert req.path == '/test' def test_receive_post_with_body(self): from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() # Create client client = create_client_connection() client_preface = client.data_to_send() conn.receive_data(client_preface) server_data = sock.get_sent_data() if server_data: client.receive_data(server_data) # Client sends POST with body client.send_headers( stream_id=1, headers=[ (':method', 'POST'), (':path', '/submit'), (':scheme', 'https'), (':authority', 'localhost'), ('content-type', 'application/json'), ('content-length', '13'), ], end_stream=False ) client.send_data(stream_id=1, data=b'{"key":"val"}', end_stream=True) request_data = client.data_to_send() requests = conn.receive_data(request_data) assert len(requests) == 1 req = requests[0] assert req.method == 'POST' assert req.body.read() == b'{"key":"val"}' def test_socket_error_raises_connection_error(self): from 
gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = mock.Mock() sock.recv.side_effect = OSError("Connection reset") conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() with pytest.raises(HTTP2ConnectionError): conn.receive_data() class TestHTTP2ServerConnectionSendResponse: """Test sending responses.""" def test_send_simple_response(self): from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() # Create a stream by receiving a request client = create_client_connection() client_preface = client.data_to_send() conn.receive_data(client_preface) server_data = sock.get_sent_data() if server_data: client.receive_data(server_data) client.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) conn.receive_data(client.data_to_send()) # Send response sock._sent.clear() conn.send_response( stream_id=1, status=200, headers=[('content-type', 'text/plain')], body=b'Hello!' ) sent = sock.get_sent_data() assert len(sent) > 0 # Verify client receives valid response events = client.receive_data(sent) response_events = [e for e in events if isinstance(e, h2.events.ResponseReceived)] data_events = [e for e in events if isinstance(e, h2.events.DataReceived)] assert len(response_events) == 1 assert len(data_events) == 1 assert data_events[0].data == b'Hello!' 
def test_send_response_with_empty_body(self): from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() client = create_client_connection() conn.receive_data(client.data_to_send()) client.receive_data(sock.get_sent_data()) client.send_headers(1, [ (':method', 'HEAD'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) conn.receive_data(client.data_to_send()) sock._sent.clear() conn.send_response(stream_id=1, status=200, headers=[], body=None) events = client.receive_data(sock.get_sent_data()) stream_ended = [e for e in events if isinstance(e, h2.events.StreamEnded)] assert len(stream_ended) == 1 def test_send_response_invalid_stream(self): """Test that sending response on invalid stream returns False.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() # Sending to a non-existent stream should return False gracefully result = conn.send_response(stream_id=999, status=200, headers=[], body=None) assert result is False class TestHTTP2ServerConnectionSendError: """Test sending error responses.""" def test_send_error_with_message(self): from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() client = create_client_connection() conn.receive_data(client.data_to_send()) client.receive_data(sock.get_sent_data()) client.send_headers(1, [ (':method', 'GET'), (':path', '/notfound'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) conn.receive_data(client.data_to_send()) sock._sent.clear() conn.send_error(stream_id=1, status_code=404, message="Not Found") events = client.receive_data(sock.get_sent_data()) response_events = [e 
for e in events if isinstance(e, h2.events.ResponseReceived)] data_events = [e for e in events if isinstance(e, h2.events.DataReceived)] assert len(response_events) == 1 # h2 library returns headers as list of tuples, convert to dict # Note: headers may be bytes or strings depending on h2 version headers_list = response_events[0].headers status = None for name, value in headers_list: name_str = name.decode() if isinstance(name, bytes) else name if name_str == ':status': status = value.decode() if isinstance(value, bytes) else value break assert status == '404' assert len(data_events) == 1 assert data_events[0].data == b"Not Found" class TestHTTP2ServerConnectionResetStream: """Test stream reset.""" def test_reset_stream(self): from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() client = create_client_connection() conn.receive_data(client.data_to_send()) client.receive_data(sock.get_sent_data()) client.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=False) conn.receive_data(client.data_to_send()) sock._sent.clear() conn.reset_stream(stream_id=1, error_code=0x8) # CANCEL events = client.receive_data(sock.get_sent_data()) reset_events = [e for e in events if isinstance(e, h2.events.StreamReset)] assert len(reset_events) == 1 assert reset_events[0].error_code == 0x8 class TestHTTP2ServerConnectionClose: """Test connection close.""" def test_close_connection(self): from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() client = create_client_connection() conn.receive_data(client.data_to_send()) sock._sent.clear() conn.close() assert conn.is_closed is True # Should have sent GOAWAY events = client.receive_data(sock.get_sent_data()) 
goaway_events = [e for e in events if isinstance(e, h2.events.ConnectionTerminated)] assert len(goaway_events) == 1 def test_close_idempotent(self): from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() conn.close() sent_after_first = len(sock.get_sent_data()) conn.close() # Second call sent_after_second = len(sock.get_sent_data()) # Should not send additional GOAWAY assert sent_after_first == sent_after_second class TestHTTP2ServerConnectionCleanup: """Test stream cleanup.""" def test_cleanup_stream(self): from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() client = create_client_connection() conn.receive_data(client.data_to_send()) client.receive_data(sock.get_sent_data()) client.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) conn.receive_data(client.data_to_send()) assert 1 in conn.streams conn.cleanup_stream(1) assert 1 not in conn.streams def test_cleanup_nonexistent_stream(self): from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() # Should not raise conn.cleanup_stream(999) class TestHTTP2ServerConnectionMultipleStreams: """Test handling multiple concurrent streams.""" def test_multiple_streams(self): from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() client = create_client_connection() conn.receive_data(client.data_to_send()) client.receive_data(sock.get_sent_data()) # Send multiple requests client.send_headers(1, [ (':method', 'GET'), (':path', 
'/one'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) client.send_headers(3, [ (':method', 'GET'), (':path', '/two'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) requests = conn.receive_data(client.data_to_send()) assert len(requests) == 2 paths = {req.path for req in requests} assert paths == {'/one', '/two'} class TestHTTP2ServerConnectionRepr: """Test string representation.""" def test_repr(self): from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) repr_str = repr(conn) assert "HTTP2ServerConnection" in repr_str assert "streams=" in repr_str assert "closed=" in repr_str class TestHTTP2ServerConnectionPriority: """Test HTTP/2 priority handling.""" def test_handle_priority_updated_existing_stream(self): """Test handling priority update for existing stream.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() # Create a client connection to generate frames client_conn = create_client_connection() # Get client preface client_data = client_conn.data_to_send() # Feed client preface to server conn.receive_data(client_data) sock._sent = bytearray() # Send a request to create a stream client_conn.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ]) request_data = client_conn.data_to_send() conn.receive_data(request_data) # Verify stream was created assert 1 in conn.streams stream = conn.streams[1] # Default priority values assert stream.priority_weight == 16 assert stream.priority_depends_on == 0 # Send a PRIORITY frame client_conn.prioritize(1, weight=128, depends_on=0, exclusive=False) priority_data = client_conn.data_to_send() conn.receive_data(priority_data) # Verify priority was updated assert stream.priority_weight 
== 128 def test_handle_priority_updated_nonexistent_stream(self): """Test that priority update for nonexistent stream is ignored.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() # Create a client connection client_conn = create_client_connection() client_data = client_conn.data_to_send() conn.receive_data(client_data) # Send a PRIORITY frame for a stream that doesn't exist # This should not raise an error client_conn.prioritize(99, weight=64, depends_on=0, exclusive=False) priority_data = client_conn.data_to_send() # Should not raise conn.receive_data(priority_data) class TestHTTP2ServerConnectionTrailers: """Test HTTP/2 response trailer support.""" def test_send_trailers_after_headers_and_body(self): """Test sending trailers after response headers and body.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() # Create a client connection client_conn = create_client_connection() client_data = client_conn.data_to_send() conn.receive_data(client_data) sock._sent = bytearray() # Send a request client_conn.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) request_data = client_conn.data_to_send() conn.receive_data(request_data) # Manually send headers without ending stream (for trailer support) stream = conn.streams[1] response_headers = [(':status', '200'), ('content-type', 'text/plain')] conn.h2_conn.send_headers(1, response_headers, end_stream=False) stream.send_headers(response_headers, end_stream=False) conn._send_pending_data() # Send body without ending stream conn.h2_conn.send_data(1, b'Hello World', end_stream=False) stream.send_data(b'Hello World', end_stream=False) conn._send_pending_data() # Send trailers 
trailers = [('grpc-status', '0'), ('grpc-message', 'OK')] conn.send_trailers(1, trailers) # Verify stream is closed assert stream.response_complete is True assert stream.response_trailers == [('grpc-status', '0'), ('grpc-message', 'OK')] def test_send_trailers_pseudo_header_raises(self): """Test that pseudo-headers in trailers raise error.""" from gunicorn.http2.connection import HTTP2ServerConnection from gunicorn.http2.errors import HTTP2Error cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() client_conn = create_client_connection() client_data = client_conn.data_to_send() conn.receive_data(client_data) # Send a request client_conn.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) conn.receive_data(client_conn.data_to_send()) # Send response conn.send_response(1, 200, [('content-type', 'text/plain')], None) # Try to send trailers with pseudo-header with pytest.raises(HTTP2Error) as exc_info: conn.send_trailers(1, [(':status', '200')]) assert "Pseudo-header" in str(exc_info.value) def test_send_trailers_without_headers_returns_false(self): """Test that sending trailers without headers returns False.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() client_conn = create_client_connection() client_data = client_conn.data_to_send() conn.receive_data(client_data) # Send a request client_conn.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) conn.receive_data(client_conn.data_to_send()) # Try to send trailers without sending headers first - should return False result = conn.send_trailers(1, [('trailer', 'value')]) assert result is False def test_send_trailers_nonexistent_stream_returns_false(self): """Test 
that sending trailers on nonexistent stream returns False.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() client_conn = create_client_connection() conn.receive_data(client_conn.data_to_send()) # Sending trailers to non-existent stream should return False result = conn.send_trailers(99, [('trailer', 'value')]) assert result is False class TestHTTP2FlowControl: """Test HTTP/2 flow control handling.""" def test_send_data_respects_zero_window(self): """Test that send_data returns False when flow control window is 0.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() # Create client and send preface client_conn = create_client_connection() conn.receive_data(client_conn.data_to_send()) # Send a request client_conn.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) conn.receive_data(client_conn.data_to_send()) # Send response headers without ending stream (pass body=b'' placeholder) # We need to send headers first, so use h2_conn directly conn.h2_conn.send_headers(1, [ (':status', '200'), ('content-type', 'text/plain'), ], end_stream=False) conn._send_pending_data() conn.streams[1].send_headers([(':status', '200')], end_stream=False) # Mock the flow control window to return 0 original_window = conn.h2_conn.local_flow_control_window conn.h2_conn.local_flow_control_window = lambda stream_id: 0 # Try to send data - should return False (not raise) result = conn.send_data(1, b'Hello, World!') assert result is False # Restore conn.h2_conn.local_flow_control_window = original_window def test_send_data_respects_flow_control(self): """Test that send_data chunks data according to flow control window.""" from 
gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() # Create client and send preface client_conn = create_client_connection() conn.receive_data(client_conn.data_to_send()) # Send a request client_conn.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) conn.receive_data(client_conn.data_to_send()) # Send response headers without ending stream conn.h2_conn.send_headers(1, [ (':status', '200'), ('content-type', 'text/plain'), ], end_stream=False) conn._send_pending_data() conn.streams[1].send_headers([(':status', '200')], end_stream=False) # Send small data - should succeed within window small_data = b'Hello' conn.send_data(1, small_data, end_stream=True) # Verify data was sent sent_data = sock.get_sent_data() assert len(sent_data) > 0 class TestHTTP2StreamClosedHandling: """Test graceful handling of StreamClosedError.""" def test_send_response_on_closed_stream(self): """Test that send_response gracefully handles closed stream.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() # Create client and send preface client_conn = create_client_connection() conn.receive_data(client_conn.data_to_send()) # Send a request client_conn.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) conn.receive_data(client_conn.data_to_send()) # Simulate client resetting the stream client_conn.reset_stream(1) conn.receive_data(client_conn.data_to_send()) # Try to send response - should return False, not raise result = conn.send_response(1, 200, [('content-type', 'text/plain')], b'Hello') assert result is False def test_send_data_on_reset_stream(self): """Test that send_data 
gracefully handles reset stream.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() # Create client and send preface client_conn = create_client_connection() conn.receive_data(client_conn.data_to_send()) # Send a request client_conn.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'localhost'), ], end_stream=True) conn.receive_data(client_conn.data_to_send()) # Send response headers without ending stream conn.h2_conn.send_headers(1, [ (':status', '200'), ('content-type', 'text/plain'), ], end_stream=False) conn._send_pending_data() conn.streams[1].send_headers([(':status', '200')], end_stream=False) # Simulate client resetting the stream client_conn.reset_stream(1) conn.receive_data(client_conn.data_to_send()) # Try to send data - should return False, not raise result = conn.send_data(1, b'Hello, World!', end_stream=True) assert result is False class TestHTTP2WindowOverflowHandling: """Test window overflow handling.""" def test_window_overflow_sends_goaway(self): """Test that window overflow results in GOAWAY with FLOW_CONTROL_ERROR.""" from gunicorn.http2.connection import HTTP2ServerConnection from gunicorn.http2.errors import HTTP2ErrorCode cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() # Create client and send preface client_conn = create_client_connection() conn.receive_data(client_conn.data_to_send()) # Mock increment_flow_control_window to raise ValueError (overflow) original_increment = conn.h2_conn.increment_flow_control_window def raise_overflow(increment, stream_id=None): raise ValueError("Flow control window too large") conn.h2_conn.increment_flow_control_window = raise_overflow # Send a request with data to trigger the overflow client_conn.send_headers(1, [ (':method', 'POST'), (':path', '/'), 
(':scheme', 'https'), (':authority', 'localhost'), ], end_stream=False) client_conn.send_data(1, b'test data', end_stream=True) conn.receive_data(client_conn.data_to_send()) # Connection should be closed with FLOW_CONTROL_ERROR assert conn.is_closed is True class TestHTTP2ProtocolErrorHandling: """Test protocol error handling sends proper GOAWAY.""" def test_protocol_error_sends_goaway(self): """Test that protocol errors result in GOAWAY being sent.""" from gunicorn.http2.connection import HTTP2ServerConnection from gunicorn.http2.errors import HTTP2ProtocolError, HTTP2ErrorCode cfg = MockConfig() sock = MockSocket() conn = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) conn.initiate_connection() # Create client and send preface client_conn = create_client_connection() conn.receive_data(client_conn.data_to_send()) # Clear sent data to only capture new frames sock._sent.clear() # Mock h2_conn.receive_data to raise ProtocolError def raise_protocol_error(data): raise h2.exceptions.ProtocolError("Test protocol error") conn.h2_conn.receive_data = raise_protocol_error # This should send GOAWAY and raise ProtocolError with pytest.raises(HTTP2ProtocolError) as exc_info: conn.receive_data(b'dummy data') assert "Test protocol error" in str(exc_info.value) # Verify something was sent (GOAWAY frame) sent_data = sock.get_sent_data() assert len(sent_data) > 0 # Connection should be marked as closed assert conn.is_closed is True class TestHTTP2NotAvailable: """Test behavior when h2 is not available.""" def test_import_error_raises_not_available(self): from gunicorn.http2 import errors # Test that HTTP2NotAvailable can be raised with pytest.raises(errors.HTTP2NotAvailable): raise errors.HTTP2NotAvailable() benoitc-gunicorn-f5fb19e/tests/test_http2_errors.py000066400000000000000000000150061514360242400226750ustar00rootroot00000000000000# -*- coding: utf-8 - # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""Tests for HTTP/2 error classes.""" import pytest from gunicorn.http2.errors import ( HTTP2Error, HTTP2ProtocolError, HTTP2InternalError, HTTP2FlowControlError, HTTP2SettingsTimeout, HTTP2StreamClosed, HTTP2FrameSizeError, HTTP2RefusedStream, HTTP2Cancel, HTTP2CompressionError, HTTP2ConnectError, HTTP2EnhanceYourCalm, HTTP2InadequateSecurity, HTTP2RequiresHTTP11, HTTP2StreamError, HTTP2ConnectionError, HTTP2ConfigurationError, HTTP2NotAvailable, ) class TestHTTP2ErrorCodes: """Test RFC 7540 error codes.""" def test_no_error(self): err = HTTP2Error() assert err.error_code == 0x0 def test_protocol_error(self): err = HTTP2ProtocolError() assert err.error_code == 0x1 def test_internal_error(self): err = HTTP2InternalError() assert err.error_code == 0x2 def test_flow_control_error(self): err = HTTP2FlowControlError() assert err.error_code == 0x3 def test_settings_timeout(self): err = HTTP2SettingsTimeout() assert err.error_code == 0x4 def test_stream_closed(self): err = HTTP2StreamClosed() assert err.error_code == 0x5 def test_frame_size_error(self): err = HTTP2FrameSizeError() assert err.error_code == 0x6 def test_refused_stream(self): err = HTTP2RefusedStream() assert err.error_code == 0x7 def test_cancel(self): err = HTTP2Cancel() assert err.error_code == 0x8 def test_compression_error(self): err = HTTP2CompressionError() assert err.error_code == 0x9 def test_connect_error(self): err = HTTP2ConnectError() assert err.error_code == 0xa def test_enhance_your_calm(self): err = HTTP2EnhanceYourCalm() assert err.error_code == 0xb def test_inadequate_security(self): err = HTTP2InadequateSecurity() assert err.error_code == 0xc def test_http11_required(self): err = HTTP2RequiresHTTP11() assert err.error_code == 0xd class TestHTTP2ErrorInheritance: """Test error class inheritance.""" def test_all_inherit_from_http2error(self): error_classes = [ HTTP2ProtocolError, HTTP2InternalError, HTTP2FlowControlError, HTTP2SettingsTimeout, HTTP2StreamClosed, HTTP2FrameSizeError, 
HTTP2RefusedStream, HTTP2Cancel, HTTP2CompressionError, HTTP2ConnectError, HTTP2EnhanceYourCalm, HTTP2InadequateSecurity, HTTP2RequiresHTTP11, HTTP2StreamError, HTTP2ConnectionError, HTTP2ConfigurationError, HTTP2NotAvailable, ] for cls in error_classes: assert issubclass(cls, HTTP2Error) assert issubclass(cls, Exception) def test_http2error_is_exception(self): assert issubclass(HTTP2Error, Exception) class TestHTTP2ErrorMessages: """Test error message handling.""" def test_default_message_from_docstring(self): err = HTTP2ProtocolError() assert err.message == "Protocol error detected." assert str(err) == "Protocol error detected." def test_custom_message(self): err = HTTP2ProtocolError("Custom error message") assert err.message == "Custom error message" assert str(err) == "Custom error message" def test_custom_error_code(self): err = HTTP2Error("Test", error_code=0xFF) assert err.error_code == 0xFF def test_message_and_error_code(self): err = HTTP2ProtocolError("Custom", error_code=0x99) assert err.message == "Custom" assert err.error_code == 0x99 class TestHTTP2StreamError: """Test stream-specific error handling.""" def test_stream_id_in_error(self): err = HTTP2StreamError(stream_id=5) assert err.stream_id == 5 def test_stream_error_str(self): err = HTTP2StreamError(stream_id=7, message="Stream reset") assert "Stream 7" in str(err) assert "Stream reset" in str(err) def test_stream_error_default_message(self): err = HTTP2StreamError(stream_id=3) assert err.stream_id == 3 assert "Stream 3" in str(err) def test_stream_error_with_error_code(self): err = HTTP2StreamError(stream_id=1, error_code=0x8) assert err.stream_id == 1 assert err.error_code == 0x8 class TestHTTP2ConnectionError: """Test connection-level error handling.""" def test_connection_error_basic(self): err = HTTP2ConnectionError("Connection failed") assert str(err) == "Connection failed" assert isinstance(err, HTTP2Error) class TestHTTP2ConfigurationError: """Test configuration error handling.""" def 
test_configuration_error_basic(self): err = HTTP2ConfigurationError("Invalid setting") assert str(err) == "Invalid setting" assert isinstance(err, HTTP2Error) class TestHTTP2NotAvailable: """Test HTTP/2 unavailable error.""" def test_default_message(self): err = HTTP2NotAvailable() assert "h2 library" in err.message assert "pip install" in err.message def test_custom_message(self): err = HTTP2NotAvailable("Custom unavailable message") assert err.message == "Custom unavailable message" def test_inherits_from_http2error(self): err = HTTP2NotAvailable() assert isinstance(err, HTTP2Error) class TestErrorRaising: """Test that errors can be properly raised and caught.""" def test_raise_and_catch_http2error(self): with pytest.raises(HTTP2Error): raise HTTP2ProtocolError("Test") def test_raise_and_catch_specific(self): with pytest.raises(HTTP2ProtocolError): raise HTTP2ProtocolError("Test") def test_raise_stream_error(self): with pytest.raises(HTTP2StreamError) as exc_info: raise HTTP2StreamError(stream_id=5, message="Test stream error") assert exc_info.value.stream_id == 5 def test_error_chaining(self): try: try: raise ValueError("Original") except ValueError as e: raise HTTP2InternalError("Wrapped") from e except HTTP2InternalError as err: assert err.__cause__ is not None assert isinstance(err.__cause__, ValueError) benoitc-gunicorn-f5fb19e/tests/test_http2_integration.py000066400000000000000000000521031514360242400237030ustar00rootroot00000000000000# -*- coding: utf-8 - # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""Integration tests for HTTP/2 with full request/response cycles.""" import pytest from io import BytesIO # Check if h2 is available try: import h2.connection import h2.config import h2.events H2_AVAILABLE = True except ImportError: H2_AVAILABLE = False pytestmark = pytest.mark.skipif(not H2_AVAILABLE, reason="h2 library not available") def get_header_value(headers_list, name): """Extract a header value from h2 headers list. h2 library may return headers as bytes or strings depending on version. """ for header_name, header_value in headers_list: name_str = header_name.decode() if isinstance(header_name, bytes) else header_name if name_str == name: return header_value.decode() if isinstance(header_value, bytes) else header_value return None class MockConfig: """Mock gunicorn configuration for HTTP/2.""" def __init__(self): self.http2_max_concurrent_streams = 100 self.http2_initial_window_size = 65535 self.http2_max_frame_size = 16384 self.http2_max_header_list_size = 65536 class MockSocket: """Mock socket for integration testing.""" def __init__(self, data=b''): self._recv_buffer = BytesIO(data) self._sent = bytearray() def recv(self, size): return self._recv_buffer.read(size) def sendall(self, data): self._sent.extend(data) def get_sent_data(self): return bytes(self._sent) def set_recv_data(self, data): self._recv_buffer = BytesIO(data) def clear_sent(self): self._sent.clear() def create_h2_client(): """Create an h2 client connection.""" config = h2.config.H2Configuration(client_side=True) conn = h2.connection.H2Connection(config=config) conn.initiate_connection() return conn class TestSimpleRequestResponse: """Test simple request/response cycles.""" def test_get_request_text_response(self): """Test a complete GET request with text response.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() server = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) server.initiate_connection() # Client setup client = 
create_h2_client() sock.set_recv_data(client.data_to_send()) server.receive_data() client.receive_data(sock.get_sent_data()) # Client sends request client.send_headers(1, [ (':method', 'GET'), (':path', '/hello'), (':scheme', 'https'), (':authority', 'example.com'), ('accept', 'text/plain'), ], end_stream=True) sock.set_recv_data(client.data_to_send()) # Server receives request requests = server.receive_data() assert len(requests) == 1 req = requests[0] # Verify request properties assert req.method == 'GET' assert req.path == '/hello' assert req.version == (2, 0) assert req.get_header('ACCEPT') == 'text/plain' # Server sends response sock.clear_sent() server.send_response( stream_id=1, status=200, headers=[ ('content-type', 'text/plain'), ('content-length', '12'), ], body=b'Hello World!' ) # Client verifies response events = client.receive_data(sock.get_sent_data()) response_events = [e for e in events if isinstance(e, h2.events.ResponseReceived)] assert len(response_events) == 1 headers_list = response_events[0].headers assert get_header_value(headers_list, ':status') == '200' assert get_header_value(headers_list, 'content-type') == 'text/plain' data_events = [e for e in events if isinstance(e, h2.events.DataReceived)] assert len(data_events) == 1 assert data_events[0].data == b'Hello World!' 
def test_post_request_with_json_body(self): """Test POST request with JSON body and response.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() server = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) server.initiate_connection() client = create_h2_client() sock.set_recv_data(client.data_to_send()) server.receive_data() client.receive_data(sock.get_sent_data()) # Client sends POST with body request_body = b'{"username": "test", "action": "login"}' client.send_headers(1, [ (':method', 'POST'), (':path', '/api/login'), (':scheme', 'https'), (':authority', 'api.example.com'), ('content-type', 'application/json'), ('content-length', str(len(request_body))), ], end_stream=False) client.send_data(1, request_body, end_stream=True) sock.set_recv_data(client.data_to_send()) requests = server.receive_data() assert len(requests) == 1 req = requests[0] assert req.method == 'POST' assert req.content_type == 'application/json' assert req.body.read() == request_body # Server responds sock.clear_sent() response_body = b'{"status": "success", "token": "abc123"}' server.send_response( stream_id=1, status=200, headers=[ ('content-type', 'application/json'), ('content-length', str(len(response_body))), ], body=response_body ) events = client.receive_data(sock.get_sent_data()) data_events = [e for e in events if isinstance(e, h2.events.DataReceived)] assert data_events[0].data == response_body class TestMultipleStreams: """Test concurrent stream handling.""" def test_concurrent_requests(self): """Test handling multiple concurrent requests.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() server = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) server.initiate_connection() client = create_h2_client() sock.set_recv_data(client.data_to_send()) server.receive_data() client.receive_data(sock.get_sent_data()) # Client sends three concurrent requests for stream_id, path in 
[(1, '/one'), (3, '/two'), (5, '/three')]: client.send_headers(stream_id, [ (':method', 'GET'), (':path', path), (':scheme', 'https'), (':authority', 'example.com'), ], end_stream=True) sock.set_recv_data(client.data_to_send()) requests = server.receive_data() assert len(requests) == 3 paths = {req.path for req in requests} assert paths == {'/one', '/two', '/three'} # Server responds to all sock.clear_sent() for req in requests: server.send_response( stream_id=req.stream.stream_id, status=200, headers=[('x-path', req.path)], body=req.path.encode() ) events = client.receive_data(sock.get_sent_data()) response_events = [e for e in events if isinstance(e, h2.events.ResponseReceived)] assert len(response_events) == 3 def test_interleaved_request_response(self): """Test interleaved request and response processing.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() server = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) server.initiate_connection() client = create_h2_client() sock.set_recv_data(client.data_to_send()) server.receive_data() client.receive_data(sock.get_sent_data()) # First request client.send_headers(1, [ (':method', 'GET'), (':path', '/first'), (':scheme', 'https'), (':authority', 'example.com'), ], end_stream=True) sock.set_recv_data(client.data_to_send()) requests = server.receive_data() assert len(requests) == 1 # Respond to first before second arrives sock.clear_sent() server.send_response(1, 200, [], b'First response') client.receive_data(sock.get_sent_data()) # Second request client.send_headers(3, [ (':method', 'GET'), (':path', '/second'), (':scheme', 'https'), (':authority', 'example.com'), ], end_stream=True) sock.set_recv_data(client.data_to_send()) requests = server.receive_data() assert len(requests) == 1 # Respond to second sock.clear_sent() server.send_response(3, 200, [], b'Second response') events = client.receive_data(sock.get_sent_data()) data_events = [e for e in events if 
isinstance(e, h2.events.DataReceived)] assert data_events[0].data == b'Second response' class TestErrorHandling: """Test error response scenarios.""" def test_404_response(self): """Test 404 Not Found response.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() server = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) server.initiate_connection() client = create_h2_client() sock.set_recv_data(client.data_to_send()) server.receive_data() client.receive_data(sock.get_sent_data()) client.send_headers(1, [ (':method', 'GET'), (':path', '/nonexistent'), (':scheme', 'https'), (':authority', 'example.com'), ], end_stream=True) sock.set_recv_data(client.data_to_send()) server.receive_data() sock.clear_sent() server.send_error(1, 404, "Not Found") events = client.receive_data(sock.get_sent_data()) response_events = [e for e in events if isinstance(e, h2.events.ResponseReceived)] headers_list = response_events[0].headers assert get_header_value(headers_list, ':status') == '404' def test_500_response(self): """Test 500 Internal Server Error response.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() server = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) server.initiate_connection() client = create_h2_client() sock.set_recv_data(client.data_to_send()) server.receive_data() client.receive_data(sock.get_sent_data()) client.send_headers(1, [ (':method', 'GET'), (':path', '/error'), (':scheme', 'https'), (':authority', 'example.com'), ], end_stream=True) sock.set_recv_data(client.data_to_send()) server.receive_data() sock.clear_sent() server.send_error(1, 500, "Internal Server Error") events = client.receive_data(sock.get_sent_data()) response_events = [e for e in events if isinstance(e, h2.events.ResponseReceived)] headers_list = response_events[0].headers assert get_header_value(headers_list, ':status') == '500' def test_stream_reset_by_server(self): """Test 
server resetting a stream.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() server = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) server.initiate_connection() client = create_h2_client() sock.set_recv_data(client.data_to_send()) server.receive_data() client.receive_data(sock.get_sent_data()) # Start a request but don't finish client.send_headers(1, [ (':method', 'POST'), (':path', '/upload'), (':scheme', 'https'), (':authority', 'example.com'), ], end_stream=False) sock.set_recv_data(client.data_to_send()) server.receive_data() # Server resets the stream sock.clear_sent() server.reset_stream(1, error_code=0x8) # CANCEL events = client.receive_data(sock.get_sent_data()) reset_events = [e for e in events if isinstance(e, h2.events.StreamReset)] assert len(reset_events) == 1 assert reset_events[0].error_code == 0x8 class TestConnectionLifecycle: """Test connection lifecycle events.""" def test_graceful_shutdown(self): """Test graceful connection shutdown with GOAWAY.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() server = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) server.initiate_connection() client = create_h2_client() sock.set_recv_data(client.data_to_send()) server.receive_data() client.receive_data(sock.get_sent_data()) # Process a request first client.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'example.com'), ], end_stream=True) sock.set_recv_data(client.data_to_send()) server.receive_data() sock.clear_sent() server.send_response(1, 200, [], b'OK') client.receive_data(sock.get_sent_data()) # Server initiates graceful shutdown sock.clear_sent() server.close() events = client.receive_data(sock.get_sent_data()) goaway_events = [e for e in events if isinstance(e, h2.events.ConnectionTerminated)] assert len(goaway_events) == 1 def test_client_initiated_close(self): """Test handling 
client-initiated connection close.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() server = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) server.initiate_connection() client = create_h2_client() sock.set_recv_data(client.data_to_send()) server.receive_data() client.receive_data(sock.get_sent_data()) # Client closes connection client.close_connection() sock.set_recv_data(client.data_to_send()) server.receive_data() assert server.is_closed is True class TestLargePayloads: """Test handling of large payloads.""" def test_moderate_request_body(self): """Test handling moderate-sized request body within flow control.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() server = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) server.initiate_connection() client = create_h2_client() sock.set_recv_data(client.data_to_send()) server.receive_data() client.receive_data(sock.get_sent_data()) # Send body that fits within initial window (65535 bytes) body = b'X' * 10000 client.send_headers(1, [ (':method', 'POST'), (':path', '/upload'), (':scheme', 'https'), (':authority', 'example.com'), ('content-length', str(len(body))), ], end_stream=False) client.send_data(1, body, end_stream=True) sock.set_recv_data(client.data_to_send()) requests = server.receive_data() assert len(requests) == 1 received_body = requests[0].body.read() assert len(received_body) == len(body) assert received_body == body def test_moderate_response_body(self): """Test sending moderate-sized response body.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() server = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) server.initiate_connection() client = create_h2_client() sock.set_recv_data(client.data_to_send()) server.receive_data() client.receive_data(sock.get_sent_data()) client.send_headers(1, [ (':method', 'GET'), (':path', 
'/moderate'), (':scheme', 'https'), (':authority', 'example.com'), ], end_stream=True) sock.set_recv_data(client.data_to_send()) server.receive_data() # Send moderate response (within max frame size) moderate_body = b'Y' * 8000 sock.clear_sent() server.send_response(1, 200, [('content-length', str(len(moderate_body)))], moderate_body) # Client receives response events = client.receive_data(sock.get_sent_data()) data_events = [e for e in events if isinstance(e, h2.events.DataReceived)] received_data = b''.join(e.data for e in data_events) assert received_data == moderate_body class TestSpecialCases: """Test special/edge cases.""" def test_head_request(self): """Test HEAD request (no body in response).""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() server = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) server.initiate_connection() client = create_h2_client() sock.set_recv_data(client.data_to_send()) server.receive_data() client.receive_data(sock.get_sent_data()) client.send_headers(1, [ (':method', 'HEAD'), (':path', '/resource'), (':scheme', 'https'), (':authority', 'example.com'), ], end_stream=True) sock.set_recv_data(client.data_to_send()) requests = server.receive_data() assert requests[0].method == 'HEAD' # Send response with content-length but no body sock.clear_sent() server.send_response( 1, 200, [('content-length', '1000'), ('content-type', 'text/html')], body=None ) events = client.receive_data(sock.get_sent_data()) stream_ended = [e for e in events if isinstance(e, h2.events.StreamEnded)] assert len(stream_ended) == 1 def test_options_request(self): """Test OPTIONS request.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() server = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) server.initiate_connection() client = create_h2_client() sock.set_recv_data(client.data_to_send()) server.receive_data() 
client.receive_data(sock.get_sent_data()) client.send_headers(1, [ (':method', 'OPTIONS'), (':path', '*'), (':scheme', 'https'), (':authority', 'example.com'), ], end_stream=True) sock.set_recv_data(client.data_to_send()) requests = server.receive_data() assert requests[0].method == 'OPTIONS' assert requests[0].uri == '*' def test_request_with_query_string(self): """Test request with query string parameters.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() server = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) server.initiate_connection() client = create_h2_client() sock.set_recv_data(client.data_to_send()) server.receive_data() client.receive_data(sock.get_sent_data()) client.send_headers(1, [ (':method', 'GET'), (':path', '/search?q=test&page=2&sort=desc'), (':scheme', 'https'), (':authority', 'example.com'), ], end_stream=True) sock.set_recv_data(client.data_to_send()) requests = server.receive_data() req = requests[0] assert req.path == '/search' assert req.query == 'q=test&page=2&sort=desc' def test_request_with_multiple_headers_same_name(self): """Test request with multiple headers of the same name.""" from gunicorn.http2.connection import HTTP2ServerConnection cfg = MockConfig() sock = MockSocket() server = HTTP2ServerConnection(cfg, sock, ('127.0.0.1', 12345)) server.initiate_connection() client = create_h2_client() sock.set_recv_data(client.data_to_send()) server.receive_data() client.receive_data(sock.get_sent_data()) client.send_headers(1, [ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'example.com'), ('accept', 'text/html'), ('accept', 'application/json'), ('accept', '*/*'), ], end_stream=True) sock.set_recv_data(client.data_to_send()) requests = server.receive_data() req = requests[0] accept_headers = [h[1] for h in req.headers if h[0] == 'ACCEPT'] assert len(accept_headers) == 3 
benoitc-gunicorn-f5fb19e/tests/test_http2_request.py000066400000000000000000000545771514360242400230710ustar00rootroot00000000000000# -*- coding: utf-8 - # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """Tests for HTTP/2 request and body classes.""" import pytest from gunicorn.http2.request import HTTP2Request, HTTP2Body from gunicorn.http2.stream import HTTP2Stream class MockConnection: """Mock HTTP/2 connection for testing.""" def __init__(self, initial_window_size=65535): self.initial_window_size = initial_window_size class MockConfig: """Mock gunicorn configuration.""" def __init__(self): pass class TestHTTP2Body: """Test HTTP2Body class.""" def test_init_with_data(self): body = HTTP2Body(b"Hello, World!") assert len(body) == 13 def test_init_empty(self): body = HTTP2Body(b"") assert len(body) == 0 def test_read_all(self): body = HTTP2Body(b"Test data") assert body.read() == b"Test data" assert body.read() == b"" # Already consumed def test_read_with_size(self): body = HTTP2Body(b"Hello, World!") assert body.read(5) == b"Hello" assert body.read(2) == b", " assert body.read(100) == b"World!" 
assert body.read(1) == b"" def test_read_none_size(self): body = HTTP2Body(b"Test") assert body.read(None) == b"Test" def test_readline_basic(self): body = HTTP2Body(b"Line1\nLine2\nLine3") assert body.readline() == b"Line1\n" assert body.readline() == b"Line2\n" assert body.readline() == b"Line3" def test_readline_with_size(self): body = HTTP2Body(b"Hello\nWorld") assert body.readline(3) == b"Hel" assert body.readline(10) == b"lo\n" def test_readline_no_newline(self): body = HTTP2Body(b"No newline here") assert body.readline() == b"No newline here" def test_readline_empty(self): body = HTTP2Body(b"") assert body.readline() == b"" def test_readline_crlf(self): body = HTTP2Body(b"Line1\r\nLine2") # BytesIO readline includes \r\n assert body.readline() == b"Line1\r\n" def test_readlines_basic(self): body = HTTP2Body(b"Line1\nLine2\nLine3") lines = body.readlines() assert lines == [b"Line1\n", b"Line2\n", b"Line3"] def test_readlines_with_hint(self): body = HTTP2Body(b"Line1\nLine2\nLine3\nLine4") # Hint affects how many lines are returned lines = body.readlines(hint=5) assert len(lines) >= 1 def test_readlines_empty(self): body = HTTP2Body(b"") assert body.readlines() == [] def test_iter(self): body = HTTP2Body(b"Line1\nLine2\nLine3") lines = list(body) assert lines == [b"Line1\n", b"Line2\n", b"Line3"] def test_len(self): body = HTTP2Body(b"12345") assert len(body) == 5 def test_close(self): body = HTTP2Body(b"test") body.close() # Should not raise with pytest.raises(ValueError): body.read() class TestHTTP2BodyReadStrategies: """Test different reading strategies matching HTTP/1.x patterns.""" def test_read_all_at_once(self): data = b"A" * 1000 body = HTTP2Body(data) result = body.read() assert result == data def test_read_chunked(self): data = b"A" * 100 body = HTTP2Body(data) chunks = [] while True: chunk = body.read(10) if not chunk: break chunks.append(chunk) assert b"".join(chunks) == data assert len(chunks) == 10 def test_read_byte_by_byte(self): data = 
b"Hello" body = HTTP2Body(data) result = [] for _ in range(len(data)): result.append(body.read(1)) assert b"".join(result) == data def test_readline_all_lines(self): data = b"Line1\nLine2\nLine3\n" body = HTTP2Body(data) lines = [] while True: line = body.readline() if not line: break lines.append(line) assert lines == [b"Line1\n", b"Line2\n", b"Line3\n"] class TestHTTP2Request: """Test HTTP2Request class.""" def _make_stream(self, headers, body=b""): """Helper to create a stream with headers and body.""" conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.receive_headers(headers, end_stream=(len(body) == 0)) if body: stream.request_body.write(body) stream.request_complete = True return stream def test_basic_get_request(self): stream = self._make_stream([ (':method', 'GET'), (':path', '/test'), (':scheme', 'https'), (':authority', 'example.com'), ]) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) assert req.method == 'GET' assert req.uri == '/test' assert req.path == '/test' assert req.scheme == 'https' assert req.version == (2, 0) def test_post_request_with_body(self): stream = self._make_stream( [ (':method', 'POST'), (':path', '/submit'), (':scheme', 'https'), (':authority', 'api.example.com'), ('content-type', 'application/json'), ('content-length', '13'), ], body=b'{"key":"val"}' ) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('192.168.1.1', 54321)) assert req.method == 'POST' assert req.body.read() == b'{"key":"val"}' assert req.content_type == 'application/json' assert req.content_length == 13 def test_path_with_query_string(self): stream = self._make_stream([ (':method', 'GET'), (':path', '/search?q=test&page=1'), (':scheme', 'https'), (':authority', 'example.com'), ]) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) assert req.path == '/search' assert req.query == 'q=test&page=1' assert req.uri == '/search?q=test&page=1' def test_path_with_fragment(self): stream = 
self._make_stream([ (':method', 'GET'), (':path', '/page#section'), (':scheme', 'https'), (':authority', 'example.com'), ]) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) assert req.path == '/page' assert req.fragment == 'section' def test_headers_uppercase_conversion(self): """HTTP/2 headers are lowercase, should be converted to uppercase.""" stream = self._make_stream([ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'example.com'), ('content-type', 'text/html'), ('accept-language', 'en-US'), ('x-custom-header', 'custom-value'), ]) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) header_names = [h[0] for h in req.headers] assert 'CONTENT-TYPE' in header_names assert 'ACCEPT-LANGUAGE' in header_names assert 'X-CUSTOM-HEADER' in header_names def test_host_header_from_authority(self): """Host header should be generated from :authority pseudo-header.""" stream = self._make_stream([ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'test.example.com:8080'), ]) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) host = req.get_header('HOST') assert host == 'test.example.com:8080' def test_authority_overrides_host_header(self): """:authority MUST override Host header per RFC 9113 section 8.3.1.""" stream = self._make_stream([ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'authority.example.com'), ('host', 'explicit.example.com'), ]) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) # Count HOST headers - should be exactly one, from :authority host_headers = [h for h in req.headers if h[0] == 'HOST'] assert len(host_headers) == 1 assert host_headers[0][1] == 'authority.example.com' def test_get_header_case_insensitive(self): stream = self._make_stream([ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'example.com'), ('x-test-header', 'test-value'), ]) cfg = MockConfig() req = 
HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) assert req.get_header('X-TEST-HEADER') == 'test-value' assert req.get_header('x-test-header') == 'test-value' assert req.get_header('X-Test-Header') == 'test-value' def test_get_header_not_found(self): stream = self._make_stream([ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'example.com'), ]) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) assert req.get_header('X-Not-Exists') is None def test_content_length_property(self): stream = self._make_stream([ (':method', 'POST'), (':path', '/'), (':scheme', 'https'), (':authority', 'example.com'), ('content-length', '42'), ]) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) assert req.content_length == 42 def test_content_length_none_when_missing(self): stream = self._make_stream([ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'example.com'), ]) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) assert req.content_length is None def test_content_length_invalid_value(self): stream = self._make_stream([ (':method', 'POST'), (':path', '/'), (':scheme', 'https'), (':authority', 'example.com'), ('content-length', 'not-a-number'), ]) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) assert req.content_length is None def test_content_type_property(self): stream = self._make_stream([ (':method', 'POST'), (':path', '/'), (':scheme', 'https'), (':authority', 'example.com'), ('content-type', 'application/json; charset=utf-8'), ]) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) assert req.content_type == 'application/json; charset=utf-8' def test_content_type_none_when_missing(self): stream = self._make_stream([ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'example.com'), ]) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) assert req.content_type is None class 
TestHTTP2RequestConnectionState: """Test connection state methods.""" def _make_stream(self, headers): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.receive_headers(headers, end_stream=True) return stream def test_should_close_default_false(self): """HTTP/2 connections are persistent by default.""" stream = self._make_stream([ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'example.com'), ]) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) assert req.should_close() is False def test_force_close(self): stream = self._make_stream([ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'example.com'), ]) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) req.force_close() assert req.should_close() is True assert req.must_close is True class TestHTTP2RequestTrailers: """Test request trailers handling.""" def test_no_trailers(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.receive_headers([ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'example.com'), ], end_stream=True) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) assert req.trailers == [] def test_with_trailers(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.receive_headers([ (':method', 'POST'), (':path', '/'), (':scheme', 'https'), (':authority', 'example.com'), ], end_stream=False) stream.state = stream.state # Keep state stream.trailers = [ ('grpc-status', '0'), ('grpc-message', 'OK'), ] cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) assert len(req.trailers) == 2 assert ('GRPC-STATUS', '0') in req.trailers assert ('GRPC-MESSAGE', 'OK') in req.trailers class TestHTTP2RequestMetadata: """Test request metadata properties.""" def _make_stream(self, headers, stream_id=1): conn = MockConnection() stream = 
HTTP2Stream(stream_id=stream_id, connection=conn) stream.receive_headers(headers, end_stream=True) return stream def test_version_is_http2(self): stream = self._make_stream([ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'example.com'), ]) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) assert req.version == (2, 0) def test_req_number_is_stream_id(self): stream = self._make_stream([ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'example.com'), ], stream_id=5) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) assert req.req_number == 5 def test_peer_addr(self): stream = self._make_stream([ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'example.com'), ]) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('10.0.0.1', 54321)) assert req.peer_addr == ('10.0.0.1', 54321) assert req.remote_addr == ('10.0.0.1', 54321) def test_proxy_protocol_info_none(self): """HTTP/2 doesn't use proxy protocol through data stream.""" stream = self._make_stream([ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'example.com'), ]) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) assert req.proxy_protocol_info is None class TestHTTP2RequestRepr: """Test request string representation.""" def test_repr_format(self): conn = MockConnection() stream = HTTP2Stream(stream_id=3, connection=conn) stream.receive_headers([ (':method', 'POST'), (':path', '/api/users'), (':scheme', 'https'), (':authority', 'example.com'), ], end_stream=True) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) repr_str = repr(req) assert "HTTP2Request" in repr_str assert "method=POST" in repr_str assert "path=/api/users" in repr_str assert "stream_id=3" in repr_str class TestHTTP2RequestDefaults: """Test default values when pseudo-headers are missing.""" def test_default_method(self): conn = MockConnection() stream = 
HTTP2Stream(stream_id=1, connection=conn) stream.receive_headers([ (':path', '/'), (':scheme', 'https'), (':authority', 'example.com'), ], end_stream=True) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) assert req.method == 'GET' def test_default_scheme(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.receive_headers([ (':method', 'GET'), (':path', '/'), (':authority', 'example.com'), ], end_stream=True) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) assert req.scheme == 'https' def test_default_path(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.receive_headers([ (':method', 'GET'), (':scheme', 'https'), (':authority', 'example.com'), ], end_stream=True) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) assert req.uri == '/' assert req.path == '/' class TestHTTP2RequestPriority: """Test HTTP2Request priority attributes.""" def test_default_priority_values(self): """Test that request inherits default stream priority.""" conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.receive_headers([ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'example.com'), ], end_stream=True) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) assert req.priority_weight == 16 assert req.priority_depends_on == 0 def test_custom_priority_values(self): """Test that request inherits custom stream priority.""" conn = MockConnection() stream = HTTP2Stream(stream_id=3, connection=conn) # Update priority before creating request stream.update_priority(weight=200, depends_on=1) stream.receive_headers([ (':method', 'POST'), (':path', '/api/data'), (':scheme', 'https'), (':authority', 'example.com'), ], end_stream=False) stream.receive_data(b'{"data": "test"}', end_stream=True) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('192.168.1.100', 54321)) assert 
req.priority_weight == 200 assert req.priority_depends_on == 1 def test_priority_reflects_stream_at_request_creation(self): """Test that priority reflects stream state when request is created.""" conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.receive_headers([ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'example.com'), ], end_stream=True) cfg = MockConfig() # Create request with default priority req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) assert req.priority_weight == 16 # Update stream priority after request was created stream.update_priority(weight=256) # Request should still have old value (captured at creation time) assert req.priority_weight == 16 # Stream has new value assert stream.priority_weight == 256 class MockWSGIConfig: """Mock gunicorn configuration with WSGI-required attributes.""" def __init__(self): self.errorlog = '-' self.workers = 1 class TestHTTP2RequestWSGIEnviron: """Test HTTP/2 priority in WSGI environ.""" def test_priority_in_wsgi_environ(self): """Test that HTTP/2 priority is added to WSGI environ.""" from unittest import mock from gunicorn.http.wsgi import create conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.update_priority(weight=128, depends_on=3) stream.receive_headers([ (':method', 'GET'), (':path', '/test'), (':scheme', 'https'), (':authority', 'example.com'), ], end_stream=True) cfg = MockConfig() req = HTTP2Request(stream, cfg, ('127.0.0.1', 12345)) # Create a mock socket mock_sock = mock.Mock() mock_sock.getsockname.return_value = ('127.0.0.1', 8443) # Use WSGI config for environ creation wsgi_cfg = MockWSGIConfig() # Create WSGI environ resp, environ = create(req, mock_sock, ('127.0.0.1', 12345), ('127.0.0.1', 8443), wsgi_cfg) # Verify priority is in environ assert environ.get('gunicorn.http2.priority_weight') == 128 assert environ.get('gunicorn.http2.priority_depends_on') == 3 def 
test_priority_not_in_environ_for_http1(self): """Test that HTTP/1 requests don't have priority keys.""" from unittest import mock from gunicorn.http.wsgi import create # Create a mock HTTP/1 request (no priority attributes) mock_req = mock.Mock() mock_req.headers = [('HOST', 'example.com')] mock_req.scheme = 'https' mock_req.path = '/test' mock_req.query = '' mock_req.fragment = '' mock_req.method = 'GET' mock_req.uri = '/test' mock_req.version = (1, 1) mock_req._expected_100_continue = False mock_req.proxy_protocol_info = None mock_req.body = mock.Mock() # Remove priority attributes to simulate HTTP/1 request del mock_req.priority_weight del mock_req.priority_depends_on wsgi_cfg = MockWSGIConfig() mock_sock = mock.Mock() mock_sock.getsockname.return_value = ('127.0.0.1', 8443) resp, environ = create(mock_req, mock_sock, ('127.0.0.1', 12345), ('127.0.0.1', 8443), wsgi_cfg) # HTTP/1 requests should not have priority keys assert 'gunicorn.http2.priority_weight' not in environ assert 'gunicorn.http2.priority_depends_on' not in environ benoitc-gunicorn-f5fb19e/tests/test_http2_stream.py000066400000000000000000000641001514360242400226530ustar00rootroot00000000000000# -*- coding: utf-8 - # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
"""Tests for HTTP/2 stream state management.""" import pytest from gunicorn.http2.stream import HTTP2Stream, StreamState from gunicorn.http2.errors import HTTP2StreamError class MockConnection: """Mock HTTP/2 connection for testing streams.""" def __init__(self, initial_window_size=65535): self.initial_window_size = initial_window_size class TestStreamState: """Test StreamState enum values.""" def test_state_values_exist(self): assert StreamState.IDLE is not None assert StreamState.RESERVED_LOCAL is not None assert StreamState.RESERVED_REMOTE is not None assert StreamState.OPEN is not None assert StreamState.HALF_CLOSED_LOCAL is not None assert StreamState.HALF_CLOSED_REMOTE is not None assert StreamState.CLOSED is not None def test_states_are_unique(self): states = [ StreamState.IDLE, StreamState.RESERVED_LOCAL, StreamState.RESERVED_REMOTE, StreamState.OPEN, StreamState.HALF_CLOSED_LOCAL, StreamState.HALF_CLOSED_REMOTE, StreamState.CLOSED, ] assert len(states) == len(set(states)) class TestHTTP2StreamInitialization: """Test stream initialization.""" def test_basic_init(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) assert stream.stream_id == 1 assert stream.connection is conn assert stream.state == StreamState.IDLE assert stream.request_headers == [] assert stream.request_complete is False assert stream.response_started is False assert stream.response_headers_sent is False assert stream.response_complete is False assert stream.window_size == 65535 assert stream.trailers is None def test_custom_window_size(self): conn = MockConnection(initial_window_size=32768) stream = HTTP2Stream(stream_id=3, connection=conn) assert stream.window_size == 32768 class TestStreamIdProperties: """Test stream ID classification properties.""" def test_is_client_stream_odd_ids(self): conn = MockConnection() for stream_id in [1, 3, 5, 7, 99, 101]: stream = HTTP2Stream(stream_id=stream_id, connection=conn) assert stream.is_client_stream is True assert 
stream.is_server_stream is False def test_is_server_stream_even_ids(self): conn = MockConnection() for stream_id in [2, 4, 6, 8, 100, 102]: stream = HTTP2Stream(stream_id=stream_id, connection=conn) assert stream.is_client_stream is False assert stream.is_server_stream is True def test_stream_id_zero(self): conn = MockConnection() stream = HTTP2Stream(stream_id=0, connection=conn) assert stream.is_client_stream is False assert stream.is_server_stream is True class TestCanReceiveProperty: """Test can_receive property.""" def test_can_receive_in_open_state(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.OPEN assert stream.can_receive is True def test_can_receive_in_half_closed_local(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.HALF_CLOSED_LOCAL assert stream.can_receive is True def test_cannot_receive_in_idle(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) assert stream.state == StreamState.IDLE assert stream.can_receive is False def test_cannot_receive_in_half_closed_remote(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.HALF_CLOSED_REMOTE assert stream.can_receive is False def test_cannot_receive_in_closed(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.CLOSED assert stream.can_receive is False class TestCanSendProperty: """Test can_send property.""" def test_can_send_in_open_state(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.OPEN assert stream.can_send is True def test_can_send_in_half_closed_remote(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.HALF_CLOSED_REMOTE assert stream.can_send is True def test_cannot_send_in_idle(self): conn = MockConnection() stream = 
HTTP2Stream(stream_id=1, connection=conn) assert stream.state == StreamState.IDLE assert stream.can_send is False def test_cannot_send_in_half_closed_local(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.HALF_CLOSED_LOCAL assert stream.can_send is False def test_cannot_send_in_closed(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.CLOSED assert stream.can_send is False class TestReceiveHeaders: """Test receive_headers method.""" def test_receive_headers_from_idle(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) headers = [(':method', 'GET'), (':path', '/')] stream.receive_headers(headers, end_stream=False) assert stream.state == StreamState.OPEN assert stream.request_headers == headers assert stream.request_complete is False def test_receive_headers_with_end_stream(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) headers = [(':method', 'GET'), (':path', '/')] stream.receive_headers(headers, end_stream=True) assert stream.state == StreamState.HALF_CLOSED_REMOTE assert stream.request_complete is True def test_receive_headers_in_open_state(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.OPEN headers = [('content-type', 'text/plain')] stream.receive_headers(headers, end_stream=False) assert stream.state == StreamState.OPEN assert stream.request_headers == headers def test_receive_headers_extends_existing(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.receive_headers([(':method', 'POST')], end_stream=False) stream.receive_headers([('content-type', 'text/plain')], end_stream=False) assert len(stream.request_headers) == 2 def test_receive_headers_in_invalid_state(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.CLOSED with 
pytest.raises(HTTP2StreamError) as exc_info: stream.receive_headers([], end_stream=False) assert exc_info.value.stream_id == 1 class TestReceiveData: """Test receive_data method.""" def test_receive_data_in_open_state(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.OPEN stream.receive_data(b"Hello, World!", end_stream=False) assert stream.request_body.getvalue() == b"Hello, World!" assert stream.request_complete is False def test_receive_data_with_end_stream(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.OPEN stream.receive_data(b"Final data", end_stream=True) assert stream.state == StreamState.HALF_CLOSED_REMOTE assert stream.request_complete is True def test_receive_data_accumulates(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.OPEN stream.receive_data(b"Part1") stream.receive_data(b"Part2") stream.receive_data(b"Part3", end_stream=True) assert stream.request_body.getvalue() == b"Part1Part2Part3" def test_receive_data_in_half_closed_local(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.HALF_CLOSED_LOCAL stream.receive_data(b"data", end_stream=False) assert stream.request_body.getvalue() == b"data" def test_receive_data_in_invalid_state(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.HALF_CLOSED_REMOTE with pytest.raises(HTTP2StreamError) as exc_info: stream.receive_data(b"data", end_stream=False) assert exc_info.value.stream_id == 1 class TestReceiveTrailers: """Test receive_trailers method.""" def test_receive_trailers_in_open_state(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.OPEN trailers = [('grpc-status', '0')] stream.receive_trailers(trailers) assert stream.trailers == trailers assert 
stream.state == StreamState.HALF_CLOSED_REMOTE assert stream.request_complete is True def test_receive_trailers_in_invalid_state(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.CLOSED with pytest.raises(HTTP2StreamError): stream.receive_trailers([]) class TestSendHeaders: """Test send_headers method.""" def test_send_headers_in_open_state(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.OPEN headers = [(':status', '200')] stream.send_headers(headers, end_stream=False) assert stream.response_started is True assert stream.response_headers_sent is True assert stream.response_complete is False assert stream.state == StreamState.OPEN def test_send_headers_with_end_stream(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.OPEN stream.send_headers([(':status', '204')], end_stream=True) assert stream.state == StreamState.HALF_CLOSED_LOCAL assert stream.response_complete is True def test_send_headers_in_half_closed_remote(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.HALF_CLOSED_REMOTE stream.send_headers([(':status', '200')], end_stream=False) assert stream.response_headers_sent is True def test_send_headers_in_invalid_state(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.HALF_CLOSED_LOCAL with pytest.raises(HTTP2StreamError): stream.send_headers([], end_stream=False) class TestSendData: """Test send_data method.""" def test_send_data_in_open_state(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.OPEN stream.send_data(b"Response body", end_stream=False) assert stream.response_complete is False def test_send_data_with_end_stream(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) 
stream.state = StreamState.OPEN stream.send_data(b"Final", end_stream=True) assert stream.state == StreamState.HALF_CLOSED_LOCAL assert stream.response_complete is True def test_send_data_in_half_closed_remote(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.HALF_CLOSED_REMOTE stream.send_data(b"data", end_stream=True) assert stream.state == StreamState.CLOSED def test_send_data_in_invalid_state(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.CLOSED with pytest.raises(HTTP2StreamError): stream.send_data(b"data", end_stream=False) class TestStreamReset: """Test stream reset method.""" def test_reset_default_error_code(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.OPEN stream.reset() assert stream.state == StreamState.CLOSED assert stream.response_complete is True assert stream.request_complete is True def test_reset_custom_error_code(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.OPEN stream.reset(error_code=0x1) # PROTOCOL_ERROR assert stream.state == StreamState.CLOSED class TestStreamClose: """Test stream close method.""" def test_close_stream(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.OPEN stream.close() assert stream.state == StreamState.CLOSED assert stream.response_complete is True assert stream.request_complete is True class TestHalfCloseTransitions: """Test half-close state transitions.""" def test_half_close_local_from_open(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.OPEN stream._half_close_local() assert stream.state == StreamState.HALF_CLOSED_LOCAL def test_half_close_local_from_half_closed_remote(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) 
stream.state = StreamState.HALF_CLOSED_REMOTE stream._half_close_local() assert stream.state == StreamState.CLOSED def test_half_close_local_invalid_state(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.IDLE with pytest.raises(HTTP2StreamError): stream._half_close_local() def test_half_close_remote_from_open(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.OPEN stream._half_close_remote() assert stream.state == StreamState.HALF_CLOSED_REMOTE def test_half_close_remote_from_half_closed_local(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.HALF_CLOSED_LOCAL stream._half_close_remote() assert stream.state == StreamState.CLOSED def test_half_close_remote_invalid_state(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.IDLE with pytest.raises(HTTP2StreamError): stream._half_close_remote() class TestGetRequestBody: """Test get_request_body method.""" def test_get_empty_body(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) assert stream.get_request_body() == b"" def test_get_body_after_data(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.state = StreamState.OPEN stream.receive_data(b"Test body content") assert stream.get_request_body() == b"Test body content" class TestGetPseudoHeaders: """Test get_pseudo_headers method.""" def test_extract_pseudo_headers(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.request_headers = [ (':method', 'POST'), (':path', '/api/test'), (':scheme', 'https'), (':authority', 'example.com'), ('content-type', 'application/json'), ('accept', '*/*'), ] pseudo = stream.get_pseudo_headers() assert pseudo == { ':method': 'POST', ':path': '/api/test', ':scheme': 'https', ':authority': 'example.com', } 
def test_empty_pseudo_headers(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.request_headers = [ ('content-type', 'text/plain'), ] pseudo = stream.get_pseudo_headers() assert pseudo == {} class TestGetRegularHeaders: """Test get_regular_headers method.""" def test_extract_regular_headers(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.request_headers = [ (':method', 'GET'), (':path', '/'), ('content-type', 'text/html'), ('accept-language', 'en-US'), ] regular = stream.get_regular_headers() assert regular == [ ('content-type', 'text/html'), ('accept-language', 'en-US'), ] def test_no_regular_headers(self): conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.request_headers = [ (':method', 'GET'), (':path', '/'), ] regular = stream.get_regular_headers() assert regular == [] class TestStreamRepr: """Test stream string representation.""" def test_repr_format(self): conn = MockConnection() stream = HTTP2Stream(stream_id=5, connection=conn) repr_str = repr(stream) assert "HTTP2Stream" in repr_str assert "id=5" in repr_str assert "state=IDLE" in repr_str assert "req_complete=False" in repr_str assert "resp_complete=False" in repr_str class TestFullStreamLifecycle: """Test complete stream lifecycles.""" def test_simple_get_request(self): """Test a simple GET request lifecycle.""" conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) # Receive request headers (GET with end_stream) stream.receive_headers([ (':method', 'GET'), (':path', '/'), (':scheme', 'https'), (':authority', 'example.com'), ], end_stream=True) assert stream.state == StreamState.HALF_CLOSED_REMOTE assert stream.request_complete is True # Send response headers with body stream.send_headers([(':status', '200')], end_stream=False) assert stream.state == StreamState.HALF_CLOSED_REMOTE # Send response body stream.send_data(b"Hello!", end_stream=True) assert stream.state == 
StreamState.CLOSED assert stream.response_complete is True def test_post_request_with_body(self): """Test a POST request with body.""" conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) # Receive request headers stream.receive_headers([ (':method', 'POST'), (':path', '/submit'), ('content-type', 'application/json'), ], end_stream=False) assert stream.state == StreamState.OPEN # Receive body data stream.receive_data(b'{"key": "value"}', end_stream=True) assert stream.state == StreamState.HALF_CLOSED_REMOTE assert stream.get_request_body() == b'{"key": "value"}' # Send response stream.send_headers([(':status', '201')], end_stream=False) stream.send_data(b'Created', end_stream=True) assert stream.state == StreamState.CLOSED def test_stream_reset_lifecycle(self): """Test a stream that gets reset.""" conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.receive_headers([(':method', 'GET'), (':path', '/')], end_stream=False) assert stream.state == StreamState.OPEN # Reset the stream stream.reset(error_code=0x8) # CANCEL assert stream.state == StreamState.CLOSED assert stream.request_complete is True assert stream.response_complete is True class TestStreamPriority: """Test stream priority support (RFC 7540 Section 5.3).""" def test_default_priority_values(self): """Test default priority values.""" conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) assert stream.priority_weight == 16 assert stream.priority_depends_on == 0 assert stream.priority_exclusive is False def test_update_priority_weight(self): """Test updating priority weight.""" conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.update_priority(weight=256) assert stream.priority_weight == 256 stream.update_priority(weight=1) assert stream.priority_weight == 1 def test_update_priority_depends_on(self): """Test updating priority dependency.""" conn = MockConnection() stream = HTTP2Stream(stream_id=3, 
connection=conn) stream.update_priority(depends_on=1) assert stream.priority_depends_on == 1 def test_update_priority_exclusive(self): """Test updating exclusive flag.""" conn = MockConnection() stream = HTTP2Stream(stream_id=3, connection=conn) stream.update_priority(exclusive=True) assert stream.priority_exclusive is True stream.update_priority(exclusive=False) assert stream.priority_exclusive is False def test_update_priority_all_fields(self): """Test updating all priority fields at once.""" conn = MockConnection() stream = HTTP2Stream(stream_id=5, connection=conn) stream.update_priority(weight=128, depends_on=1, exclusive=True) assert stream.priority_weight == 128 assert stream.priority_depends_on == 1 assert stream.priority_exclusive is True def test_update_priority_partial(self): """Test that partial updates don't affect other fields.""" conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) # Set initial values stream.update_priority(weight=200, depends_on=3, exclusive=True) # Update only weight stream.update_priority(weight=100) assert stream.priority_weight == 100 assert stream.priority_depends_on == 3 # unchanged assert stream.priority_exclusive is True # unchanged def test_weight_clamped_to_min(self): """Test that weight is clamped to minimum of 1.""" conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.update_priority(weight=0) assert stream.priority_weight == 1 stream.update_priority(weight=-10) assert stream.priority_weight == 1 def test_weight_clamped_to_max(self): """Test that weight is clamped to maximum of 256.""" conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.update_priority(weight=300) assert stream.priority_weight == 256 stream.update_priority(weight=1000) assert stream.priority_weight == 256 class TestStreamResponseTrailers: """Test response trailer support.""" def test_response_trailers_default_none(self): """Test that response_trailers defaults to None.""" 
conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) assert stream.response_trailers is None def test_send_trailers_in_open_state(self): """Test sending trailers in OPEN state.""" conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) # Open the stream stream.receive_headers([(':method', 'GET'), (':path', '/')], end_stream=True) assert stream.state == StreamState.HALF_CLOSED_REMOTE # Send response headers stream.send_headers([(':status', '200')], end_stream=False) # Send trailers trailers = [('grpc-status', '0'), ('grpc-message', 'OK')] stream.send_trailers(trailers) assert stream.response_trailers == trailers assert stream.state == StreamState.CLOSED assert stream.response_complete is True def test_send_trailers_after_body(self): """Test sending trailers after response body.""" conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) # Open the stream stream.receive_headers([(':method', 'POST'), (':path', '/api')], end_stream=False) stream.receive_data(b'request body', end_stream=True) # Send response stream.send_headers([(':status', '200')], end_stream=False) stream.send_data(b'response body', end_stream=False) # Send trailers trailers = [('content-md5', 'abc123')] stream.send_trailers(trailers) assert stream.response_trailers == trailers assert stream.state == StreamState.CLOSED def test_send_trailers_closes_stream(self): """Test that trailers close the stream.""" conn = MockConnection() stream = HTTP2Stream(stream_id=1, connection=conn) stream.receive_headers([(':method', 'GET'), (':path', '/')], end_stream=True) stream.send_headers([(':status', '200')], end_stream=False) assert stream.can_send is True stream.send_trailers([('trailer', 'value')]) assert stream.can_send is False assert stream.response_complete is True def test_send_trailers_invalid_state_raises(self): """Test that sending trailers in invalid state raises error.""" conn = MockConnection() stream = HTTP2Stream(stream_id=1, 
connection=conn) # Stream is IDLE, cannot send trailers with pytest.raises(HTTP2StreamError): stream.send_trailers([('trailer', 'value')]) benoitc-gunicorn-f5fb19e/tests/test_invalid_requests.py000066400000000000000000000010771514360242400236240ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import glob import os import pytest import treq dirname = os.path.dirname(__file__) reqdir = os.path.join(dirname, "requests", "invalid") httpfiles = glob.glob(os.path.join(reqdir, "*.http")) @pytest.mark.parametrize("fname", httpfiles) def test_http_parser(fname): env = treq.load_py(os.path.splitext(fname)[0] + ".py") expect = env["request"] cfg = env["cfg"] req = treq.badrequest(fname) with pytest.raises(expect): req.check(cfg) benoitc-gunicorn-f5fb19e/tests/test_logger.py000066400000000000000000000062561514360242400215260ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
import datetime from types import SimpleNamespace import pytest from gunicorn.config import Config from gunicorn.glogging import Logger def test_atoms_defaults(): response = SimpleNamespace( status='200', response_length=1024, headers=(('Content-Type', 'application/json'),), sent=1024, ) request = SimpleNamespace(headers=(('Accept', 'application/json'),)) environ = { 'REQUEST_METHOD': 'GET', 'RAW_URI': '/my/path?foo=bar', 'PATH_INFO': '/my/path', 'QUERY_STRING': 'foo=bar', 'SERVER_PROTOCOL': 'HTTP/1.1', } logger = Logger(Config()) atoms = logger.atoms(response, request, environ, datetime.timedelta(seconds=1)) assert isinstance(atoms, dict) assert atoms['r'] == 'GET /my/path?foo=bar HTTP/1.1' assert atoms['m'] == 'GET' assert atoms['U'] == '/my/path' assert atoms['q'] == 'foo=bar' assert atoms['H'] == 'HTTP/1.1' assert atoms['b'] == '1024' assert atoms['B'] == 1024 assert atoms['{accept}i'] == 'application/json' assert atoms['{content-type}o'] == 'application/json' def test_atoms_zero_bytes(): response = SimpleNamespace( status='200', response_length=0, headers=(('Content-Type', 'application/json'),), sent=0, ) request = SimpleNamespace(headers=(('Accept', 'application/json'),)) environ = { 'REQUEST_METHOD': 'GET', 'RAW_URI': '/my/path?foo=bar', 'PATH_INFO': '/my/path', 'QUERY_STRING': 'foo=bar', 'SERVER_PROTOCOL': 'HTTP/1.1', } logger = Logger(Config()) atoms = logger.atoms(response, request, environ, datetime.timedelta(seconds=1)) assert atoms['b'] == '0' assert atoms['B'] == 0 @pytest.mark.parametrize('auth', [ # auth type is case in-sensitive 'Basic YnJrMHY6', 'basic YnJrMHY6', 'BASIC YnJrMHY6', ]) def test_get_username_from_basic_auth_header(auth): request = SimpleNamespace(headers=()) response = SimpleNamespace( status='200', response_length=1024, sent=1024, headers=(('Content-Type', 'text/plain'),), ) environ = { 'REQUEST_METHOD': 'GET', 'RAW_URI': '/my/path?foo=bar', 'PATH_INFO': '/my/path', 'QUERY_STRING': 'foo=bar', 'SERVER_PROTOCOL': 'HTTP/1.1', 
'HTTP_AUTHORIZATION': auth, } logger = Logger(Config()) atoms = logger.atoms(response, request, environ, datetime.timedelta(seconds=1)) assert atoms['u'] == 'brk0v' def test_get_username_handles_malformed_basic_auth_header(): """Should catch a malformed auth header""" request = SimpleNamespace(headers=()) response = SimpleNamespace( status='200', response_length=1024, sent=1024, headers=(('Content-Type', 'text/plain'),), ) environ = { 'REQUEST_METHOD': 'GET', 'RAW_URI': '/my/path?foo=bar', 'PATH_INFO': '/my/path', 'QUERY_STRING': 'foo=bar', 'SERVER_PROTOCOL': 'HTTP/1.1', 'HTTP_AUTHORIZATION': 'Basic ixsTtkKzIpVTncfQjbBcnoRNoDfbnaXG', } logger = Logger(Config()) atoms = logger.atoms(response, request, environ, datetime.timedelta(seconds=1)) assert atoms['u'] == '-' benoitc-gunicorn-f5fb19e/tests/test_pidfile.py000066400000000000000000000027341514360242400216600ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
import errno from unittest import mock import gunicorn.pidfile def builtin(name): return 'builtins.{}'.format(name) @mock.patch(builtin('open'), new_callable=mock.mock_open) def test_validate_no_file(_open): pidfile = gunicorn.pidfile.Pidfile('test.pid') _open.side_effect = IOError(errno.ENOENT) assert pidfile.validate() is None @mock.patch(builtin('open'), new_callable=mock.mock_open, read_data='1') @mock.patch('os.kill') def test_validate_file_pid_exists(kill, _open): pidfile = gunicorn.pidfile.Pidfile('test.pid') assert pidfile.validate() == 1 assert kill.called @mock.patch(builtin('open'), new_callable=mock.mock_open, read_data='a') def test_validate_file_pid_malformed(_open): pidfile = gunicorn.pidfile.Pidfile('test.pid') assert pidfile.validate() is None @mock.patch(builtin('open'), new_callable=mock.mock_open, read_data='1') @mock.patch('os.kill') def test_validate_file_pid_exists_kill_exception(kill, _open): pidfile = gunicorn.pidfile.Pidfile('test.pid') kill.side_effect = OSError(errno.EPERM) assert pidfile.validate() == 1 @mock.patch(builtin('open'), new_callable=mock.mock_open, read_data='1') @mock.patch('os.kill') def test_validate_file_pid_does_not_exist(kill, _open): pidfile = gunicorn.pidfile.Pidfile('test.pid') kill.side_effect = OSError(errno.ESRCH) assert pidfile.validate() is None benoitc-gunicorn-f5fb19e/tests/test_reload.py000066400000000000000000000036451514360242400215140ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
import unittest.mock as mock from gunicorn.app.base import Application from gunicorn.workers.base import Worker from gunicorn.reloader import reloader_engines class ReloadApp(Application): def __init__(self): super().__init__("no usage", prog="gunicorn_test") def do_load_config(self): self.load_default_config() self.cfg.set('reload', True) self.cfg.set('reload_engine', 'poll') class SyntaxErrorApp(ReloadApp): def wsgi(self): error = SyntaxError('invalid syntax') error.filename = 'syntax_error_filename' raise error class MyWorker(Worker): def run(self): pass def test_reload_on_syntax_error(): """ Test that reloading works if the application has a syntax error. """ reloader = mock.Mock() reloader_engines['poll'] = lambda *args, **kw: reloader app = SyntaxErrorApp() cfg = app.cfg log = mock.Mock() worker = MyWorker(age=0, ppid=0, sockets=[], app=app, timeout=0, cfg=cfg, log=log) worker.init_process() reloader.start.assert_called_with() reloader.add_extra_file.assert_called_with('syntax_error_filename') def test_start_reloader_after_load_wsgi(): """ Check that the reloader is started after the wsgi app has been loaded. """ reloader = mock.Mock() reloader_engines['poll'] = lambda *args, **kw: reloader app = ReloadApp() cfg = app.cfg log = mock.Mock() worker = MyWorker(age=0, ppid=0, sockets=[], app=app, timeout=0, cfg=cfg, log=log) worker.load_wsgi = mock.Mock() mock_parent = mock.Mock() mock_parent.attach_mock(worker.load_wsgi, 'load_wsgi') mock_parent.attach_mock(reloader.start, 'reloader_start') worker.init_process() mock_parent.assert_has_calls([ mock.call.load_wsgi(), mock.call.reloader_start(), ]) benoitc-gunicorn-f5fb19e/tests/test_signal_integration.py000066400000000000000000000152761514360242400241310ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Integration tests for arbiter signal handling. 
These tests start a real gunicorn process and verify signal handling works correctly with actual requests and signals. """ import os import signal import socket import subprocess import sys import time import pytest # Timeout for CI environments (VMs can be slow, PyPy needs more time) CI_TIMEOUT = 90 # Simple WSGI app inline SIMPLE_APP = ''' def application(environ, start_response): """Basic hello world response.""" status = '200 OK' body = b'Hello, World!' headers = [ ('Content-Type', 'text/plain'), ('Content-Length', str(len(body))), ] start_response(status, headers) return [body] ''' def find_free_port(): """Find a free port to bind to.""" with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind(('127.0.0.1', 0)) return s.getsockname()[1] def wait_for_server(host, port, timeout=CI_TIMEOUT): """Wait until server is accepting connections.""" start = time.monotonic() while time.monotonic() - start < timeout: try: with socket.create_connection((host, port), timeout=1): return True except (ConnectionRefusedError, socket.timeout, OSError): time.sleep(0.1) return False def make_request(host, port, path='/'): """Make a simple HTTP request and return the response body.""" with socket.create_connection((host, port), timeout=5) as sock: request = f'GET {path} HTTP/1.1\r\nHost: {host}\r\nConnection: close\r\n\r\n' sock.sendall(request.encode()) response = b'' while True: chunk = sock.recv(4096) if not chunk: break response += chunk return response @pytest.fixture def app_module(tmp_path): """Create a temporary app module.""" app_file = tmp_path / "app.py" app_file.write_text(SIMPLE_APP) return str(app_file.parent), "app:application" @pytest.fixture def gunicorn_server(app_module): """Start and stop a gunicorn server.""" app_dir, app_name = app_module port = find_free_port() # Start gunicorn cmd = [ sys.executable, '-m', 'gunicorn', '--bind', f'127.0.0.1:{port}', '--workers', '2', '--worker-class', 'sync', '--access-logfile', '-', '--error-logfile', '-', 
'--log-level', 'info', '--timeout', '30', '--graceful-timeout', '30', app_name ] # Use setsid to create new process group for proper signal handling proc = subprocess.Popen( cmd, cwd=app_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env={**os.environ, 'PYTHONPATH': app_dir}, preexec_fn=os.setsid ) # Wait for server to start if not wait_for_server('127.0.0.1', port): proc.terminate() proc.wait() stdout, stderr = proc.communicate() pytest.fail(f"Gunicorn failed to start:\nstdout: {stdout.decode()}\nstderr: {stderr.decode()}") yield proc, port # Cleanup - use process group kill for better cleanup if proc.poll() is None: try: os.killpg(os.getpgid(proc.pid), signal.SIGTERM) except (ProcessLookupError, OSError): pass try: proc.wait(timeout=5) except subprocess.TimeoutExpired: try: os.killpg(os.getpgid(proc.pid), signal.SIGKILL) except (ProcessLookupError, OSError): pass proc.wait() class TestSignalHandlingIntegration: """Integration tests for signal handling.""" def test_basic_request(self, gunicorn_server): """Verify the server responds to basic requests.""" proc, port = gunicorn_server response = make_request('127.0.0.1', port) assert b'Hello, World!' in response def test_graceful_shutdown_sigterm(self, gunicorn_server): """Verify SIGTERM causes graceful shutdown.""" proc, port = gunicorn_server # Verify server is working response = make_request('127.0.0.1', port) assert b'Hello, World!' 
in response # Send SIGTERM to the process group for reliable signal delivery try: os.killpg(os.getpgid(proc.pid), signal.SIGTERM) except (ProcessLookupError, OSError): proc.send_signal(signal.SIGTERM) # Wait for process to exit try: exit_code = proc.wait(timeout=CI_TIMEOUT) assert exit_code == 0, f"Expected exit code 0, got {exit_code}" except subprocess.TimeoutExpired: proc.kill() pytest.fail("Gunicorn did not exit within timeout after SIGTERM") def test_graceful_shutdown_sigint(self, gunicorn_server): """Verify SIGINT causes graceful shutdown.""" proc, port = gunicorn_server # Verify server is working response = make_request('127.0.0.1', port) assert b'Hello, World!' in response # Send SIGINT to the process group for reliable signal delivery try: os.killpg(os.getpgid(proc.pid), signal.SIGINT) except (ProcessLookupError, OSError): proc.send_signal(signal.SIGINT) # Wait for process to exit try: exit_code = proc.wait(timeout=CI_TIMEOUT) assert exit_code == 0, f"Expected exit code 0, got {exit_code}" except subprocess.TimeoutExpired: proc.kill() pytest.fail("Gunicorn did not exit within timeout after SIGINT") def test_sighup_reload(self, gunicorn_server): """Verify SIGHUP triggers reload.""" proc, port = gunicorn_server # Verify server is working response = make_request('127.0.0.1', port) assert b'Hello, World!' in response # Send SIGHUP to the master process (not process group - only master handles reload) proc.send_signal(signal.SIGHUP) # Wait a moment for reload time.sleep(2) # Verify server still works after reload assert proc.poll() is None, "Server died after SIGHUP" response = make_request('127.0.0.1', port) assert b'Hello, World!' in response def test_multiple_requests_under_load(self, gunicorn_server): """Verify server handles multiple concurrent requests.""" proc, port = gunicorn_server # Make several requests in sequence for _ in range(10): response = make_request('127.0.0.1', port) assert b'Hello, World!' 
in response # Verify server is still running assert proc.poll() is None if __name__ == '__main__': pytest.main([__file__, '-v']) benoitc-gunicorn-f5fb19e/tests/test_sock.py000066400000000000000000000034751514360242400212060ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from unittest import mock from gunicorn import sock @mock.patch('os.stat') def test_create_sockets_unix_bytes(stat): conf = mock.Mock(address=[b'127.0.0.1:8000']) log = mock.Mock() with mock.patch.object(sock.UnixSocket, '__init__', lambda *args: None): listeners = sock.create_sockets(conf, log) assert len(listeners) == 1 print(type(listeners[0])) assert isinstance(listeners[0], sock.UnixSocket) @mock.patch('os.stat') def test_create_sockets_unix_strings(stat): conf = mock.Mock(address=['127.0.0.1:8000']) log = mock.Mock() with mock.patch.object(sock.UnixSocket, '__init__', lambda *args: None): listeners = sock.create_sockets(conf, log) assert len(listeners) == 1 assert isinstance(listeners[0], sock.UnixSocket) def test_socket_close(): listener1 = mock.Mock() listener1.getsockname.return_value = ('127.0.0.1', '80') listener2 = mock.Mock() listener2.getsockname.return_value = ('192.168.2.5', '80') sock.close_sockets([listener1, listener2]) listener1.close.assert_called_with() listener2.close.assert_called_with() @mock.patch('os.unlink') def test_unix_socket_close_unlink(unlink): listener = mock.Mock() listener.getsockname.return_value = '/var/run/test.sock' sock.close_sockets([listener]) listener.close.assert_called_with() unlink.assert_called_once_with('/var/run/test.sock') @mock.patch('os.unlink') def test_unix_socket_close_without_unlink(unlink): listener = mock.Mock() listener.getsockname.return_value = '/var/run/test.sock' sock.close_sockets([listener], False) listener.close.assert_called_with() assert not unlink.called, 'unlink should not have been called' 
benoitc-gunicorn-f5fb19e/tests/test_ssl.py000066400000000000000000000037061514360242400210450ustar00rootroot00000000000000# Copyright 2013 Dariusz Suchojad # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import pytest from gunicorn.config import ( KeyFile, CertFile, CACerts, SuppressRaggedEOFs, DoHandshakeOnConnect, Setting, Ciphers, ) ssl = pytest.importorskip('ssl') def test_keyfile(): assert issubclass(KeyFile, Setting) assert KeyFile.name == 'keyfile' assert KeyFile.section == 'SSL' assert KeyFile.cli == ['--keyfile'] assert KeyFile.meta == 'FILE' assert KeyFile.default is None def test_certfile(): assert issubclass(CertFile, Setting) assert CertFile.name == 'certfile' assert CertFile.section == 'SSL' assert CertFile.cli == ['--certfile'] assert CertFile.default is None def test_cacerts(): assert issubclass(CACerts, Setting) assert CACerts.name == 'ca_certs' assert CACerts.section == 'SSL' assert CACerts.cli == ['--ca-certs'] assert CACerts.meta == 'FILE' assert CACerts.default is None def test_suppress_ragged_eofs(): assert issubclass(SuppressRaggedEOFs, Setting) assert SuppressRaggedEOFs.name == 'suppress_ragged_eofs' assert SuppressRaggedEOFs.section == 'SSL' assert SuppressRaggedEOFs.cli == ['--suppress-ragged-eofs'] assert SuppressRaggedEOFs.action == 'store_true' assert SuppressRaggedEOFs.default is True def test_do_handshake_on_connect(): assert issubclass(DoHandshakeOnConnect, Setting) assert DoHandshakeOnConnect.name == 'do_handshake_on_connect' assert DoHandshakeOnConnect.section == 'SSL' assert DoHandshakeOnConnect.cli == ['--do-handshake-on-connect'] assert DoHandshakeOnConnect.action == 'store_true' assert DoHandshakeOnConnect.default is False def test_ciphers(): assert issubclass(Ciphers, Setting) assert Ciphers.name == 'ciphers' assert Ciphers.section == 'SSL' assert Ciphers.cli == ['--ciphers'] assert Ciphers.default is None 
benoitc-gunicorn-f5fb19e/tests/test_statsd.py000066400000000000000000000114641514360242400215460ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import io import logging import os import shutil import socket import tempfile from datetime import timedelta from types import SimpleNamespace from gunicorn.config import Config from gunicorn.instrument.statsd import Statsd class StatsdTestException(Exception): pass class MockSocket: "Pretend to be a UDP socket" def __init__(self, failp): self.failp = failp self.msgs = [] # accumulate messages for later inspection def send(self, msg): if self.failp: raise StatsdTestException("Should not interrupt the logger") sock_dir = tempfile.mkdtemp() sock_file = os.path.join(sock_dir, "test.sock") server = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) client = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) try: server.bind(sock_file) client.connect(sock_file) client.send(msg) self.msgs.append(server.recv(1024)) finally: client.close() server.close() shutil.rmtree(sock_dir) def reset(self): self.msgs = [] def test_statsd_fail(): "UDP socket fails" logger = Statsd(Config()) logger.sock = MockSocket(True) logger.info("No impact on logging") logger.debug("No impact on logging") logger.critical("No impact on logging") logger.error("No impact on logging") logger.warning("No impact on logging") logger.exception("No impact on logging") def test_statsd_host_initialization(): c = Config() c.set('statsd_host', 'unix:test.sock') logger = Statsd(c) logger.info("Can be initialized and used with a UDS socket") # Can be initialized and used with a UDP address c.set('statsd_host', 'host:8080') logger = Statsd(c) logger.info("Can be initialized and used with a UDP socket") def test_dogstatsd_tags(): c = Config() tags = 'yucatan,libertine:rhubarb' c.set('dogstatsd_tags', tags) logger = Statsd(c) logger.sock = MockSocket(False) logger.info("Twill", extra={"mtype": 
"gauge", "metric": "barb.westerly", "value": 2}) assert logger.sock.msgs[0] == b"barb.westerly:2|g|#" + tags.encode('ascii') def test_instrument(): logger = Statsd(Config()) # Capture logged messages sio = io.StringIO() logger.error_log.addHandler(logging.StreamHandler(sio)) logger.sock = MockSocket(False) # Regular message logger.info("Blah", extra={"mtype": "gauge", "metric": "gunicorn.test", "value": 666}) assert logger.sock.msgs[0] == b"gunicorn.test:666|g" assert sio.getvalue() == "Blah\n" logger.sock.reset() # Only metrics, no logging logger.info("", extra={"mtype": "gauge", "metric": "gunicorn.test", "value": 666}) assert logger.sock.msgs[0] == b"gunicorn.test:666|g" assert sio.getvalue() == "Blah\n" # log is unchanged logger.sock.reset() # Debug logging also supports metrics logger.debug("", extra={"mtype": "gauge", "metric": "gunicorn.debug", "value": 667}) assert logger.sock.msgs[0] == b"gunicorn.debug:667|g" assert sio.getvalue() == "Blah\n" # log is unchanged logger.sock.reset() logger.critical("Boom") assert logger.sock.msgs[0] == b"gunicorn.log.critical:1|c|@1.0" logger.sock.reset() logger.access(SimpleNamespace(status="200 OK"), None, {}, timedelta(seconds=7)) assert logger.sock.msgs[0] == b"gunicorn.request.duration:7000.0|ms" assert logger.sock.msgs[1] == b"gunicorn.requests:1|c|@1.0" assert logger.sock.msgs[2] == b"gunicorn.request.status.200:1|c|@1.0" def test_prefix(): c = Config() c.set("statsd_prefix", "test.") logger = Statsd(c) logger.sock = MockSocket(False) logger.info("Blah", extra={"mtype": "gauge", "metric": "gunicorn.test", "value": 666}) assert logger.sock.msgs[0] == b"test.gunicorn.test:666|g" def test_prefix_no_dot(): c = Config() c.set("statsd_prefix", "test") logger = Statsd(c) logger.sock = MockSocket(False) logger.info("Blah", extra={"mtype": "gauge", "metric": "gunicorn.test", "value": 666}) assert logger.sock.msgs[0] == b"test.gunicorn.test:666|g" def test_prefix_multiple_dots(): c = Config() c.set("statsd_prefix", "test...") 
logger = Statsd(c) logger.sock = MockSocket(False) logger.info("Blah", extra={"mtype": "gauge", "metric": "gunicorn.test", "value": 666}) assert logger.sock.msgs[0] == b"test.gunicorn.test:666|g" def test_prefix_nested(): c = Config() c.set("statsd_prefix", "test.asdf.") logger = Statsd(c) logger.sock = MockSocket(False) logger.info("Blah", extra={"mtype": "gauge", "metric": "gunicorn.test", "value": 666}) assert logger.sock.msgs[0] == b"test.asdf.gunicorn.test:666|g" benoitc-gunicorn-f5fb19e/tests/test_systemd.py000066400000000000000000000037531514360242400217360ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. from contextlib import contextmanager import os from unittest import mock import pytest from gunicorn import systemd @contextmanager def check_environ(unset=True): """ A context manager that asserts post-conditions of ``listen_fds`` at exit. This helper is used to ease checking of the test post-conditions for the systemd socket activation tests that parametrize the call argument. 
""" with mock.patch.dict(os.environ): old_fds = os.environ.get('LISTEN_FDS', None) old_pid = os.environ.get('LISTEN_PID', None) yield if unset: assert 'LISTEN_FDS' not in os.environ, \ "LISTEN_FDS should have been unset" assert 'LISTEN_PID' not in os.environ, \ "LISTEN_PID should have been unset" else: new_fds = os.environ.get('LISTEN_FDS', None) new_pid = os.environ.get('LISTEN_PID', None) assert new_fds == old_fds, \ "LISTEN_FDS should not have been changed" assert new_pid == old_pid, \ "LISTEN_PID should not have been changed" @pytest.mark.parametrize("unset", [True, False]) def test_listen_fds_ignores_wrong_pid(unset): with mock.patch.dict(os.environ): os.environ['LISTEN_FDS'] = str(5) os.environ['LISTEN_PID'] = str(1) with check_environ(False): # early exit — never changes the environment assert systemd.listen_fds(unset) == 0, \ "should ignore listen fds not intended for this pid" @pytest.mark.parametrize("unset", [True, False]) def test_listen_fds_returns_count(unset): with mock.patch.dict(os.environ): os.environ['LISTEN_FDS'] = str(5) os.environ['LISTEN_PID'] = str(os.getpid()) with check_environ(unset): assert systemd.listen_fds(unset) == 5, \ "should return the correct count of fds" benoitc-gunicorn-f5fb19e/tests/test_util.py000066400000000000000000000103711514360242400212150ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
import os import pytest from gunicorn import util from gunicorn.errors import AppImportError from urllib.parse import SplitResult @pytest.mark.parametrize('test_input, expected', [ ('unix://var/run/test.sock', 'var/run/test.sock'), ('unix:/var/run/test.sock', '/var/run/test.sock'), ('tcp://localhost', ('localhost', 8000)), ('tcp://localhost:5000', ('localhost', 5000)), ('', ('0.0.0.0', 8000)), ('[::1]:8000', ('::1', 8000)), ('[::1]:5000', ('::1', 5000)), ('[::1]', ('::1', 8000)), ('localhost:8000', ('localhost', 8000)), ('127.0.0.1:8000', ('127.0.0.1', 8000)), ('localhost', ('localhost', 8000)), ('fd://33', 33), ]) def test_parse_address(test_input, expected): assert util.parse_address(test_input) == expected def test_parse_address_invalid(): with pytest.raises(RuntimeError) as exc_info: util.parse_address('127.0.0.1:test') assert "'test' is not a valid port number." in str(exc_info.value) def test_parse_fd_invalid(): with pytest.raises(RuntimeError) as exc_info: util.parse_address('fd://asd') assert "'asd' is not a valid file descriptor." in str(exc_info.value) def test_http_date(): assert util.http_date(1508607753.740316) == 'Sat, 21 Oct 2017 17:42:33 GMT' @pytest.mark.parametrize('test_input, expected', [ ('1200:0000:AB00:1234:0000:2552:7777:1313', True), ('1200::AB00:1234::2552:7777:1313', False), ('21DA:D3:0:2F3B:2AA:FF:FE28:9C5A', True), ('1200:0000:AB00:1234:O000:2552:7777:1313', False), ]) def test_is_ipv6(test_input, expected): assert util.is_ipv6(test_input) == expected def test_warn(capsys): util.warn('test warn') _, err = capsys.readouterr() assert '!!! 
WARNING: test warn' in err @pytest.mark.parametrize( "value", [ "support", "support:app", "support:create_app()", "support:create_app('Gunicorn', 3)", "support:create_app(count=3)", ], ) def test_import_app_good(value): assert util.import_app(value) @pytest.mark.parametrize( ("value", "exc_type", "msg"), [ ("a:app", ImportError, "No module"), ("support:create_app(", AppImportError, "Failed to parse"), ("support:create.app()", AppImportError, "Function reference"), ("support:create_app(Gunicorn)", AppImportError, "literal values"), ("support:create.app", AppImportError, "attribute name"), ("support:wrong_app", AppImportError, "find attribute"), ("support:error_factory(1)", AppImportError, "error_factory() takes"), ("support:error_factory()", TypeError, "inner"), ("support:none_app", AppImportError, "find application object"), ("support:HOST", AppImportError, "callable"), ], ) def test_import_app_bad(value, exc_type, msg): with pytest.raises(exc_type) as exc_info: util.import_app(value) assert msg in str(exc_info.value) def test_import_app_py_ext(monkeypatch): monkeypatch.chdir(os.path.dirname(__file__)) with pytest.raises(ImportError) as exc_info: util.import_app("support.py") assert "did you mean" in str(exc_info.value) def test_to_bytestring(): assert util.to_bytestring('test_str', 'ascii') == b'test_str' assert util.to_bytestring('test_str®') == b'test_str\xc2\xae' assert util.to_bytestring(b'byte_test_str') == b'byte_test_str' with pytest.raises(TypeError) as exc_info: util.to_bytestring(100) msg = '100 is not a string' assert msg in str(exc_info.value) @pytest.mark.parametrize('test_input, expected', [ ('https://example.org/a/b?c=1#d', SplitResult(scheme='https', netloc='example.org', path='/a/b', query='c=1', fragment='d')), ('a/b?c=1#d', SplitResult(scheme='', netloc='', path='a/b', query='c=1', fragment='d')), ('/a/b?c=1#d', SplitResult(scheme='', netloc='', path='/a/b', query='c=1', fragment='d')), ('//a/b?c=1#d', SplitResult(scheme='', netloc='', 
path='//a/b', query='c=1', fragment='d')), ('///a/b?c=1#d', SplitResult(scheme='', netloc='', path='///a/b', query='c=1', fragment='d')), ]) def test_split_request_uri(test_input, expected): assert util.split_request_uri(test_input) == expected benoitc-gunicorn-f5fb19e/tests/test_uwsgi.py000066400000000000000000000340621514360242400214010ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import io import pytest from unittest import mock from gunicorn.uwsgi import ( UWSGIRequest, UWSGIParser, UWSGIParseException, InvalidUWSGIHeader, UnsupportedModifier, ForbiddenUWSGIRequest, ) from gunicorn.http.unreader import IterUnreader def make_uwsgi_packet(vars_dict, modifier1=0, modifier2=0): """Create uWSGI packet for testing. Args: vars_dict: Dict of WSGI environ variables modifier1: Packet type (0 = WSGI request) modifier2: Additional flags Returns: bytes: Complete uWSGI packet """ vars_data = b'' for key, value in vars_dict.items(): k = key.encode('latin-1') v = value.encode('latin-1') vars_data += len(k).to_bytes(2, 'little') + k vars_data += len(v).to_bytes(2, 'little') + v header = bytes([modifier1]) + len(vars_data).to_bytes(2, 'little') + bytes([modifier2]) return header + vars_data def make_uwsgi_packet_with_body(vars_dict, body=b'', modifier1=0, modifier2=0): """Create uWSGI packet with body for testing.""" if body: vars_dict = dict(vars_dict) vars_dict['CONTENT_LENGTH'] = str(len(body)) return make_uwsgi_packet(vars_dict, modifier1, modifier2) + body class MockConfig: """Mock config object for testing.""" def __init__(self, is_ssl=False, uwsgi_allow_ips=None): self.is_ssl = is_ssl self.uwsgi_allow_ips = uwsgi_allow_ips or ['127.0.0.1', '::1'] class TestUWSGIPacketConstruction: """Test the packet construction helper.""" def test_empty_vars(self): packet = make_uwsgi_packet({}) assert packet == b'\x00\x00\x00\x00' # modifier1=0, size=0, modifier2=0 def test_single_var(self): packet = 
make_uwsgi_packet({'KEY': 'val'}) # Header: modifier1(0) + size(10 in LE) + modifier2(0) # Var: key_size(3 in LE) + 'KEY' + val_size(3 in LE) + 'val' # Size = 2 + 3 + 2 + 3 = 10 bytes expected_header = b'\x00\x0a\x00\x00' expected_var = b'\x03\x00KEY\x03\x00val' assert packet == expected_header + expected_var def test_multiple_vars(self): packet = make_uwsgi_packet({'A': '1', 'B': '2'}) assert len(packet) == 4 + (2 + 1 + 2 + 1) * 2 # header + 2 vars class TestUWSGIRequest: """Test UWSGIRequest parsing.""" def test_parse_simple_request(self): """Test parsing a simple GET request.""" packet = make_uwsgi_packet({ 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/test', 'QUERY_STRING': 'foo=bar', }) unreader = IterUnreader([packet]) cfg = MockConfig() req = UWSGIRequest(cfg, unreader, ('127.0.0.1', 12345)) assert req.method == 'GET' assert req.path == '/test' assert req.query == 'foo=bar' assert req.uri == '/test?foo=bar' def test_parse_post_request_with_body(self): """Test parsing a POST request with body.""" body = b'name=test&value=123' packet = make_uwsgi_packet_with_body({ 'REQUEST_METHOD': 'POST', 'PATH_INFO': '/submit', 'CONTENT_TYPE': 'application/x-www-form-urlencoded', }, body) unreader = IterUnreader([packet]) cfg = MockConfig() req = UWSGIRequest(cfg, unreader, ('127.0.0.1', 12345)) assert req.method == 'POST' assert req.path == '/submit' assert req.body.read() == body def test_parse_headers(self): """Test that HTTP_* vars become headers.""" packet = make_uwsgi_packet({ 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/', 'HTTP_HOST': 'example.com', 'HTTP_USER_AGENT': 'TestClient/1.0', 'HTTP_ACCEPT': 'text/html', }) unreader = IterUnreader([packet]) cfg = MockConfig() req = UWSGIRequest(cfg, unreader, ('127.0.0.1', 12345)) headers_dict = dict(req.headers) assert headers_dict['HOST'] == 'example.com' assert headers_dict['USER-AGENT'] == 'TestClient/1.0' assert headers_dict['ACCEPT'] == 'text/html' def test_parse_content_type_header(self): """Test that CONTENT_TYPE becomes a 
header.""" packet = make_uwsgi_packet({ 'REQUEST_METHOD': 'POST', 'PATH_INFO': '/', 'CONTENT_TYPE': 'application/json', 'CONTENT_LENGTH': '0', }) unreader = IterUnreader([packet]) cfg = MockConfig() req = UWSGIRequest(cfg, unreader, ('127.0.0.1', 12345)) headers_dict = dict(req.headers) assert headers_dict['CONTENT-TYPE'] == 'application/json' assert headers_dict['CONTENT-LENGTH'] == '0' def test_https_scheme(self): """Test scheme detection from HTTPS variable.""" packet = make_uwsgi_packet({ 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/', 'HTTPS': 'on', }) unreader = IterUnreader([packet]) cfg = MockConfig() req = UWSGIRequest(cfg, unreader, ('127.0.0.1', 12345)) assert req.scheme == 'https' def test_wsgi_url_scheme(self): """Test scheme from wsgi.url_scheme variable.""" packet = make_uwsgi_packet({ 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/', 'wsgi.url_scheme': 'https', }) unreader = IterUnreader([packet]) cfg = MockConfig() req = UWSGIRequest(cfg, unreader, ('127.0.0.1', 12345)) assert req.scheme == 'https' def test_default_values(self): """Test default values when vars are missing.""" packet = make_uwsgi_packet({}) unreader = IterUnreader([packet]) cfg = MockConfig() req = UWSGIRequest(cfg, unreader, ('127.0.0.1', 12345)) assert req.method == 'GET' assert req.path == '/' assert req.query == '' assert req.uri == '/' def test_uwsgi_vars_preserved(self): """Test that all vars are preserved in uwsgi_vars.""" packet = make_uwsgi_packet({ 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/', 'SERVER_NAME': 'localhost', 'SERVER_PORT': '8000', 'CUSTOM_VAR': 'custom_value', }) unreader = IterUnreader([packet]) cfg = MockConfig() req = UWSGIRequest(cfg, unreader, ('127.0.0.1', 12345)) assert req.uwsgi_vars['SERVER_NAME'] == 'localhost' assert req.uwsgi_vars['SERVER_PORT'] == '8000' assert req.uwsgi_vars['CUSTOM_VAR'] == 'custom_value' class TestUWSGIRequestErrors: """Test UWSGIRequest error handling.""" def test_incomplete_header(self): """Test error on incomplete header.""" unreader = 
IterUnreader([b'\x00\x00']) # Only 2 bytes cfg = MockConfig() with pytest.raises(InvalidUWSGIHeader) as exc_info: UWSGIRequest(cfg, unreader, ('127.0.0.1', 12345)) assert 'incomplete header' in str(exc_info.value) def test_incomplete_vars_block(self): """Test error on truncated vars block.""" # Header says 100 bytes of vars, but we only provide 10 header = b'\x00\x64\x00\x00' # modifier1=0, size=100, modifier2=0 unreader = IterUnreader([header + b'1234567890']) cfg = MockConfig() with pytest.raises(InvalidUWSGIHeader) as exc_info: UWSGIRequest(cfg, unreader, ('127.0.0.1', 12345)) assert 'incomplete vars block' in str(exc_info.value) def test_unsupported_modifier(self): """Test error on non-zero modifier1.""" packet = bytes([1]) + b'\x00\x00\x00' # modifier1=1 unreader = IterUnreader([packet]) cfg = MockConfig() with pytest.raises(UnsupportedModifier) as exc_info: UWSGIRequest(cfg, unreader, ('127.0.0.1', 12345)) assert exc_info.value.modifier == 1 assert exc_info.value.code == 501 def test_truncated_key_size(self): """Test error on truncated key size.""" header = b'\x00\x01\x00\x00' # size=1, but need at least 2 bytes for key_size unreader = IterUnreader([header + b'X']) cfg = MockConfig() with pytest.raises(InvalidUWSGIHeader) as exc_info: UWSGIRequest(cfg, unreader, ('127.0.0.1', 12345)) assert 'truncated' in str(exc_info.value) def test_forbidden_ip(self): """Test error when source IP not in allow list.""" packet = make_uwsgi_packet({'REQUEST_METHOD': 'GET', 'PATH_INFO': '/'}) unreader = IterUnreader([packet]) cfg = MockConfig(uwsgi_allow_ips=['192.168.1.1']) with pytest.raises(ForbiddenUWSGIRequest) as exc_info: UWSGIRequest(cfg, unreader, ('10.0.0.1', 12345)) assert exc_info.value.code == 403 assert '10.0.0.1' in str(exc_info.value) def test_allowed_ip_wildcard(self): """Test that wildcard allows any IP.""" packet = make_uwsgi_packet({'REQUEST_METHOD': 'GET', 'PATH_INFO': '/'}) unreader = IterUnreader([packet]) cfg = MockConfig(uwsgi_allow_ips=['*']) # Should 
not raise req = UWSGIRequest(cfg, unreader, ('10.0.0.1', 12345)) assert req.method == 'GET' def test_unix_socket_always_allowed(self): """Test that UNIX socket connections are always allowed.""" packet = make_uwsgi_packet({'REQUEST_METHOD': 'GET', 'PATH_INFO': '/'}) unreader = IterUnreader([packet]) cfg = MockConfig(uwsgi_allow_ips=['127.0.0.1']) # UNIX socket has non-tuple peer_addr req = UWSGIRequest(cfg, unreader, None) assert req.method == 'GET' class TestUWSGIRequestConnection: """Test connection handling.""" def test_should_close_default(self): """Test default keep-alive behavior.""" packet = make_uwsgi_packet({'REQUEST_METHOD': 'GET', 'PATH_INFO': '/'}) unreader = IterUnreader([packet]) cfg = MockConfig() req = UWSGIRequest(cfg, unreader, ('127.0.0.1', 12345)) assert req.should_close() is False def test_should_close_connection_close(self): """Test Connection: close header.""" packet = make_uwsgi_packet({ 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/', 'HTTP_CONNECTION': 'close', }) unreader = IterUnreader([packet]) cfg = MockConfig() req = UWSGIRequest(cfg, unreader, ('127.0.0.1', 12345)) assert req.should_close() is True def test_should_close_connection_keepalive(self): """Test Connection: keep-alive header.""" packet = make_uwsgi_packet({ 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/', 'HTTP_CONNECTION': 'keep-alive', }) unreader = IterUnreader([packet]) cfg = MockConfig() req = UWSGIRequest(cfg, unreader, ('127.0.0.1', 12345)) assert req.should_close() is False def test_force_close(self): """Test force_close method.""" packet = make_uwsgi_packet({'REQUEST_METHOD': 'GET', 'PATH_INFO': '/'}) unreader = IterUnreader([packet]) cfg = MockConfig() req = UWSGIRequest(cfg, unreader, ('127.0.0.1', 12345)) req.force_close() assert req.should_close() is True class TestUWSGIParser: """Test UWSGIParser.""" def test_parser_iteration(self): """Test iterating over parser for multiple requests.""" packet = make_uwsgi_packet({ 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/test', 
'HTTP_CONNECTION': 'close', # Single request }) cfg = MockConfig() # Parser expects an iterable source, not an unreader parser = UWSGIParser(cfg, [packet], ('127.0.0.1', 12345)) req = next(parser) assert req.method == 'GET' assert req.path == '/test' def test_parser_mesg_class(self): """Test that parser uses UWSGIRequest.""" assert UWSGIParser.mesg_class is UWSGIRequest class TestExceptionStrings: """Test exception string representations.""" def test_invalid_uwsgi_header_str(self): exc = InvalidUWSGIHeader("test message") assert str(exc) == "Invalid uWSGI header: test message" assert exc.code == 400 def test_unsupported_modifier_str(self): exc = UnsupportedModifier(5) assert str(exc) == "Unsupported uWSGI modifier1: 5" assert exc.code == 501 def test_forbidden_uwsgi_request_str(self): exc = ForbiddenUWSGIRequest("10.0.0.1") assert str(exc) == "uWSGI request from '10.0.0.1' not allowed" assert exc.code == 403 class TestUWSGIBody: """Test body reading.""" def test_read_body_in_chunks(self): """Test reading body in multiple chunks.""" body = b'A' * 1000 packet = make_uwsgi_packet_with_body({ 'REQUEST_METHOD': 'POST', 'PATH_INFO': '/', }, body) unreader = IterUnreader([packet]) cfg = MockConfig() req = UWSGIRequest(cfg, unreader, ('127.0.0.1', 12345)) result = b'' chunk = req.body.read(100) while chunk: result += chunk chunk = req.body.read(100) assert result == body def test_invalid_content_length(self): """Test handling of invalid CONTENT_LENGTH.""" packet = make_uwsgi_packet({ 'REQUEST_METHOD': 'POST', 'PATH_INFO': '/', 'CONTENT_LENGTH': 'invalid', }) unreader = IterUnreader([packet]) cfg = MockConfig() req = UWSGIRequest(cfg, unreader, ('127.0.0.1', 12345)) # Invalid content length should default to 0 assert req.body.read() == b'' def test_negative_content_length(self): """Test handling of negative CONTENT_LENGTH.""" packet = make_uwsgi_packet({ 'REQUEST_METHOD': 'POST', 'PATH_INFO': '/', 'CONTENT_LENGTH': '-5', }) unreader = IterUnreader([packet]) cfg = 
MockConfig() req = UWSGIRequest(cfg, unreader, ('127.0.0.1', 12345)) # Negative content length should default to 0 assert req.body.read() == b'' benoitc-gunicorn-f5fb19e/tests/test_valid_requests.py000066400000000000000000000011121514360242400232630ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import glob import os import pytest import treq dirname = os.path.dirname(__file__) reqdir = os.path.join(dirname, "requests", "valid") httpfiles = glob.glob(os.path.join(reqdir, "*.http")) @pytest.mark.parametrize("fname", httpfiles) def test_http_parser(fname): env = treq.load_py(os.path.splitext(fname)[0] + ".py") expect = env['request'] cfg = env['cfg'] req = treq.request(fname, expect) for case in req.gen_cases(cfg): case[0](*case[1:]) benoitc-gunicorn-f5fb19e/tests/treq.py000066400000000000000000000252371514360242400201630ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # Copyright 2009 Paul J. Davis # # This file is part of the pywebmachine package released # under the MIT license. 
import inspect import importlib.machinery import os import random import types from gunicorn.config import Config from gunicorn.http.parser import RequestParser from gunicorn.util import split_request_uri dirname = os.path.dirname(__file__) random.seed() def uri(data): ret = {"raw": data} parts = split_request_uri(data) ret["scheme"] = parts.scheme or '' ret["host"] = parts.netloc.rsplit(":", 1)[0] or None ret["port"] = parts.port or 80 ret["path"] = parts.path or '' ret["query"] = parts.query or '' ret["fragment"] = parts.fragment or '' return ret def load_py(fname): module_name = '__config__' mod = types.ModuleType(module_name) setattr(mod, 'uri', uri) setattr(mod, 'cfg', Config()) loader = importlib.machinery.SourceFileLoader(module_name, fname) loader.exec_module(mod) return vars(mod) def decode_hex_escapes(data): """Decode hex escape sequences like \\xAB in test data.""" import re result = bytearray() i = 0 while i < len(data): # Check for \xHH hex escape if i + 3 < len(data) and data[i:i+2] == b'\\x': hex_chars = data[i+2:i+4] try: byte_val = int(hex_chars, 16) result.append(byte_val) i += 4 continue except ValueError: pass result.append(data[i]) i += 1 return bytes(result) class request: def __init__(self, fname, expect): self.fname = fname self.name = os.path.basename(fname) self.expect = expect if not isinstance(self.expect, list): self.expect = [self.expect] with open(self.fname, 'rb') as handle: self.data = handle.read() self.data = self.data.replace(b"\n", b"").replace(b"\\r\\n", b"\r\n") self.data = self.data.replace(b"\\0", b"\000").replace(b"\\n", b"\n").replace(b"\\t", b"\t") # Handle hex escape sequences for binary data (e.g., \x0D for PROXY v2) self.data = decode_hex_escapes(self.data) if b"\\" in self.data: raise AssertionError("Unexpected backslash in test data - only handling HTAB, NUL, CRLF, and hex escapes") # Functions for sending data to the parser. 
# These functions mock out reading from a # socket or other data source that might # be used in real life. def send_all(self): yield self.data def send_lines(self): lines = self.data pos = lines.find(b"\r\n") while pos > 0: yield lines[:pos+2] lines = lines[pos+2:] pos = lines.find(b"\r\n") if lines: yield lines def send_bytes(self): for d in self.data: yield bytes([d]) def send_random(self): maxs = round(len(self.data) / 10) read = 0 while read < len(self.data): chunk = random.randint(1, maxs) yield self.data[read:read+chunk] read += chunk def send_special_chunks(self): """Meant to test the request line length check. Sends the request data in two chunks, one having a length of 1 byte, which ensures that no CRLF is included, and a second chunk containing the rest of the request data. If the request line length check is not done properly, testing the ``tests/requests/valid/099.http`` request fails with a ``LimitRequestLine`` exception. """ chunk = self.data[:1] read = 0 while read < len(self.data): yield self.data[read:read+len(chunk)] read += len(chunk) chunk = self.data[read:] # These functions define the sizes that the # read functions will read with. def size_all(self): return -1 def size_bytes(self): return 1 def size_small_random(self): return random.randint(1, 4) def size_random(self): return random.randint(1, 4096) # Match a body against various ways of reading # a message. Pass in the request, expected body # and one of the size functions. 
def szread(self, func, sizes): sz = sizes() data = func(sz) if 0 <= sz < len(data): raise AssertionError("Read more than %d bytes: %s" % (sz, data)) return data def match_read(self, req, body, sizes): data = self.szread(req.body.read, sizes) count = 1000 while body: if body[:len(data)] != data: raise AssertionError("Invalid body data read: %r != %r" % ( data, body[:len(data)])) body = body[len(data):] data = self.szread(req.body.read, sizes) if not data: count -= 1 if count <= 0: raise AssertionError("Unexpected apparent EOF") if body: raise AssertionError("Failed to read entire body: %r" % body) elif data: raise AssertionError("Read beyond expected body: %r" % data) data = req.body.read(sizes()) if data: raise AssertionError("Read after body finished: %r" % data) def match_readline(self, req, body, sizes): data = self.szread(req.body.readline, sizes) count = 1000 while body: if body[:len(data)] != data: raise AssertionError("Invalid data read: %r" % data) if b'\n' in data[:-1]: raise AssertionError("Embedded new line: %r" % data) body = body[len(data):] data = self.szread(req.body.readline, sizes) if not data: count -= 1 if count <= 0: raise AssertionError("Apparent unexpected EOF") if body: raise AssertionError("Failed to read entire body: %r" % body) elif data: raise AssertionError("Read beyond expected body: %r" % data) data = req.body.readline(sizes()) if data: raise AssertionError("Read data after body finished: %r" % data) def match_readlines(self, req, body, sizes): """\ This skips the sizes checks as we don't implement it. 
""" data = req.body.readlines() for line in data: if b'\n' in line[:-1]: raise AssertionError("Embedded new line: %r" % line) if line != body[:len(line)]: raise AssertionError("Invalid body data read: %r != %r" % ( line, body[:len(line)])) body = body[len(line):] if body: raise AssertionError("Failed to read entire body: %r" % body) data = req.body.readlines(sizes()) if data: raise AssertionError("Read data after body finished: %r" % data) def match_iter(self, req, body, sizes): """\ This skips sizes because there's its not part of the iter api. """ for line in req.body: if b'\n' in line[:-1]: raise AssertionError("Embedded new line: %r" % line) if line != body[:len(line)]: raise AssertionError("Invalid body data read: %r != %r" % ( line, body[:len(line)])) body = body[len(line):] if body: raise AssertionError("Failed to read entire body: %r" % body) try: data = next(iter(req.body)) raise AssertionError("Read data after body finished: %r" % data) except StopIteration: pass # Construct a series of test cases from the permutations of # send, size, and match functions. 
def gen_cases(self, cfg): def get_funs(p): return [v for k, v in inspect.getmembers(self) if k.startswith(p)] senders = get_funs("send_") sizers = get_funs("size_") matchers = get_funs("match_") cfgs = [ (mt, sz, sn) for mt in matchers for sz in sizers for sn in senders ] ret = [] for (mt, sz, sn) in cfgs: if hasattr(mt, 'funcname'): mtn = mt.func_name[6:] szn = sz.func_name[5:] snn = sn.func_name[5:] else: mtn = mt.__name__[6:] szn = sz.__name__[5:] snn = sn.__name__[5:] def test_req(sn, sz, mt): self.check(cfg, sn, sz, mt) desc = "%s: MT: %s SZ: %s SN: %s" % (self.name, mtn, szn, snn) test_req.description = desc ret.append((test_req, sn, sz, mt)) return ret def check(self, cfg, sender, sizer, matcher): cases = self.expect[:] p = RequestParser(cfg, sender(), None) parsed_request_idx = -1 for parsed_request_idx, req in enumerate(p): self.same(req, sizer, matcher, cases.pop(0)) assert len(self.expect) == parsed_request_idx + 1 assert not cases def same(self, req, sizer, matcher, exp): assert req.method == exp["method"] assert req.uri == exp["uri"]["raw"] assert req.path == exp["uri"]["path"] assert req.query == exp["uri"]["query"] assert req.fragment == exp["uri"]["fragment"] assert req.version == exp["version"] assert req.headers == exp["headers"] matcher(req, exp["body"], sizer) assert req.trailers == exp.get("trailers", []) class badrequest: # FIXME: no good reason why this cannot match what the more extensive mechanism above def __init__(self, fname): self.fname = fname self.name = os.path.basename(fname) with open(self.fname) as handle: self.data = handle.read() self.data = self.data.replace("\n", "").replace("\\r\\n", "\r\n") self.data = self.data.replace("\\0", "\000").replace("\\n", "\n").replace("\\t", "\t") if "\\" in self.data: raise AssertionError("Unexpected backslash in test data - only handling HTAB, NUL and CRLF") self.data = self.data.encode('latin1') def send(self): maxs = round(len(self.data) / 10) read = 0 while read < len(self.data): chunk = 
random.randint(1, maxs) yield self.data[read:read+chunk] read += chunk def check(self, cfg): p = RequestParser(cfg, self.send(), None) # must fully consume iterator, otherwise EOF errors could go unnoticed for _ in p: pass benoitc-gunicorn-f5fb19e/tests/workers/000077500000000000000000000000001514360242400203215ustar00rootroot00000000000000benoitc-gunicorn-f5fb19e/tests/workers/__init__.py000066400000000000000000000001521514360242400224300ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. benoitc-gunicorn-f5fb19e/tests/workers/test_gevent_import_order.py000066400000000000000000000334711514360242400260170ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. """ Test for gevent worker compatibility with concurrent.futures import order. Issue: https://github.com/benoitc/gunicorn/issues/3482 Discussion: https://github.com/benoitc/gunicorn/discussions/3481 Gist: https://gist.github.com/markjm/9f724364619c519892e8111fe6520ca6 When using gevent workers, `concurrent.futures` must not be imported before `gevent.monkey.patch_all()` is called. If it is, certain thread locks in concurrent.futures will not be properly patched, leading to issues with libraries like boto3 that use concurrent.futures internally. In gunicorn v25, the import of gunicorn.arbiter triggered the import of gunicorn.dirty, which imports concurrent.futures via asyncio. This happened before user code (like a config file with monkey.patch_all()) could run. The fix was to make the dirty module imports lazy - only importing when dirty workers are actually being started (in spawn_dirty_arbiter()). 
""" import subprocess import sys import textwrap import pytest try: import gevent HAS_GEVENT = True except ImportError: HAS_GEVENT = False pytestmark = pytest.mark.skipif(not HAS_GEVENT, reason="gevent not installed") class TestConcurrentFuturesImportOrder: """Test that concurrent.futures import timing doesn't break gevent patching.""" def test_concurrent_futures_not_imported_by_arbiter(self): """Test that importing gunicorn.arbiter does NOT import concurrent.futures. The dirty module (which uses asyncio and concurrent.futures) is now imported lazily to avoid breaking gevent patching. See: https://github.com/benoitc/gunicorn/discussions/3481 """ # Run in a subprocess to ensure clean import state code = textwrap.dedent(""" import sys # Verify concurrent.futures is not imported yet assert 'concurrent.futures' not in sys.modules, \ "concurrent.futures should not be imported yet" # Import gunicorn.arbiter import gunicorn.arbiter # Check if concurrent.futures is now imported cf_imported = 'concurrent.futures' in sys.modules print(f"RESULT:concurrent_futures_imported={cf_imported}") """) result = subprocess.run( [sys.executable, "-c", code], capture_output=True, text=True ) # Parse the result stdout = result.stdout.strip() assert "RESULT:concurrent_futures_imported=" in stdout, \ f"Test script failed: stderr={result.stderr}" imported = stdout.split("RESULT:concurrent_futures_imported=")[1] == "True" # concurrent.futures should NOT be imported by gunicorn.arbiter # The dirty module is now imported lazily assert not imported, ( "concurrent.futures should NOT be imported when gunicorn.arbiter is imported. " "The dirty module should be imported lazily." ) def test_gevent_patch_after_concurrent_futures_import_leaves_unpatched_lock(self): """Test that patching after concurrent.futures import leaves locks unpatched. 
This reproduces the issue from the gist where the _global_shutdown_lock in concurrent.futures.thread is not properly patched if concurrent.futures is imported before monkey.patch_all(). """ # Run in a subprocess to ensure clean import state code = textwrap.dedent(""" import sys # Simulate what happens with gunicorn v25: # concurrent.futures is imported BEFORE gevent patching import concurrent.futures from concurrent.futures import thread as futures_thread # Get a reference to the lock BEFORE patching lock_before_patch = futures_thread._global_shutdown_lock # Now apply gevent patching (simulating user's config file) from gevent import monkey monkey.patch_all() # Get the lock type AFTER patching from gevent.thread import LockType as GeventLockType # Check if the lock is a gevent lock is_gevent_lock = isinstance(lock_before_patch, GeventLockType) lock_type = type(lock_before_patch).__module__ print(f"RESULT:is_gevent_lock={is_gevent_lock}") print(f"RESULT:lock_module={lock_type}") """) result = subprocess.run( [sys.executable, "-c", code], capture_output=True, text=True ) stdout = result.stdout.strip() assert "RESULT:is_gevent_lock=" in stdout, \ f"Test script failed: stderr={result.stderr}" # Parse results lines = stdout.split("\n") is_gevent_lock = None lock_module = None for line in lines: if line.startswith("RESULT:is_gevent_lock="): is_gevent_lock = line.split("=")[1] == "True" elif line.startswith("RESULT:lock_module="): lock_module = line.split("=")[1] # Document: when concurrent.futures is imported before patching, # the _global_shutdown_lock is NOT a gevent lock - this is the bug assert is_gevent_lock is False, ( "Lock should NOT be a gevent lock when concurrent.futures " "was imported before patching. If this fails, gevent may have " "improved their patching." 
) assert lock_module == "_thread", ( f"Lock module should be _thread (unpatched), got {lock_module}" ) def test_gevent_patch_before_concurrent_futures_import_patches_lock(self): """Test that patching BEFORE concurrent.futures import works correctly. This shows the correct behavior: when monkey.patch_all() is called BEFORE importing concurrent.futures, the locks are properly patched. """ # Run in a subprocess to ensure clean import state code = textwrap.dedent(""" import sys # Apply gevent patching FIRST (correct order) from gevent import monkey monkey.patch_all() # Now import concurrent.futures import concurrent.futures from concurrent.futures import thread as futures_thread # Get a reference to the lock lock = futures_thread._global_shutdown_lock # Check if the lock is a gevent lock from gevent.thread import LockType as GeventLockType is_gevent_lock = isinstance(lock, GeventLockType) lock_type = type(lock).__module__ print(f"RESULT:is_gevent_lock={is_gevent_lock}") print(f"RESULT:lock_module={lock_type}") """) result = subprocess.run( [sys.executable, "-c", code], capture_output=True, text=True ) stdout = result.stdout.strip() assert "RESULT:is_gevent_lock=" in stdout, \ f"Test script failed: stderr={result.stderr}" # Parse results lines = stdout.split("\n") is_gevent_lock = None lock_module = None for line in lines: if line.startswith("RESULT:is_gevent_lock="): is_gevent_lock = line.split("=")[1] == "True" elif line.startswith("RESULT:lock_module="): lock_module = line.split("=")[1] # When patching happens BEFORE import, locks are properly patched assert is_gevent_lock is True, ( "Lock should be a gevent lock when patching happens before import" ) assert lock_module == "gevent.thread", ( f"Lock module should be gevent.thread, got {lock_module}" ) def test_gunicorn_gevent_worker_patching_works(self): """Integration test verifying gevent patching works with gunicorn. This simulates what happens when: 1. User starts gunicorn with gevent worker 2. 
gunicorn.arbiter is imported (does NOT import concurrent.futures) 3. User's config file runs with monkey.patch_all() 4. concurrent.futures is imported later (after patching) The result: concurrent.futures locks ARE properly patched. """ code = textwrap.dedent(""" import sys # Step 1: User starts gunicorn - gunicorn.arbiter gets imported # With the lazy import fix, this does NOT import concurrent.futures import gunicorn.arbiter # Step 2: Verify concurrent.futures was NOT imported yet assert 'concurrent.futures' not in sys.modules, \ "concurrent.futures should NOT have been imported by arbiter" # Step 3: Now user's config file runs with monkey.patch_all() # This happens BEFORE concurrent.futures is imported - correct order! from gevent import monkey monkey.patch_all() # Step 4: Now import concurrent.futures (after patching) from concurrent.futures import thread as futures_thread lock = futures_thread._global_shutdown_lock from gevent.thread import LockType as GeventLockType is_gevent_lock = isinstance(lock, GeventLockType) print(f"RESULT:is_gevent_lock={is_gevent_lock}") print(f"RESULT:lock_type={type(lock)}") """) result = subprocess.run( [sys.executable, "-c", code], capture_output=True, text=True ) stdout = result.stdout.strip() stderr = result.stderr.strip() # Allow for the test to run even if gevent isn't available in subprocess if "ModuleNotFoundError" in stderr or "ImportError" in stderr: pytest.skip("gevent not available in subprocess") assert "RESULT:is_gevent_lock=" in stdout, \ f"Test script failed: stdout={stdout}, stderr={stderr}" is_gevent_lock = "RESULT:is_gevent_lock=True" in stdout # The lock IS properly patched because: # 1. gunicorn.arbiter no longer imports concurrent.futures at module load # 2. monkey.patch_all() runs before concurrent.futures is imported # 3. 
concurrent.futures gets the patched threading primitives assert is_gevent_lock is True, ( "Lock should be a gevent lock when gunicorn.arbiter is imported " "before monkey.patch_all() - the dirty module should be lazily imported." ) def test_gevent_config_file_patching_scenario(self): """Test the exact scenario from the bug report gist. This reproduces the test case from: https://gist.github.com/markjm/9f724364619c519892e8111fe6520ca6 The gist simulates a gunicorn config file that: 1. Calls monkey.patch_all() 2. Checks if locks in concurrent.futures are properly patched With the fix, both locks (before and after importing concurrent.futures) should be gevent locks because monkey.patch_all() runs before any concurrent.futures import. """ code = textwrap.dedent(""" import sys # Simulate gunicorn startup - import arbiter first # (this should NOT import concurrent.futures anymore) import gunicorn.arbiter # === This simulates a gunicorn config file (like echo.py from the gist) === # Config file starts by patching from gevent import monkey monkey.patch_all() # print("[INFO] gevent.monkey.patch_all() called") # Now access concurrent.futures (after patching) from concurrent.futures import thread as futures_thread lock_after_patch = futures_thread._global_shutdown_lock # Also create a new lock to compare import threading new_lock = threading.Lock() from gevent.thread import LockType as GeventLockType import _thread # Check both locks after_is_gevent = isinstance(lock_after_patch, GeventLockType) after_module = type(lock_after_patch).__module__ new_is_gevent = isinstance(new_lock, GeventLockType) new_module = type(new_lock).__module__ # Print comparison table like the gist print("=== LOCK COMPARISON TABLE ===") print(f"CF Lock Type: {type(lock_after_patch)}") print(f"CF Lock Module: {after_module}") print(f"CF Is GeventLockType: {after_is_gevent}") print(f"New Lock Type: {type(new_lock)}") print(f"New Lock Module: {new_module}") print(f"New Is GeventLockType: 
{new_is_gevent}") # Results for parsing print(f"RESULT:cf_is_gevent={after_is_gevent}") print(f"RESULT:cf_module={after_module}") print(f"RESULT:new_is_gevent={new_is_gevent}") """) result = subprocess.run( [sys.executable, "-c", code], capture_output=True, text=True ) stdout = result.stdout.strip() stderr = result.stderr.strip() if "ModuleNotFoundError" in stderr or "ImportError" in stderr: pytest.skip("gevent not available in subprocess") assert "RESULT:cf_is_gevent=" in stdout, \ f"Test script failed: stdout={stdout}, stderr={stderr}" # Parse results cf_is_gevent = "RESULT:cf_is_gevent=True" in stdout new_is_gevent = "RESULT:new_is_gevent=True" in stdout # With the fix, BOTH locks should be gevent locks # This matches the expected v24 behavior from the gist assert cf_is_gevent is True, ( "concurrent.futures lock should be a gevent lock. " "This indicates monkey.patch_all() ran before concurrent.futures was imported." ) assert new_is_gevent is True, ( "New threading.Lock should be a gevent lock after monkey.patch_all()" ) benoitc-gunicorn-f5fb19e/tests/workers/test_geventlet.py000066400000000000000000000351301514360242400237310ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
import pytest import sys from unittest import mock def test_import(): """Test that the eventlet worker module can be imported.""" try: import eventlet except AttributeError: if (3, 13) > sys.version_info >= (3, 12): pytest.skip("Ignoring eventlet failures on Python 3.12") raise __import__('gunicorn.workers.geventlet') class TestVersionRequirement: """Tests for eventlet version requirement checks.""" def test_import_error_message(self): """Test that ImportError gives correct version message.""" with mock.patch.dict('sys.modules', {'eventlet': None}): # Clear cached module if present sys.modules.pop('gunicorn.workers.geventlet', None) with pytest.raises(RuntimeError, match="eventlet 0.40.3"): import importlib import gunicorn.workers.geventlet importlib.reload(gunicorn.workers.geventlet) def test_version_check_requires_0_40_3(self): """Test that version check requires eventlet 0.40.3 or higher.""" try: import eventlet except (ImportError, AttributeError): pytest.skip("eventlet not available") from packaging.version import parse as parse_version min_version = parse_version('0.40.3') current_version = parse_version(eventlet.__version__) # If we got this far, the import succeeded, meaning version is sufficient assert current_version >= min_version @pytest.fixture def eventlet_worker(): """Fixture to create an EventletWorker instance for testing.""" try: import eventlet except (ImportError, AttributeError): pytest.skip("eventlet not available") from gunicorn.workers.geventlet import EventletWorker # Create a minimal mock config cfg = mock.MagicMock() cfg.keepalive = 2 cfg.graceful_timeout = 30 cfg.is_ssl = False cfg.worker_connections = 1000 # Create worker with mocked dependencies worker = EventletWorker.__new__(EventletWorker) worker.cfg = cfg worker.alive = True worker.sockets = [] worker.log = mock.MagicMock() return worker class TestEventletWorker: """Tests for EventletWorker class.""" def test_worker_class_exists(self): """Test that EventletWorker class is properly 
defined.""" try: import eventlet except (ImportError, AttributeError): pytest.skip("eventlet not available") from gunicorn.workers.geventlet import EventletWorker from gunicorn.workers.base_async import AsyncWorker assert issubclass(EventletWorker, AsyncWorker) def test_patch_method_calls_use_hub(self, eventlet_worker): """Test that patch() calls hubs.use_hub(). hubs.use_hub() must be called in patch() (after fork) because it creates OS resources like kqueue that don't survive fork. """ from eventlet import hubs with mock.patch.object(hubs, 'use_hub') as mock_use_hub: with mock.patch('gunicorn.workers.geventlet.patch_sendfile'): eventlet_worker.patch() mock_use_hub.assert_called_once() def test_patch_method_calls_patch_sendfile(self, eventlet_worker): """Test that patch() calls patch_sendfile().""" from eventlet import hubs with mock.patch.object(hubs, 'use_hub'): with mock.patch('gunicorn.workers.geventlet.patch_sendfile') as mock_sf: eventlet_worker.patch() mock_sf.assert_called_once() def test_monkey_patch_called_at_import_time(self): """Test that monkey_patch is called at module import time. Note: hubs.use_hub() and eventlet.monkey_patch() are called at module import time (not in patch()) to ensure all imports are properly patched. This test verifies the module was patched by checking eventlet state. 
""" try: import eventlet except (ImportError, AttributeError): pytest.skip("eventlet not available") # Verify eventlet has been patched by checking that socket is patched import socket from eventlet.greenio import GreenSocket # After monkey patching, socket.socket should be GreenSocket assert socket.socket is GreenSocket def test_timeout_ctx_returns_eventlet_timeout(self, eventlet_worker): """Test that timeout_ctx() returns an eventlet.Timeout.""" import eventlet timeout = eventlet_worker.timeout_ctx() assert isinstance(timeout, eventlet.Timeout) def test_timeout_ctx_uses_keepalive_config(self, eventlet_worker): """Test that timeout_ctx() uses cfg.keepalive value.""" import eventlet eventlet_worker.cfg.keepalive = 5 with mock.patch.object(eventlet, 'Timeout') as mock_timeout: eventlet_worker.timeout_ctx() mock_timeout.assert_called_once_with(5, False) def test_timeout_ctx_with_no_keepalive(self, eventlet_worker): """Test that timeout_ctx() handles no keepalive (None or 0).""" import eventlet eventlet_worker.cfg.keepalive = 0 with mock.patch.object(eventlet, 'Timeout') as mock_timeout: eventlet_worker.timeout_ctx() mock_timeout.assert_called_once_with(None, False) def test_handle_quit_spawns_greenthread(self, eventlet_worker): """Test that handle_quit() spawns a greenthread.""" import eventlet with mock.patch.object(eventlet, 'spawn') as mock_spawn: eventlet_worker.handle_quit(None, None) mock_spawn.assert_called_once() def test_handle_usr1_spawns_greenthread(self, eventlet_worker): """Test that handle_usr1() spawns a greenthread.""" import eventlet with mock.patch.object(eventlet, 'spawn') as mock_spawn: eventlet_worker.handle_usr1(None, None) mock_spawn.assert_called_once() def test_handle_wraps_ssl_when_configured(self, eventlet_worker): """Test that handle() wraps socket with SSL when is_ssl is True.""" from gunicorn.workers import geventlet eventlet_worker.cfg.is_ssl = True mock_client = mock.MagicMock() mock_listener = mock.MagicMock() with 
mock.patch.object(geventlet, 'ssl_wrap_socket') as mock_ssl: mock_ssl.return_value = mock_client with mock.patch('gunicorn.workers.base_async.AsyncWorker.handle'): eventlet_worker.handle(mock_listener, mock_client, ('127.0.0.1', 8000)) mock_ssl.assert_called_once_with(mock_client, eventlet_worker.cfg) def test_handle_no_ssl_when_not_configured(self, eventlet_worker): """Test that handle() does not wrap SSL when is_ssl is False.""" from gunicorn.workers import geventlet eventlet_worker.cfg.is_ssl = False mock_client = mock.MagicMock() mock_listener = mock.MagicMock() with mock.patch.object(geventlet, 'ssl_wrap_socket') as mock_ssl: with mock.patch('gunicorn.workers.base_async.AsyncWorker.handle'): eventlet_worker.handle(mock_listener, mock_client, ('127.0.0.1', 8000)) mock_ssl.assert_not_called() class TestAlreadyHandled: """Tests for is_already_handled() method.""" def test_is_already_handled_new_style(self, eventlet_worker): """Test is_already_handled with eventlet >= 0.30.3 (WSGI_LOCAL).""" from gunicorn.workers import geventlet # Mock the new-style WSGI_LOCAL.already_handled mock_wsgi_local = mock.MagicMock() mock_wsgi_local.already_handled = True with mock.patch.object(geventlet, 'EVENTLET_WSGI_LOCAL', mock_wsgi_local): with pytest.raises(StopIteration): eventlet_worker.is_already_handled(mock.MagicMock()) def test_is_already_handled_old_style(self, eventlet_worker): """Test is_already_handled with eventlet < 0.30.3 (ALREADY_HANDLED).""" from gunicorn.workers import geventlet sentinel = object() with mock.patch.object(geventlet, 'EVENTLET_WSGI_LOCAL', None): with mock.patch.object(geventlet, 'EVENTLET_ALREADY_HANDLED', sentinel): with pytest.raises(StopIteration): eventlet_worker.is_already_handled(sentinel) def test_is_already_handled_returns_parent_result(self, eventlet_worker): """Test is_already_handled falls through to parent when not handled.""" from gunicorn.workers import geventlet with mock.patch.object(geventlet, 'EVENTLET_WSGI_LOCAL', None): with 
mock.patch.object(geventlet, 'EVENTLET_ALREADY_HANDLED', None): with mock.patch('gunicorn.workers.base_async.AsyncWorker.is_already_handled') as mock_parent: mock_parent.return_value = False result = eventlet_worker.is_already_handled(mock.MagicMock()) assert result is False mock_parent.assert_called_once() class TestPatchSendfile: """Tests for patch_sendfile() function.""" def test_patch_sendfile_adds_method_when_missing(self): """Test that patch_sendfile adds sendfile to GreenSocket if missing.""" try: import eventlet except (ImportError, AttributeError): pytest.skip("eventlet not available") from gunicorn.workers.geventlet import patch_sendfile, _eventlet_socket_sendfile from eventlet.greenio import GreenSocket # Remove sendfile if it exists original = getattr(GreenSocket, 'sendfile', None) if hasattr(GreenSocket, 'sendfile'): delattr(GreenSocket, 'sendfile') try: patch_sendfile() assert hasattr(GreenSocket, 'sendfile') assert GreenSocket.sendfile == _eventlet_socket_sendfile finally: # Restore original state if original is not None: GreenSocket.sendfile = original elif hasattr(GreenSocket, 'sendfile'): delattr(GreenSocket, 'sendfile') def test_patch_sendfile_preserves_existing_method(self): """Test that patch_sendfile does not override existing sendfile.""" try: import eventlet except (ImportError, AttributeError): pytest.skip("eventlet not available") from gunicorn.workers.geventlet import patch_sendfile from eventlet.greenio import GreenSocket # If sendfile exists, it should be preserved if hasattr(GreenSocket, 'sendfile'): original = GreenSocket.sendfile patch_sendfile() assert GreenSocket.sendfile == original class TestEventletSocketSendfile: """Tests for _eventlet_socket_sendfile() function.""" def test_sendfile_raises_on_non_blocking(self): """Test that sendfile raises ValueError for non-blocking sockets.""" try: import eventlet except (ImportError, AttributeError): pytest.skip("eventlet not available") from gunicorn.workers.geventlet import 
_eventlet_socket_sendfile mock_socket = mock.MagicMock() mock_socket.gettimeout.return_value = 0 with pytest.raises(ValueError, match="non-blocking"): _eventlet_socket_sendfile(mock_socket, mock.MagicMock()) def test_sendfile_seeks_to_offset(self): """Test that sendfile seeks to offset if provided.""" try: import eventlet except (ImportError, AttributeError): pytest.skip("eventlet not available") from gunicorn.workers.geventlet import _eventlet_socket_sendfile mock_socket = mock.MagicMock() mock_socket.gettimeout.return_value = 1 mock_file = mock.MagicMock() mock_file.read.return_value = b'' _eventlet_socket_sendfile(mock_socket, mock_file, offset=100) mock_file.seek.assert_any_call(100) def test_sendfile_returns_total_sent(self): """Test that sendfile returns the total bytes sent.""" try: import eventlet except (ImportError, AttributeError): pytest.skip("eventlet not available") from gunicorn.workers.geventlet import _eventlet_socket_sendfile mock_socket = mock.MagicMock() mock_socket.gettimeout.return_value = 1 mock_socket.send.return_value = 10 mock_file = mock.MagicMock() mock_file.read.side_effect = [b'x' * 10, b''] result = _eventlet_socket_sendfile(mock_socket, mock_file) assert result == 10 class TestEventletServe: """Tests for _eventlet_serve() function.""" def test_serve_creates_green_pool(self): """Test that _eventlet_serve creates a GreenPool.""" try: import eventlet except (ImportError, AttributeError): pytest.skip("eventlet not available") from gunicorn.workers.geventlet import _eventlet_serve mock_sock = mock.MagicMock() mock_sock.accept.side_effect = eventlet.StopServe() with mock.patch.object(eventlet.greenpool, 'GreenPool') as mock_pool: mock_pool_instance = mock.MagicMock() mock_pool.return_value = mock_pool_instance mock_pool_instance.waitall.return_value = None _eventlet_serve(mock_sock, mock.MagicMock(), 100) mock_pool.assert_called_once_with(100) class TestEventletStop: """Tests for _eventlet_stop() function.""" def 
test_stop_waits_for_client(self): """Test that _eventlet_stop waits for the client greenlet.""" try: import eventlet except (ImportError, AttributeError): pytest.skip("eventlet not available") from gunicorn.workers.geventlet import _eventlet_stop mock_client = mock.MagicMock() mock_server = mock.MagicMock() mock_conn = mock.MagicMock() _eventlet_stop(mock_client, mock_server, mock_conn) mock_client.wait.assert_called_once() mock_conn.close.assert_called_once() def test_stop_closes_connection_on_greenlet_exit(self): """Test that connection is closed even on GreenletExit.""" try: import eventlet import greenlet except (ImportError, AttributeError): pytest.skip("eventlet not available") from gunicorn.workers.geventlet import _eventlet_stop mock_client = mock.MagicMock() mock_client.wait.side_effect = greenlet.GreenletExit() mock_server = mock.MagicMock() mock_conn = mock.MagicMock() # Should not raise _eventlet_stop(mock_client, mock_server, mock_conn) mock_conn.close.assert_called_once() benoitc-gunicorn-f5fb19e/tests/workers/test_ggevent.py000066400000000000000000000175221514360242400234000ustar00rootroot00000000000000# # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. 
from unittest import mock import pytest try: import gevent HAS_GEVENT = True except ImportError: HAS_GEVENT = False pytestmark = pytest.mark.skipif(not HAS_GEVENT, reason="gevent not installed") def test_import(): __import__('gunicorn.workers.ggevent') def test_version_requirement(): """Test that gevent 24.10.1+ is required.""" from gunicorn.workers import ggevent from packaging.version import parse as parse_version assert parse_version(gevent.__version__) >= parse_version('24.10.1') class TestGeventWorkerInit: """Test GeventWorker initialization.""" def test_worker_has_no_server_class(self): """Test that GeventWorker has no server_class by default.""" from gunicorn.workers.ggevent import GeventWorker assert GeventWorker.server_class is None def test_worker_has_no_wsgi_handler(self): """Test that GeventWorker has no wsgi_handler by default.""" from gunicorn.workers.ggevent import GeventWorker assert GeventWorker.wsgi_handler is None def test_init_process_patches_and_reinits(self): """Test that init_process calls patch and reinits the hub.""" from gunicorn.workers.ggevent import GeventWorker worker = mock.Mock(spec=GeventWorker) worker.sockets = [] with mock.patch('gunicorn.workers.ggevent.hub') as mock_hub, \ mock.patch.object(GeventWorker.__bases__[0], 'init_process'): GeventWorker.init_process(worker) # Verify patch was called worker.patch.assert_called_once() mock_hub.reinit.assert_called_once() class TestGeventWorkerRun: """Test GeventWorker run method.""" def test_run_creates_stream_servers(self): """Test that run creates StreamServer instances for each socket.""" from gunicorn.workers.ggevent import GeventWorker worker = mock.Mock(spec=GeventWorker) worker.sockets = [mock.Mock()] worker.cfg = mock.Mock(is_ssl=False, workers=1, graceful_timeout=30) worker.server_class = None worker.worker_connections = 1000 # Make alive return True once, then False to exit the loop worker.alive = False with mock.patch('gunicorn.workers.ggevent.Pool') as mock_pool, \ 
mock.patch('gunicorn.workers.ggevent.StreamServer') as mock_server_cls, \ mock.patch('gunicorn.workers.ggevent.gevent') as mock_gevent: mock_server = mock.Mock() mock_server.pool = mock.Mock() mock_server.pool.free_count.return_value = mock_server.pool.size mock_server_cls.return_value = mock_server GeventWorker.run(worker) mock_server_cls.assert_called_once() mock_server.start.assert_called_once() mock_server.close.assert_called_once() def test_run_with_ssl(self): """Test that run configures SSL context when is_ssl is True.""" from gunicorn.workers.ggevent import GeventWorker worker = mock.Mock(spec=GeventWorker) worker.sockets = [mock.Mock()] worker.cfg = mock.Mock(is_ssl=True, workers=1, graceful_timeout=30) worker.server_class = None worker.worker_connections = 1000 worker.alive = False with mock.patch('gunicorn.workers.ggevent.Pool'), \ mock.patch('gunicorn.workers.ggevent.StreamServer') as mock_server_cls, \ mock.patch('gunicorn.workers.ggevent.gevent'), \ mock.patch('gunicorn.workers.ggevent.ssl_context') as mock_ssl_ctx: mock_server = mock.Mock() mock_server.pool = mock.Mock() mock_server.pool.free_count.return_value = mock_server.pool.size mock_server_cls.return_value = mock_server mock_ssl_ctx.return_value = mock.Mock() GeventWorker.run(worker) mock_ssl_ctx.assert_called_once_with(worker.cfg) # Verify ssl_context was passed to StreamServer call_kwargs = mock_server_cls.call_args[1] assert 'ssl_context' in call_kwargs class TestSignalHandling: """Test signal handling in GeventWorker.""" def test_handle_quit_spawns_greenlet(self): """Test that handle_quit spawns a greenlet instead of blocking.""" from gunicorn.workers.ggevent import GeventWorker worker = mock.Mock(spec=GeventWorker) with mock.patch('gunicorn.workers.ggevent.gevent') as mock_gevent: GeventWorker.handle_quit(worker, mock.Mock(), mock.Mock()) mock_gevent.spawn.assert_called_once() def test_handle_usr1_spawns_greenlet(self): """Test that handle_usr1 spawns a greenlet instead of blocking.""" 
from gunicorn.workers.ggevent import GeventWorker worker = mock.Mock(spec=GeventWorker) with mock.patch('gunicorn.workers.ggevent.gevent') as mock_gevent: GeventWorker.handle_usr1(worker, mock.Mock(), mock.Mock()) mock_gevent.spawn.assert_called_once() def test_notify_exits_on_parent_change(self): """Test that notify exits when parent PID changes.""" from gunicorn.workers.ggevent import GeventWorker worker = mock.Mock(spec=GeventWorker) worker.ppid = 1234 worker.log = mock.Mock() with mock.patch('gunicorn.workers.ggevent.os') as mock_os, \ mock.patch.object(GeventWorker.__bases__[0], 'notify'): mock_os.getppid.return_value = 5678 # Different PID with pytest.raises(SystemExit): GeventWorker.notify(worker) class TestPyWSGIWorker: """Test PyWSGI-based worker classes.""" def test_pywsgi_worker_has_server_class(self): """Test that GeventPyWSGIWorker has proper server_class.""" from gunicorn.workers.ggevent import GeventPyWSGIWorker, PyWSGIServer assert GeventPyWSGIWorker.server_class is PyWSGIServer def test_pywsgi_worker_has_handler(self): """Test that GeventPyWSGIWorker has proper wsgi_handler.""" from gunicorn.workers.ggevent import GeventPyWSGIWorker, PyWSGIHandler assert GeventPyWSGIWorker.wsgi_handler is PyWSGIHandler def test_pywsgi_handler_get_environ(self): """Test that PyWSGIHandler adds gunicorn-specific environ keys.""" from gunicorn.workers.ggevent import PyWSGIHandler handler = mock.Mock(spec=PyWSGIHandler) handler.socket = mock.Mock() handler.path = '/test/path' # Mock the parent get_environ with mock.patch.object(PyWSGIHandler.__bases__[0], 'get_environ', return_value={}): env = PyWSGIHandler.get_environ(handler) assert env['gunicorn.sock'] == handler.socket assert env['RAW_URI'] == '/test/path' class TestGeventResponse: """Test GeventResponse helper class.""" def test_response_attributes(self): """Test GeventResponse stores status, headers, and sent.""" from gunicorn.workers.ggevent import GeventResponse resp = GeventResponse('200 OK', {'Content-Type': 
'text/html'}, 1024) assert resp.status == '200 OK' assert resp.headers == {'Content-Type': 'text/html'} assert resp.sent == 1024 class TestTimeoutContext: """Test timeout context manager.""" def test_timeout_ctx_uses_keepalive(self): """Test that timeout_ctx uses cfg.keepalive.""" from gunicorn.workers.ggevent import GeventWorker worker = mock.Mock(spec=GeventWorker) worker.cfg = mock.Mock(keepalive=30) with mock.patch('gunicorn.workers.ggevent.gevent') as mock_gevent: mock_timeout = mock.Mock() mock_gevent.Timeout.return_value = mock_timeout result = GeventWorker.timeout_ctx(worker) mock_gevent.Timeout.assert_called_once_with(30, False) assert result == mock_timeout benoitc-gunicorn-f5fb19e/tox.ini000066400000000000000000000026771514360242400170120ustar00rootroot00000000000000[tox] envlist = py{312,313}, lint, pycodestyle, run-entrypoint, run-module, [testenv] package = editable commands = pytest --cov=gunicorn {posargs} deps = -rrequirements_test.txt [testenv:run-entrypoint] package = wheel deps = # entry point: console script (provided by setuptools from pyproject.toml) commands = python -c 'import subprocess; cmd_out = subprocess.check_output(["gunicorn", "--version"])[:79].decode("utf-8", errors="replace"); print(cmd_out); assert cmd_out.startswith("gunicorn ")' [testenv:run-module] package = wheel deps = # runpy (provided by module.__main__) commands = python -c 'import sys,subprocess; cmd_out = subprocess.check_output([sys.executable, "-m", "gunicorn", "--version"])[:79].decode("utf-8", errors="replace"); print(cmd_out); assert cmd_out.startswith("gunicorn ")' [testenv:lint] no_package = true commands = pylint -j0 \ --max-line-length=120 \ gunicorn \ tests/test_arbiter.py \ tests/test_config.py \ tests/test_gthread.py \ tests/test_http.py \ tests/test_invalid_requests.py \ tests/test_logger.py \ tests/test_pidfile.py \ tests/test_sock.py \ tests/test_ssl.py \ tests/test_statsd.py \ tests/test_systemd.py \ tests/test_util.py \ tests/test_valid_requests.py 
deps = pylint==3.3.2 [testenv:pycodestyle] no_package = true commands = pycodestyle gunicorn deps = pycodestyle [pycodestyle] max-line-length = 120 ignore = E129,W503,W504,W606 benoitc-gunicorn-f5fb19e/uv.lock000066400000000000000000004017061514360242400167770ustar00rootroot00000000000000version = 1 revision = 3 requires-python = ">=3.10" [[package]] name = "backports-asyncio-runner" version = "1.2.0" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/8e/ff/70dca7d7cb1cbc0edb2c6cc0c38b65cba36cccc491eca64cabd5fe7f8670/backports_asyncio_runner-1.2.0.tar.gz", hash = "sha256:a5aa7b2b7d8f8bfcaa2b57313f70792df84e32a2a746f585213373f900b42162", size = 69893, upload-time = "2025-07-02T02:27:15.685Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/a0/59/76ab57e3fe74484f48a53f8e337171b4a2349e506eabe136d7e01d059086/backports_asyncio_runner-1.2.0-py3-none-any.whl", hash = "sha256:0da0a936a8aeb554eccb426dc55af3ba63bcdc69fa1a600b5bb305413a4477b5", size = 12313, upload-time = "2025-07-02T02:27:14.263Z" }, ] [[package]] name = "cffi" version = "2.0.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pycparser", marker = "implementation_name != 'PyPy'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/e2/cc/027d7fb82e58c48ea717149b03bcadcbdc293553edb283af792bd4bcbb3f/cffi-2.0.0-cp310-cp310-win32.whl", hash = "sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a", size = 172184, upload-time = "2025-09-08T23:22:23.328Z" }, { url = "https://files.pythonhosted.org/packages/33/fa/072dd15ae27fbb4e06b437eb6e944e75b068deb09e2a2826039e49ee2045/cffi-2.0.0-cp310-cp310-win_amd64.whl", 
hash = "sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739", size = 182790, upload-time = "2025-09-08T23:22:24.752Z" }, { url = "https://files.pythonhosted.org/packages/2b/c0/015b25184413d7ab0a410775fdb4a50fca20f5589b5dab1dbbfa3baad8ce/cffi-2.0.0-cp311-cp311-win32.whl", hash = "sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5", size = 172076, upload-time = "2025-09-08T23:22:40.95Z" }, { url = "https://files.pythonhosted.org/packages/ae/8f/dc5531155e7070361eb1b7e4c1a9d896d0cb21c49f807a6c03fd63fc877e/cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5", size = 182820, upload-time = "2025-09-08T23:22:42.463Z" }, { url = "https://files.pythonhosted.org/packages/95/5c/1b493356429f9aecfd56bc171285a4c4ac8697f76e9bbbbb105e537853a1/cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d", size = 177635, upload-time = "2025-09-08T23:22:43.623Z" }, { url = "https://files.pythonhosted.org/packages/7b/2b/2b6435f76bfeb6bbf055596976da087377ede68df465419d192acf00c437/cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18", size = 172932, upload-time = "2025-09-08T23:22:57.188Z" }, { url = "https://files.pythonhosted.org/packages/f8/ed/13bd4418627013bec4ed6e54283b1959cf6db888048c7cf4b4c3b5b36002/cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5", size = 183557, upload-time = "2025-09-08T23:22:58.351Z" }, { url = "https://files.pythonhosted.org/packages/95/31/9f7f93ad2f8eff1dbc1c3656d7ca5bfd8fb52c9d786b4dcf19b2d02217fa/cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6", size = 177762, upload-time = "2025-09-08T23:22:59.668Z" }, { url = 
"https://files.pythonhosted.org/packages/eb/6d/bf9bda840d5f1dfdbf0feca87fbdb64a918a69bca42cfa0ba7b137c48cb8/cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27", size = 172909, upload-time = "2025-09-08T23:23:14.32Z" }, { url = "https://files.pythonhosted.org/packages/37/18/6519e1ee6f5a1e579e04b9ddb6f1676c17368a7aba48299c3759bbc3c8b3/cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75", size = 183402, upload-time = "2025-09-08T23:23:15.535Z" }, { url = "https://files.pythonhosted.org/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780, upload-time = "2025-09-08T23:23:16.761Z" }, { url = "https://files.pythonhosted.org/packages/3e/aa/df335faa45b395396fcbc03de2dfcab242cd61a9900e914fe682a59170b1/cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f", size = 175328, upload-time = "2025-09-08T23:23:44.61Z" }, { url = "https://files.pythonhosted.org/packages/bb/92/882c2d30831744296ce713f0feb4c1cd30f346ef747b530b5318715cc367/cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25", size = 185650, upload-time = "2025-09-08T23:23:45.848Z" }, { url = "https://files.pythonhosted.org/packages/9f/2c/98ece204b9d35a7366b5b2c6539c350313ca13932143e79dc133ba757104/cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad", size = 180687, upload-time = "2025-09-08T23:23:47.105Z" }, { url = "https://files.pythonhosted.org/packages/a0/1d/ec1a60bd1a10daa292d3cd6bb0b359a81607154fb8165f3ec95fe003b85c/cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e", size = 
180487, upload-time = "2025-09-08T23:23:40.423Z" }, { url = "https://files.pythonhosted.org/packages/bf/41/4c1168c74fac325c0c8156f04b6749c8b6a8f405bbf91413ba088359f60d/cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6", size = 191726, upload-time = "2025-09-08T23:23:41.742Z" }, { url = "https://files.pythonhosted.org/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195, upload-time = "2025-09-08T23:23:43.004Z" }, ] [[package]] name = "colorama" version = "0.4.6" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, ] [[package]] name = "coverage" version = "7.13.1" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/23/f9/e92df5e07f3fc8d4c7f9a0f146ef75446bf870351cd37b788cf5897f8079/coverage-7.13.1.tar.gz", hash = "sha256:b7593fe7eb5feaa3fbb461ac79aac9f9fc0387a5ca8080b0c6fe2ca27b091afd", size = 825862, upload-time = "2025-12-28T15:42:56.969Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/2d/9a/3742e58fd04b233df95c012ee9f3dfe04708a5e1d32613bd2d47d4e1be0d/coverage-7.13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e1fa280b3ad78eea5be86f94f461c04943d942697e0dac889fa18fff8f5f9147", size = 
218633, upload-time = "2025-12-28T15:40:10.165Z" }, { url = "https://files.pythonhosted.org/packages/7e/45/7e6bdc94d89cd7c8017ce735cf50478ddfe765d4fbf0c24d71d30ea33d7a/coverage-7.13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c3d8c679607220979434f494b139dfb00131ebf70bb406553d69c1ff01a5c33d", size = 219147, upload-time = "2025-12-28T15:40:12.069Z" }, { url = "https://files.pythonhosted.org/packages/f7/38/0d6a258625fd7f10773fe94097dc16937a5f0e3e0cdf3adef67d3ac6baef/coverage-7.13.1-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:339dc63b3eba969067b00f41f15ad161bf2946613156fb131266d8debc8e44d0", size = 245894, upload-time = "2025-12-28T15:40:13.556Z" }, { url = "https://files.pythonhosted.org/packages/27/58/409d15ea487986994cbd4d06376e9860e9b157cfbfd402b1236770ab8dd2/coverage-7.13.1-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:db622b999ffe49cb891f2fff3b340cdc2f9797d01a0a202a0973ba2562501d90", size = 247721, upload-time = "2025-12-28T15:40:15.37Z" }, { url = "https://files.pythonhosted.org/packages/da/bf/6e8056a83fd7a96c93341f1ffe10df636dd89f26d5e7b9ca511ce3bcf0df/coverage-7.13.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1443ba9acbb593fa7c1c29e011d7c9761545fe35e7652e85ce7f51a16f7e08d", size = 249585, upload-time = "2025-12-28T15:40:17.226Z" }, { url = "https://files.pythonhosted.org/packages/f4/15/e1daff723f9f5959acb63cbe35b11203a9df77ee4b95b45fffd38b318390/coverage-7.13.1-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c832ec92c4499ac463186af72f9ed4d8daec15499b16f0a879b0d1c8e5cf4a3b", size = 246597, upload-time = "2025-12-28T15:40:19.028Z" }, { url = "https://files.pythonhosted.org/packages/74/a6/1efd31c5433743a6ddbc9d37ac30c196bb07c7eab3d74fbb99b924c93174/coverage-7.13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:562ec27dfa3f311e0db1ba243ec6e5f6ab96b1edfcfc6cf86f28038bc4961ce6", size = 247626, upload-time = "2025-12-28T15:40:20.846Z" }, { url = "https://files.pythonhosted.org/packages/6d/9f/1609267dd3e749f57fdd66ca6752567d1c13b58a20a809dc409b263d0b5f/coverage-7.13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:4de84e71173d4dada2897e5a0e1b7877e5eefbfe0d6a44edee6ce31d9b8ec09e", size = 245629, upload-time = "2025-12-28T15:40:22.397Z" }, { url = "https://files.pythonhosted.org/packages/e2/f6/6815a220d5ec2466383d7cc36131b9fa6ecbe95c50ec52a631ba733f306a/coverage-7.13.1-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:a5a68357f686f8c4d527a2dc04f52e669c2fc1cbde38f6f7eb6a0e58cbd17cae", size = 245901, upload-time = "2025-12-28T15:40:23.836Z" }, { url = "https://files.pythonhosted.org/packages/ac/58/40576554cd12e0872faf6d2c0eb3bc85f71d78427946ddd19ad65201e2c0/coverage-7.13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:77cc258aeb29a3417062758975521eae60af6f79e930d6993555eeac6a8eac29", size = 246505, upload-time = "2025-12-28T15:40:25.421Z" }, { url = "https://files.pythonhosted.org/packages/3b/77/9233a90253fba576b0eee81707b5781d0e21d97478e5377b226c5b096c0f/coverage-7.13.1-cp310-cp310-win32.whl", hash = "sha256:bb4f8c3c9a9f34423dba193f241f617b08ffc63e27f67159f60ae6baf2dcfe0f", size = 221257, upload-time = "2025-12-28T15:40:27.217Z" }, { url = "https://files.pythonhosted.org/packages/e0/43/e842ff30c1a0a623ec80db89befb84a3a7aad7bfe44a6ea77d5a3e61fedd/coverage-7.13.1-cp310-cp310-win_amd64.whl", hash = "sha256:c8e2706ceb622bc63bac98ebb10ef5da80ed70fbd8a7999a5076de3afaef0fb1", size = 222191, upload-time = "2025-12-28T15:40:28.916Z" }, { url = "https://files.pythonhosted.org/packages/b4/9b/77baf488516e9ced25fc215a6f75d803493fc3f6a1a1227ac35697910c2a/coverage-7.13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a55d509a1dc5a5b708b5dad3b5334e07a16ad4c2185e27b40e4dba796ab7f88", size = 218755, upload-time = "2025-12-28T15:40:30.812Z" }, { url = 
"https://files.pythonhosted.org/packages/d7/cd/7ab01154e6eb79ee2fab76bf4d89e94c6648116557307ee4ebbb85e5c1bf/coverage-7.13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4d010d080c4888371033baab27e47c9df7d6fb28d0b7b7adf85a4a49be9298b3", size = 219257, upload-time = "2025-12-28T15:40:32.333Z" }, { url = "https://files.pythonhosted.org/packages/01/d5/b11ef7863ffbbdb509da0023fad1e9eda1c0eaea61a6d2ea5b17d4ac706e/coverage-7.13.1-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d938b4a840fb1523b9dfbbb454f652967f18e197569c32266d4d13f37244c3d9", size = 249657, upload-time = "2025-12-28T15:40:34.1Z" }, { url = "https://files.pythonhosted.org/packages/f7/7c/347280982982383621d29b8c544cf497ae07ac41e44b1ca4903024131f55/coverage-7.13.1-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:bf100a3288f9bb7f919b87eb84f87101e197535b9bd0e2c2b5b3179633324fee", size = 251581, upload-time = "2025-12-28T15:40:36.131Z" }, { url = "https://files.pythonhosted.org/packages/82/f6/ebcfed11036ade4c0d75fa4453a6282bdd225bc073862766eec184a4c643/coverage-7.13.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef6688db9bf91ba111ae734ba6ef1a063304a881749726e0d3575f5c10a9facf", size = 253691, upload-time = "2025-12-28T15:40:37.626Z" }, { url = "https://files.pythonhosted.org/packages/02/92/af8f5582787f5d1a8b130b2dcba785fa5e9a7a8e121a0bb2220a6fdbdb8a/coverage-7.13.1-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0b609fc9cdbd1f02e51f67f51e5aee60a841ef58a68d00d5ee2c0faf357481a3", size = 249799, upload-time = "2025-12-28T15:40:39.47Z" }, { url = "https://files.pythonhosted.org/packages/24/aa/0e39a2a3b16eebf7f193863323edbff38b6daba711abaaf807d4290cf61a/coverage-7.13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c43257717611ff5e9a1d79dce8e47566235ebda63328718d9b65dd640bc832ef", size = 251389, upload-time = 
"2025-12-28T15:40:40.954Z" }, { url = "https://files.pythonhosted.org/packages/73/46/7f0c13111154dc5b978900c0ccee2e2ca239b910890e674a77f1363d483e/coverage-7.13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e09fbecc007f7b6afdfb3b07ce5bd9f8494b6856dd4f577d26c66c391b829851", size = 249450, upload-time = "2025-12-28T15:40:42.489Z" }, { url = "https://files.pythonhosted.org/packages/ac/ca/e80da6769e8b669ec3695598c58eef7ad98b0e26e66333996aee6316db23/coverage-7.13.1-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:a03a4f3a19a189919c7055098790285cc5c5b0b3976f8d227aea39dbf9f8bfdb", size = 249170, upload-time = "2025-12-28T15:40:44.279Z" }, { url = "https://files.pythonhosted.org/packages/af/18/9e29baabdec1a8644157f572541079b4658199cfd372a578f84228e860de/coverage-7.13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3820778ea1387c2b6a818caec01c63adc5b3750211af6447e8dcfb9b6f08dbba", size = 250081, upload-time = "2025-12-28T15:40:45.748Z" }, { url = "https://files.pythonhosted.org/packages/00/f8/c3021625a71c3b2f516464d322e41636aea381018319050a8114105872ee/coverage-7.13.1-cp311-cp311-win32.whl", hash = "sha256:ff10896fa55167371960c5908150b434b71c876dfab97b69478f22c8b445ea19", size = 221281, upload-time = "2025-12-28T15:40:47.232Z" }, { url = "https://files.pythonhosted.org/packages/27/56/c216625f453df6e0559ed666d246fcbaaa93f3aa99eaa5080cea1229aa3d/coverage-7.13.1-cp311-cp311-win_amd64.whl", hash = "sha256:a998cc0aeeea4c6d5622a3754da5a493055d2d95186bad877b0a34ea6e6dbe0a", size = 222215, upload-time = "2025-12-28T15:40:49.19Z" }, { url = "https://files.pythonhosted.org/packages/5c/9a/be342e76f6e531cae6406dc46af0d350586f24d9b67fdfa6daee02df71af/coverage-7.13.1-cp311-cp311-win_arm64.whl", hash = "sha256:fea07c1a39a22614acb762e3fbbb4011f65eedafcb2948feeef641ac78b4ee5c", size = 220886, upload-time = "2025-12-28T15:40:51.067Z" }, { url = 
"https://files.pythonhosted.org/packages/ce/8a/87af46cccdfa78f53db747b09f5f9a21d5fc38d796834adac09b30a8ce74/coverage-7.13.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6f34591000f06e62085b1865c9bc5f7858df748834662a51edadfd2c3bfe0dd3", size = 218927, upload-time = "2025-12-28T15:40:52.814Z" }, { url = "https://files.pythonhosted.org/packages/82/a8/6e22fdc67242a4a5a153f9438d05944553121c8f4ba70cb072af4c41362e/coverage-7.13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b67e47c5595b9224599016e333f5ec25392597a89d5744658f837d204e16c63e", size = 219288, upload-time = "2025-12-28T15:40:54.262Z" }, { url = "https://files.pythonhosted.org/packages/d0/0a/853a76e03b0f7c4375e2ca025df45c918beb367f3e20a0a8e91967f6e96c/coverage-7.13.1-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3e7b8bd70c48ffb28461ebe092c2345536fb18bbbf19d287c8913699735f505c", size = 250786, upload-time = "2025-12-28T15:40:56.059Z" }, { url = "https://files.pythonhosted.org/packages/ea/b4/694159c15c52b9f7ec7adf49d50e5f8ee71d3e9ef38adb4445d13dd56c20/coverage-7.13.1-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:c223d078112e90dc0e5c4e35b98b9584164bea9fbbd221c0b21c5241f6d51b62", size = 253543, upload-time = "2025-12-28T15:40:57.585Z" }, { url = "https://files.pythonhosted.org/packages/96/b2/7f1f0437a5c855f87e17cf5d0dc35920b6440ff2b58b1ba9788c059c26c8/coverage-7.13.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:794f7c05af0763b1bbd1b9e6eff0e52ad068be3b12cd96c87de037b01390c968", size = 254635, upload-time = "2025-12-28T15:40:59.443Z" }, { url = "https://files.pythonhosted.org/packages/e9/d1/73c3fdb8d7d3bddd9473c9c6a2e0682f09fc3dfbcb9c3f36412a7368bcab/coverage-7.13.1-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0642eae483cc8c2902e4af7298bf886d605e80f26382124cddc3967c2a3df09e", size = 251202, upload-time = 
"2025-12-28T15:41:01.328Z" }, { url = "https://files.pythonhosted.org/packages/66/3c/f0edf75dcc152f145d5598329e864bbbe04ab78660fe3e8e395f9fff010f/coverage-7.13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9f5e772ed5fef25b3de9f2008fe67b92d46831bd2bc5bdc5dd6bfd06b83b316f", size = 252566, upload-time = "2025-12-28T15:41:03.319Z" }, { url = "https://files.pythonhosted.org/packages/17/b3/e64206d3c5f7dcbceafd14941345a754d3dbc78a823a6ed526e23b9cdaab/coverage-7.13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:45980ea19277dc0a579e432aef6a504fe098ef3a9032ead15e446eb0f1191aee", size = 250711, upload-time = "2025-12-28T15:41:06.411Z" }, { url = "https://files.pythonhosted.org/packages/dc/ad/28a3eb970a8ef5b479ee7f0c484a19c34e277479a5b70269dc652b730733/coverage-7.13.1-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:e4f18eca6028ffa62adbd185a8f1e1dd242f2e68164dba5c2b74a5204850b4cf", size = 250278, upload-time = "2025-12-28T15:41:08.285Z" }, { url = "https://files.pythonhosted.org/packages/54/e3/c8f0f1a93133e3e1291ca76cbb63565bd4b5c5df63b141f539d747fff348/coverage-7.13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f8dca5590fec7a89ed6826fce625595279e586ead52e9e958d3237821fbc750c", size = 252154, upload-time = "2025-12-28T15:41:09.969Z" }, { url = "https://files.pythonhosted.org/packages/d0/bf/9939c5d6859c380e405b19e736321f1c7d402728792f4c752ad1adcce005/coverage-7.13.1-cp312-cp312-win32.whl", hash = "sha256:ff86d4e85188bba72cfb876df3e11fa243439882c55957184af44a35bd5880b7", size = 221487, upload-time = "2025-12-28T15:41:11.468Z" }, { url = "https://files.pythonhosted.org/packages/fa/dc/7282856a407c621c2aad74021680a01b23010bb8ebf427cf5eacda2e876f/coverage-7.13.1-cp312-cp312-win_amd64.whl", hash = "sha256:16cc1da46c04fb0fb128b4dc430b78fa2aba8a6c0c9f8eb391fd5103409a6ac6", size = 222299, upload-time = "2025-12-28T15:41:13.386Z" }, { url = 
"https://files.pythonhosted.org/packages/10/79/176a11203412c350b3e9578620013af35bcdb79b651eb976f4a4b32044fa/coverage-7.13.1-cp312-cp312-win_arm64.whl", hash = "sha256:8d9bc218650022a768f3775dd7fdac1886437325d8d295d923ebcfef4892ad5c", size = 220941, upload-time = "2025-12-28T15:41:14.975Z" }, { url = "https://files.pythonhosted.org/packages/a3/a4/e98e689347a1ff1a7f67932ab535cef82eb5e78f32a9e4132e114bbb3a0a/coverage-7.13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:cb237bfd0ef4d5eb6a19e29f9e528ac67ac3be932ea6b44fb6cc09b9f3ecff78", size = 218951, upload-time = "2025-12-28T15:41:16.653Z" }, { url = "https://files.pythonhosted.org/packages/32/33/7cbfe2bdc6e2f03d6b240d23dc45fdaf3fd270aaf2d640be77b7f16989ab/coverage-7.13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1dcb645d7e34dcbcc96cd7c132b1fc55c39263ca62eb961c064eb3928997363b", size = 219325, upload-time = "2025-12-28T15:41:18.609Z" }, { url = "https://files.pythonhosted.org/packages/59/f6/efdabdb4929487baeb7cb2a9f7dac457d9356f6ad1b255be283d58b16316/coverage-7.13.1-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3d42df8201e00384736f0df9be2ced39324c3907607d17d50d50116c989d84cd", size = 250309, upload-time = "2025-12-28T15:41:20.629Z" }, { url = "https://files.pythonhosted.org/packages/12/da/91a52516e9d5aea87d32d1523f9cdcf7a35a3b298e6be05d6509ba3cfab2/coverage-7.13.1-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fa3edde1aa8807de1d05934982416cb3ec46d1d4d91e280bcce7cca01c507992", size = 252907, upload-time = "2025-12-28T15:41:22.257Z" }, { url = "https://files.pythonhosted.org/packages/75/38/f1ea837e3dc1231e086db1638947e00d264e7e8c41aa8ecacf6e1e0c05f4/coverage-7.13.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9edd0e01a343766add6817bc448408858ba6b489039eaaa2018474e4001651a4", size = 254148, upload-time = "2025-12-28T15:41:23.87Z" }, { url = 
"https://files.pythonhosted.org/packages/7f/43/f4f16b881aaa34954ba446318dea6b9ed5405dd725dd8daac2358eda869a/coverage-7.13.1-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:985b7836931d033570b94c94713c6dba5f9d3ff26045f72c3e5dbc5fe3361e5a", size = 250515, upload-time = "2025-12-28T15:41:25.437Z" }, { url = "https://files.pythonhosted.org/packages/84/34/8cba7f00078bd468ea914134e0144263194ce849ec3baad187ffb6203d1c/coverage-7.13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ffed1e4980889765c84a5d1a566159e363b71d6b6fbaf0bebc9d3c30bc016766", size = 252292, upload-time = "2025-12-28T15:41:28.459Z" }, { url = "https://files.pythonhosted.org/packages/8c/a4/cffac66c7652d84ee4ac52d3ccb94c015687d3b513f9db04bfcac2ac800d/coverage-7.13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8842af7f175078456b8b17f1b73a0d16a65dcbdc653ecefeb00a56b3c8c298c4", size = 250242, upload-time = "2025-12-28T15:41:30.02Z" }, { url = "https://files.pythonhosted.org/packages/f4/78/9a64d462263dde416f3c0067efade7b52b52796f489b1037a95b0dc389c9/coverage-7.13.1-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:ccd7a6fca48ca9c131d9b0a2972a581e28b13416fc313fb98b6d24a03ce9a398", size = 250068, upload-time = "2025-12-28T15:41:32.007Z" }, { url = "https://files.pythonhosted.org/packages/69/c8/a8994f5fece06db7c4a97c8fc1973684e178599b42e66280dded0524ef00/coverage-7.13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0403f647055de2609be776965108447deb8e384fe4a553c119e3ff6bfbab4784", size = 251846, upload-time = "2025-12-28T15:41:33.946Z" }, { url = "https://files.pythonhosted.org/packages/cc/f7/91fa73c4b80305c86598a2d4e54ba22df6bf7d0d97500944af7ef155d9f7/coverage-7.13.1-cp313-cp313-win32.whl", hash = "sha256:549d195116a1ba1e1ae2f5ca143f9777800f6636eab917d4f02b5310d6d73461", size = 221512, upload-time = "2025-12-28T15:41:35.519Z" }, { url = 
"https://files.pythonhosted.org/packages/45/0b/0768b4231d5a044da8f75e097a8714ae1041246bb765d6b5563bab456735/coverage-7.13.1-cp313-cp313-win_amd64.whl", hash = "sha256:5899d28b5276f536fcf840b18b61a9fce23cc3aec1d114c44c07fe94ebeaa500", size = 222321, upload-time = "2025-12-28T15:41:37.371Z" }, { url = "https://files.pythonhosted.org/packages/9b/b8/bdcb7253b7e85157282450262008f1366aa04663f3e3e4c30436f596c3e2/coverage-7.13.1-cp313-cp313-win_arm64.whl", hash = "sha256:868a2fae76dfb06e87291bcbd4dcbcc778a8500510b618d50496e520bd94d9b9", size = 220949, upload-time = "2025-12-28T15:41:39.553Z" }, { url = "https://files.pythonhosted.org/packages/70/52/f2be52cc445ff75ea8397948c96c1b4ee14f7f9086ea62fc929c5ae7b717/coverage-7.13.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:67170979de0dacac3f3097d02b0ad188d8edcea44ccc44aaa0550af49150c7dc", size = 219643, upload-time = "2025-12-28T15:41:41.567Z" }, { url = "https://files.pythonhosted.org/packages/47/79/c85e378eaa239e2edec0c5523f71542c7793fe3340954eafb0bc3904d32d/coverage-7.13.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f80e2bb21bfab56ed7405c2d79d34b5dc0bc96c2c1d2a067b643a09fb756c43a", size = 219997, upload-time = "2025-12-28T15:41:43.418Z" }, { url = "https://files.pythonhosted.org/packages/fe/9b/b1ade8bfb653c0bbce2d6d6e90cc6c254cbb99b7248531cc76253cb4da6d/coverage-7.13.1-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:f83351e0f7dcdb14d7326c3d8d8c4e915fa685cbfdc6281f9470d97a04e9dfe4", size = 261296, upload-time = "2025-12-28T15:41:45.207Z" }, { url = "https://files.pythonhosted.org/packages/1f/af/ebf91e3e1a2473d523e87e87fd8581e0aa08741b96265730e2d79ce78d8d/coverage-7.13.1-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:bb3f6562e89bad0110afbe64e485aac2462efdce6232cdec7862a095dc3412f6", size = 263363, upload-time = "2025-12-28T15:41:47.163Z" }, { url = 
"https://files.pythonhosted.org/packages/c4/8b/fb2423526d446596624ac7fde12ea4262e66f86f5120114c3cfd0bb2befa/coverage-7.13.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:77545b5dcda13b70f872c3b5974ac64c21d05e65b1590b441c8560115dc3a0d1", size = 265783, upload-time = "2025-12-28T15:41:49.03Z" }, { url = "https://files.pythonhosted.org/packages/9b/26/ef2adb1e22674913b89f0fe7490ecadcef4a71fa96f5ced90c60ec358789/coverage-7.13.1-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a4d240d260a1aed814790bbe1f10a5ff31ce6c21bc78f0da4a1e8268d6c80dbd", size = 260508, upload-time = "2025-12-28T15:41:51.035Z" }, { url = "https://files.pythonhosted.org/packages/ce/7d/f0f59b3404caf662e7b5346247883887687c074ce67ba453ea08c612b1d5/coverage-7.13.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:d2287ac9360dec3837bfdad969963a5d073a09a85d898bd86bea82aa8876ef3c", size = 263357, upload-time = "2025-12-28T15:41:52.631Z" }, { url = "https://files.pythonhosted.org/packages/1a/b1/29896492b0b1a047604d35d6fa804f12818fa30cdad660763a5f3159e158/coverage-7.13.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:0d2c11f3ea4db66b5cbded23b20185c35066892c67d80ec4be4bab257b9ad1e0", size = 260978, upload-time = "2025-12-28T15:41:54.589Z" }, { url = "https://files.pythonhosted.org/packages/48/f2/971de1238a62e6f0a4128d37adadc8bb882ee96afbe03ff1570291754629/coverage-7.13.1-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:3fc6a169517ca0d7ca6846c3c5392ef2b9e38896f61d615cb75b9e7134d4ee1e", size = 259877, upload-time = "2025-12-28T15:41:56.263Z" }, { url = "https://files.pythonhosted.org/packages/6a/fc/0474efcbb590ff8628830e9aaec5f1831594874360e3251f1fdec31d07a3/coverage-7.13.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d10a2ed46386e850bb3de503a54f9fe8192e5917fcbb143bfef653a9355e9a53", size = 262069, upload-time = "2025-12-28T15:41:58.093Z" }, { url = 
"https://files.pythonhosted.org/packages/88/4f/3c159b7953db37a7b44c0eab8a95c37d1aa4257c47b4602c04022d5cb975/coverage-7.13.1-cp313-cp313t-win32.whl", hash = "sha256:75a6f4aa904301dab8022397a22c0039edc1f51e90b83dbd4464b8a38dc87842", size = 222184, upload-time = "2025-12-28T15:41:59.763Z" }, { url = "https://files.pythonhosted.org/packages/58/a5/6b57d28f81417f9335774f20679d9d13b9a8fb90cd6160957aa3b54a2379/coverage-7.13.1-cp313-cp313t-win_amd64.whl", hash = "sha256:309ef5706e95e62578cda256b97f5e097916a2c26247c287bbe74794e7150df2", size = 223250, upload-time = "2025-12-28T15:42:01.52Z" }, { url = "https://files.pythonhosted.org/packages/81/7c/160796f3b035acfbb58be80e02e484548595aa67e16a6345e7910ace0a38/coverage-7.13.1-cp313-cp313t-win_arm64.whl", hash = "sha256:92f980729e79b5d16d221038dbf2e8f9a9136afa072f9d5d6ed4cb984b126a09", size = 221521, upload-time = "2025-12-28T15:42:03.275Z" }, { url = "https://files.pythonhosted.org/packages/aa/8e/ba0e597560c6563fc0adb902fda6526df5d4aa73bb10adf0574d03bd2206/coverage-7.13.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:97ab3647280d458a1f9adb85244e81587505a43c0c7cff851f5116cd2814b894", size = 218996, upload-time = "2025-12-28T15:42:04.978Z" }, { url = "https://files.pythonhosted.org/packages/6b/8e/764c6e116f4221dc7aa26c4061181ff92edb9c799adae6433d18eeba7a14/coverage-7.13.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:8f572d989142e0908e6acf57ad1b9b86989ff057c006d13b76c146ec6a20216a", size = 219326, upload-time = "2025-12-28T15:42:06.691Z" }, { url = "https://files.pythonhosted.org/packages/4f/a6/6130dc6d8da28cdcbb0f2bf8865aeca9b157622f7c0031e48c6cf9a0e591/coverage-7.13.1-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d72140ccf8a147e94274024ff6fd8fb7811354cf7ef88b1f0a988ebaa5bc774f", size = 250374, upload-time = "2025-12-28T15:42:08.786Z" }, { url = 
"https://files.pythonhosted.org/packages/82/2b/783ded568f7cd6b677762f780ad338bf4b4750205860c17c25f7c708995e/coverage-7.13.1-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d3c9f051b028810f5a87c88e5d6e9af3c0ff32ef62763bf15d29f740453ca909", size = 252882, upload-time = "2025-12-28T15:42:10.515Z" }, { url = "https://files.pythonhosted.org/packages/cd/b2/9808766d082e6a4d59eb0cc881a57fc1600eb2c5882813eefff8254f71b5/coverage-7.13.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f398ba4df52d30b1763f62eed9de5620dcde96e6f491f4c62686736b155aa6e4", size = 254218, upload-time = "2025-12-28T15:42:12.208Z" }, { url = "https://files.pythonhosted.org/packages/44/ea/52a985bb447c871cb4d2e376e401116520991b597c85afdde1ea9ef54f2c/coverage-7.13.1-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:132718176cc723026d201e347f800cd1a9e4b62ccd3f82476950834dad501c75", size = 250391, upload-time = "2025-12-28T15:42:14.21Z" }, { url = "https://files.pythonhosted.org/packages/7f/1d/125b36cc12310718873cfc8209ecfbc1008f14f4f5fa0662aa608e579353/coverage-7.13.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:9e549d642426e3579b3f4b92d0431543b012dcb6e825c91619d4e93b7363c3f9", size = 252239, upload-time = "2025-12-28T15:42:16.292Z" }, { url = "https://files.pythonhosted.org/packages/6a/16/10c1c164950cade470107f9f14bbac8485f8fb8515f515fca53d337e4a7f/coverage-7.13.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:90480b2134999301eea795b3a9dbf606c6fbab1b489150c501da84a959442465", size = 250196, upload-time = "2025-12-28T15:42:18.54Z" }, { url = "https://files.pythonhosted.org/packages/2a/c6/cd860fac08780c6fd659732f6ced1b40b79c35977c1356344e44d72ba6c4/coverage-7.13.1-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:e825dbb7f84dfa24663dd75835e7257f8882629fc11f03ecf77d84a75134b864", size = 250008, upload-time = "2025-12-28T15:42:20.365Z" }, { url = 
"https://files.pythonhosted.org/packages/f0/3a/a8c58d3d38f82a5711e1e0a67268362af48e1a03df27c03072ac30feefcf/coverage-7.13.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:623dcc6d7a7ba450bbdbeedbaa0c42b329bdae16491af2282f12a7e809be7eb9", size = 251671, upload-time = "2025-12-28T15:42:22.114Z" }, { url = "https://files.pythonhosted.org/packages/f0/bc/fd4c1da651d037a1e3d53e8cb3f8182f4b53271ffa9a95a2e211bacc0349/coverage-7.13.1-cp314-cp314-win32.whl", hash = "sha256:6e73ebb44dca5f708dc871fe0b90cf4cff1a13f9956f747cc87b535a840386f5", size = 221777, upload-time = "2025-12-28T15:42:23.919Z" }, { url = "https://files.pythonhosted.org/packages/4b/50/71acabdc8948464c17e90b5ffd92358579bd0910732c2a1c9537d7536aa6/coverage-7.13.1-cp314-cp314-win_amd64.whl", hash = "sha256:be753b225d159feb397bd0bf91ae86f689bad0da09d3b301478cd39b878ab31a", size = 222592, upload-time = "2025-12-28T15:42:25.619Z" }, { url = "https://files.pythonhosted.org/packages/f7/c8/a6fb943081bb0cc926499c7907731a6dc9efc2cbdc76d738c0ab752f1a32/coverage-7.13.1-cp314-cp314-win_arm64.whl", hash = "sha256:228b90f613b25ba0019361e4ab81520b343b622fc657daf7e501c4ed6a2366c0", size = 221169, upload-time = "2025-12-28T15:42:27.629Z" }, { url = "https://files.pythonhosted.org/packages/16/61/d5b7a0a0e0e40d62e59bc8c7aa1afbd86280d82728ba97f0673b746b78e2/coverage-7.13.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:60cfb538fe9ef86e5b2ab0ca8fc8d62524777f6c611dcaf76dc16fbe9b8e698a", size = 219730, upload-time = "2025-12-28T15:42:29.306Z" }, { url = "https://files.pythonhosted.org/packages/a3/2c/8881326445fd071bb49514d1ce97d18a46a980712b51fee84f9ab42845b4/coverage-7.13.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:57dfc8048c72ba48a8c45e188d811e5efd7e49b387effc8fb17e97936dde5bf6", size = 220001, upload-time = "2025-12-28T15:42:31.319Z" }, { url = 
"https://files.pythonhosted.org/packages/b5/d7/50de63af51dfa3a7f91cc37ad8fcc1e244b734232fbc8b9ab0f3c834a5cd/coverage-7.13.1-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3f2f725aa3e909b3c5fdb8192490bdd8e1495e85906af74fe6e34a2a77ba0673", size = 261370, upload-time = "2025-12-28T15:42:32.992Z" }, { url = "https://files.pythonhosted.org/packages/e1/2c/d31722f0ec918fd7453b2758312729f645978d212b410cd0f7c2aed88a94/coverage-7.13.1-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:9ee68b21909686eeb21dfcba2c3b81fee70dcf38b140dcd5aa70680995fa3aa5", size = 263485, upload-time = "2025-12-28T15:42:34.759Z" }, { url = "https://files.pythonhosted.org/packages/fa/7a/2c114fa5c5fc08ba0777e4aec4c97e0b4a1afcb69c75f1f54cff78b073ab/coverage-7.13.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:724b1b270cb13ea2e6503476e34541a0b1f62280bc997eab443f87790202033d", size = 265890, upload-time = "2025-12-28T15:42:36.517Z" }, { url = "https://files.pythonhosted.org/packages/65/d9/f0794aa1c74ceabc780fe17f6c338456bbc4e96bd950f2e969f48ac6fb20/coverage-7.13.1-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:916abf1ac5cf7eb16bc540a5bf75c71c43a676f5c52fcb9fe75a2bd75fb944e8", size = 260445, upload-time = "2025-12-28T15:42:38.646Z" }, { url = "https://files.pythonhosted.org/packages/49/23/184b22a00d9bb97488863ced9454068c79e413cb23f472da6cbddc6cfc52/coverage-7.13.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:776483fd35b58d8afe3acbd9988d5de592ab6da2d2a865edfdbc9fdb43e7c486", size = 263357, upload-time = "2025-12-28T15:42:40.788Z" }, { url = "https://files.pythonhosted.org/packages/7d/bd/58af54c0c9199ea4190284f389005779d7daf7bf3ce40dcd2d2b2f96da69/coverage-7.13.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:b6f3b96617e9852703f5b633ea01315ca45c77e879584f283c44127f0f1ec564", size = 260959, upload-time = 
"2025-12-28T15:42:42.808Z" }, { url = "https://files.pythonhosted.org/packages/4b/2a/6839294e8f78a4891bf1df79d69c536880ba2f970d0ff09e7513d6e352e9/coverage-7.13.1-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:bd63e7b74661fed317212fab774e2a648bc4bb09b35f25474f8e3325d2945cd7", size = 259792, upload-time = "2025-12-28T15:42:44.818Z" }, { url = "https://files.pythonhosted.org/packages/ba/c3/528674d4623283310ad676c5af7414b9850ab6d55c2300e8aa4b945ec554/coverage-7.13.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:933082f161bbb3e9f90d00990dc956120f608cdbcaeea15c4d897f56ef4fe416", size = 262123, upload-time = "2025-12-28T15:42:47.108Z" }, { url = "https://files.pythonhosted.org/packages/06/c5/8c0515692fb4c73ac379d8dc09b18eaf0214ecb76ea6e62467ba7a1556ff/coverage-7.13.1-cp314-cp314t-win32.whl", hash = "sha256:18be793c4c87de2965e1c0f060f03d9e5aff66cfeae8e1dbe6e5b88056ec153f", size = 222562, upload-time = "2025-12-28T15:42:49.144Z" }, { url = "https://files.pythonhosted.org/packages/05/0e/c0a0c4678cb30dac735811db529b321d7e1c9120b79bd728d4f4d6b010e9/coverage-7.13.1-cp314-cp314t-win_amd64.whl", hash = "sha256:0e42e0ec0cd3e0d851cb3c91f770c9301f48647cb2877cb78f74bdaa07639a79", size = 223670, upload-time = "2025-12-28T15:42:51.218Z" }, { url = "https://files.pythonhosted.org/packages/f5/5f/b177aa0011f354abf03a8f30a85032686d290fdeed4222b27d36b4372a50/coverage-7.13.1-cp314-cp314t-win_arm64.whl", hash = "sha256:eaecf47ef10c72ece9a2a92118257da87e460e113b83cc0d2905cbbe931792b4", size = 221707, upload-time = "2025-12-28T15:42:53.034Z" }, { url = "https://files.pythonhosted.org/packages/cc/48/d9f421cb8da5afaa1a64570d9989e00fb7955e6acddc5a12979f7666ef60/coverage-7.13.1-py3-none-any.whl", hash = "sha256:2016745cb3ba554469d02819d78958b571792bb68e31302610e898f80dd3a573", size = 210722, upload-time = "2025-12-28T15:42:54.901Z" }, ] [package.optional-dependencies] toml = [ { name = "tomli", marker = "python_full_version <= '3.11'" }, ] [[package]] name = "dnspython" version = 
"2.8.0" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/8c/8b/57666417c0f90f08bcafa776861060426765fdb422eb10212086fb811d26/dnspython-2.8.0.tar.gz", hash = "sha256:181d3c6996452cb1189c4046c61599b84a5a86e099562ffde77d26984ff26d0f", size = 368251, upload-time = "2025-09-07T18:58:00.022Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/ba/5a/18ad964b0086c6e62e2e7500f7edc89e3faa45033c71c1893d34eed2b2de/dnspython-2.8.0-py3-none-any.whl", hash = "sha256:01d9bbc4a2d76bf0db7c1f729812ded6d912bd318d3b1cf81d30c0f845dbf3af", size = 331094, upload-time = "2025-09-07T18:57:58.071Z" }, ] [[package]] name = "eventlet" version = "0.40.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "dnspython" }, { name = "greenlet" }, ] sdist = { url = "https://files.pythonhosted.org/packages/d1/d8/f72d8583db7c559445e0e9500a9b9787332370c16980802204a403634585/eventlet-0.40.4.tar.gz", hash = "sha256:69bef712b1be18b4930df6f0c495d2a882bf7b63aa111e7b6eeff461cfcaf26f", size = 565920, upload-time = "2025-11-26T13:57:31.126Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/22/6d/8e1fa901f6a8307f90e7bd932064e27a0062a4a7a16af38966a9c3293c52/eventlet-0.40.4-py3-none-any.whl", hash = "sha256:6326c6d0bf55810bece151f7a5750207c610f389ba110ffd1541ed6e5215485b", size = 364588, upload-time = "2025-11-26T13:57:29.09Z" }, ] [[package]] name = "exceptiongroup" version = "1.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "typing-extensions", marker = "python_full_version < '3.13'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" } wheels = [ { url = 
"https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740, upload-time = "2025-11-21T23:01:53.443Z" }, ] [[package]] name = "gevent" version = "25.9.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cffi", marker = "platform_python_implementation == 'CPython' and sys_platform == 'win32'" }, { name = "greenlet", marker = "platform_python_implementation == 'CPython'" }, { name = "zope-event" }, { name = "zope-interface" }, ] sdist = { url = "https://files.pythonhosted.org/packages/9e/48/b3ef2673ffb940f980966694e40d6d32560f3ffa284ecaeb5ea3a90a6d3f/gevent-25.9.1.tar.gz", hash = "sha256:adf9cd552de44a4e6754c51ff2e78d9193b7fa6eab123db9578a210e657235dd", size = 5059025, upload-time = "2025-09-17T16:15:34.528Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/ae/c7/2c60fc4e5c9144f2b91e23af8d87c626870ad3183cfd09d2b3ba6d699178/gevent-25.9.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:856b990be5590e44c3a3dc6c8d48a40eaccbb42e99d2b791d11d1e7711a4297e", size = 1831980, upload-time = "2025-09-17T15:41:22.597Z" }, { url = "https://files.pythonhosted.org/packages/2e/ae/49bf0a01f95a1c92c001d7b3f482a2301626b8a0617f448c4cd14ca9b5d4/gevent-25.9.1-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:fe1599d0b30e6093eb3213551751b24feeb43db79f07e89d98dd2f3330c9063e", size = 1918777, upload-time = "2025-09-17T15:48:57.223Z" }, { url = "https://files.pythonhosted.org/packages/88/3f/266d2eb9f5d75c184a55a39e886b53a4ea7f42ff31f195220a363f0e3f9e/gevent-25.9.1-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:f0d8b64057b4bf1529b9ef9bd2259495747fba93d1f836c77bfeaacfec373fd0", size = 1869235, upload-time = "2025-09-17T15:49:18.255Z" }, { url = 
"https://files.pythonhosted.org/packages/76/24/c0c7c7db70ca74c7b1918388ebda7c8c2a3c3bff0bbfbaa9280ed04b3340/gevent-25.9.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b56cbc820e3136ba52cd690bdf77e47a4c239964d5f80dc657c1068e0fe9521c", size = 2177334, upload-time = "2025-09-17T15:15:10.073Z" }, { url = "https://files.pythonhosted.org/packages/4c/1e/de96bd033c03955f54c455b51a5127b1d540afcfc97838d1801fafce6d2e/gevent-25.9.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c5fa9ce5122c085983e33e0dc058f81f5264cebe746de5c401654ab96dddfca8", size = 1847708, upload-time = "2025-09-17T15:52:38.475Z" }, { url = "https://files.pythonhosted.org/packages/26/8b/6851e9cd3e4f322fa15c1d196cbf1a8a123da69788b078227dd13dd4208f/gevent-25.9.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:03c74fec58eda4b4edc043311fca8ba4f8744ad1632eb0a41d5ec25413581975", size = 2234274, upload-time = "2025-09-17T15:24:07.797Z" }, { url = "https://files.pythonhosted.org/packages/0f/d8/b1178b70538c91493bec283018b47c16eab4bac9ddf5a3d4b7dd905dab60/gevent-25.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:a8ae9f895e8651d10b0a8328a61c9c53da11ea51b666388aa99b0ce90f9fdc27", size = 1695326, upload-time = "2025-09-17T20:10:25.455Z" }, { url = "https://files.pythonhosted.org/packages/81/86/03f8db0704fed41b0fa830425845f1eb4e20c92efa3f18751ee17809e9c6/gevent-25.9.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:18e5aff9e8342dc954adb9c9c524db56c2f3557999463445ba3d9cbe3dada7b7", size = 1792418, upload-time = "2025-09-17T15:41:24.384Z" }, { url = "https://files.pythonhosted.org/packages/5f/35/f6b3a31f0849a62cfa2c64574bcc68a781d5499c3195e296e892a121a3cf/gevent-25.9.1-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1cdf6db28f050ee103441caa8b0448ace545364f775059d5e2de089da975c457", size = 1875700, upload-time = "2025-09-17T15:48:59.652Z" }, { url = 
"https://files.pythonhosted.org/packages/66/1e/75055950aa9b48f553e061afa9e3728061b5ccecca358cef19166e4ab74a/gevent-25.9.1-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:812debe235a8295be3b2a63b136c2474241fa5c58af55e6a0f8cfc29d4936235", size = 1831365, upload-time = "2025-09-17T15:49:19.426Z" }, { url = "https://files.pythonhosted.org/packages/31/e8/5c1f6968e5547e501cfa03dcb0239dff55e44c3660a37ec534e32a0c008f/gevent-25.9.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b28b61ff9216a3d73fe8f35669eefcafa957f143ac534faf77e8a19eb9e6883a", size = 2122087, upload-time = "2025-09-17T15:15:12.329Z" }, { url = "https://files.pythonhosted.org/packages/c0/2c/ebc5d38a7542af9fb7657bfe10932a558bb98c8a94e4748e827d3823fced/gevent-25.9.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5e4b6278b37373306fc6b1e5f0f1cf56339a1377f67c35972775143d8d7776ff", size = 1808776, upload-time = "2025-09-17T15:52:40.16Z" }, { url = "https://files.pythonhosted.org/packages/e6/26/e1d7d6c8ffbf76fe1fbb4e77bdb7f47d419206adc391ec40a8ace6ebbbf0/gevent-25.9.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d99f0cb2ce43c2e8305bf75bee61a8bde06619d21b9d0316ea190fc7a0620a56", size = 2179141, upload-time = "2025-09-17T15:24:09.895Z" }, { url = "https://files.pythonhosted.org/packages/1d/6c/bb21fd9c095506aeeaa616579a356aa50935165cc0f1e250e1e0575620a7/gevent-25.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:72152517ecf548e2f838c61b4be76637d99279dbaa7e01b3924df040aa996586", size = 1677941, upload-time = "2025-09-17T19:59:50.185Z" }, { url = "https://files.pythonhosted.org/packages/f7/49/e55930ba5259629eb28ac7ee1abbca971996a9165f902f0249b561602f24/gevent-25.9.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:46b188248c84ffdec18a686fcac5dbb32365d76912e14fda350db5dc0bfd4f86", size = 2955991, upload-time = "2025-09-17T14:52:30.568Z" }, { url = 
"https://files.pythonhosted.org/packages/aa/88/63dc9e903980e1da1e16541ec5c70f2b224ec0a8e34088cb42794f1c7f52/gevent-25.9.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:f2b54ea3ca6f0c763281cd3f96010ac7e98c2e267feb1221b5a26e2ca0b9a692", size = 1808503, upload-time = "2025-09-17T15:41:25.59Z" }, { url = "https://files.pythonhosted.org/packages/7a/8d/7236c3a8f6ef7e94c22e658397009596fa90f24c7d19da11ad7ab3a9248e/gevent-25.9.1-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:7a834804ac00ed8a92a69d3826342c677be651b1c3cd66cc35df8bc711057aa2", size = 1890001, upload-time = "2025-09-17T15:49:01.227Z" }, { url = "https://files.pythonhosted.org/packages/4f/63/0d7f38c4a2085ecce26b50492fc6161aa67250d381e26d6a7322c309b00f/gevent-25.9.1-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:323a27192ec4da6b22a9e51c3d9d896ff20bc53fdc9e45e56eaab76d1c39dd74", size = 1855335, upload-time = "2025-09-17T15:49:20.582Z" }, { url = "https://files.pythonhosted.org/packages/95/18/da5211dfc54c7a57e7432fd9a6ffeae1ce36fe5a313fa782b1c96529ea3d/gevent-25.9.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6ea78b39a2c51d47ff0f130f4c755a9a4bbb2dd9721149420ad4712743911a51", size = 2109046, upload-time = "2025-09-17T15:15:13.817Z" }, { url = "https://files.pythonhosted.org/packages/a6/5a/7bb5ec8e43a2c6444853c4a9f955f3e72f479d7c24ea86c95fb264a2de65/gevent-25.9.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:dc45cd3e1cc07514a419960af932a62eb8515552ed004e56755e4bf20bad30c5", size = 1827099, upload-time = "2025-09-17T15:52:41.384Z" }, { url = "https://files.pythonhosted.org/packages/ca/d4/b63a0a60635470d7d986ef19897e893c15326dd69e8fb342c76a4f07fe9e/gevent-25.9.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:34e01e50c71eaf67e92c186ee0196a039d6e4f4b35670396baed4a2d8f1b347f", size = 2172623, upload-time = "2025-09-17T15:24:12.03Z" }, { url = 
"https://files.pythonhosted.org/packages/d5/98/caf06d5d22a7c129c1fb2fc1477306902a2c8ddfd399cd26bbbd4caf2141/gevent-25.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:4acd6bcd5feabf22c7c5174bd3b9535ee9f088d2bbce789f740ad8d6554b18f3", size = 1682837, upload-time = "2025-09-17T19:48:47.318Z" }, { url = "https://files.pythonhosted.org/packages/5a/77/b97f086388f87f8ad3e01364f845004aef0123d4430241c7c9b1f9bde742/gevent-25.9.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:4f84591d13845ee31c13f44bdf6bd6c3dbf385b5af98b2f25ec328213775f2ed", size = 2973739, upload-time = "2025-09-17T14:53:30.279Z" }, { url = "https://files.pythonhosted.org/packages/3c/2e/9d5f204ead343e5b27bbb2fedaec7cd0009d50696b2266f590ae845d0331/gevent-25.9.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9cdbb24c276a2d0110ad5c978e49daf620b153719ac8a548ce1250a7eb1b9245", size = 1809165, upload-time = "2025-09-17T15:41:27.193Z" }, { url = "https://files.pythonhosted.org/packages/10/3e/791d1bf1eb47748606d5f2c2aa66571f474d63e0176228b1f1fd7b77ab37/gevent-25.9.1-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:88b6c07169468af631dcf0fdd3658f9246d6822cc51461d43f7c44f28b0abb82", size = 1890638, upload-time = "2025-09-17T15:49:02.45Z" }, { url = "https://files.pythonhosted.org/packages/f2/5c/9ad0229b2b4d81249ca41e4f91dd8057deaa0da6d4fbe40bf13cdc5f7a47/gevent-25.9.1-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b7bb0e29a7b3e6ca9bed2394aa820244069982c36dc30b70eb1004dd67851a48", size = 1857118, upload-time = "2025-09-17T15:49:22.125Z" }, { url = "https://files.pythonhosted.org/packages/49/2a/3010ed6c44179a3a5c5c152e6de43a30ff8bc2c8de3115ad8733533a018f/gevent-25.9.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2951bb070c0ee37b632ac9134e4fdaad70d2e660c931bb792983a0837fe5b7d7", size = 2111598, upload-time = "2025-09-17T15:15:15.226Z" }, { url = 
"https://files.pythonhosted.org/packages/08/75/6bbe57c19a7aa4527cc0f9afcdf5a5f2aed2603b08aadbccb5bf7f607ff4/gevent-25.9.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e4e17c2d57e9a42e25f2a73d297b22b60b2470a74be5a515b36c984e1a246d47", size = 1829059, upload-time = "2025-09-17T15:52:42.596Z" }, { url = "https://files.pythonhosted.org/packages/06/6e/19a9bee9092be45679cb69e4dd2e0bf5f897b7140b4b39c57cc123d24829/gevent-25.9.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8d94936f8f8b23d9de2251798fcb603b84f083fdf0d7f427183c1828fb64f117", size = 2173529, upload-time = "2025-09-17T15:24:13.897Z" }, { url = "https://files.pythonhosted.org/packages/ca/4f/50de9afd879440e25737e63f5ba6ee764b75a3abe17376496ab57f432546/gevent-25.9.1-cp313-cp313-win_amd64.whl", hash = "sha256:eb51c5f9537b07da673258b4832f6635014fee31690c3f0944d34741b69f92fa", size = 1681518, upload-time = "2025-09-17T19:39:47.488Z" }, { url = "https://files.pythonhosted.org/packages/15/1a/948f8167b2cdce573cf01cec07afc64d0456dc134b07900b26ac7018b37e/gevent-25.9.1-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:1a3fe4ea1c312dbf6b375b416925036fe79a40054e6bf6248ee46526ea628be1", size = 2982934, upload-time = "2025-09-17T14:54:11.302Z" }, { url = "https://files.pythonhosted.org/packages/9b/ec/726b146d1d3aad82e03d2e1e1507048ab6072f906e83f97f40667866e582/gevent-25.9.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0adb937f13e5fb90cca2edf66d8d7e99d62a299687400ce2edee3f3504009356", size = 1813982, upload-time = "2025-09-17T15:41:28.506Z" }, { url = "https://files.pythonhosted.org/packages/35/5d/5f83f17162301662bd1ce702f8a736a8a8cac7b7a35e1d8b9866938d1f9d/gevent-25.9.1-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:427f869a2050a4202d93cf7fd6ab5cffb06d3e9113c10c967b6e2a0d45237cb8", size = 1894902, upload-time = "2025-09-17T15:49:03.702Z" }, { url = 
"https://files.pythonhosted.org/packages/83/cd/cf5e74e353f60dab357829069ffc300a7bb414c761f52cf8c0c6e9728b8d/gevent-25.9.1-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c049880175e8c93124188f9d926af0a62826a3b81aa6d3074928345f8238279e", size = 1861792, upload-time = "2025-09-17T15:49:23.279Z" }, { url = "https://files.pythonhosted.org/packages/dd/65/b9a4526d4a4edce26fe4b3b993914ec9dc64baabad625a3101e51adb17f3/gevent-25.9.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b5a67a0974ad9f24721034d1e008856111e0535f1541499f72a733a73d658d1c", size = 2113215, upload-time = "2025-09-17T15:15:16.34Z" }, { url = "https://files.pythonhosted.org/packages/e5/be/7d35731dfaf8370795b606e515d964a0967e129db76ea7873f552045dd39/gevent-25.9.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:1d0f5d8d73f97e24ea8d24d8be0f51e0cf7c54b8021c1fddb580bf239474690f", size = 1833449, upload-time = "2025-09-17T15:52:43.75Z" }, { url = "https://files.pythonhosted.org/packages/65/58/7bc52544ea5e63af88c4a26c90776feb42551b7555a1c89c20069c168a3f/gevent-25.9.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:ddd3ff26e5c4240d3fbf5516c2d9d5f2a998ef87cfb73e1429cfaeaaec860fa6", size = 2176034, upload-time = "2025-09-17T15:24:15.676Z" }, { url = "https://files.pythonhosted.org/packages/c2/69/a7c4ba2ffbc7c7dbf6d8b4f5d0f0a421f7815d229f4909854266c445a3d4/gevent-25.9.1-cp314-cp314-win_amd64.whl", hash = "sha256:bb63c0d6cb9950cc94036a4995b9cc4667b8915366613449236970f4394f94d7", size = 1703019, upload-time = "2025-09-17T19:30:55.272Z" }, ] [[package]] name = "greenlet" version = "3.3.0" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/c7/e5/40dbda2736893e3e53d25838e0f19a2b417dfc122b9989c91918db30b5d3/greenlet-3.3.0.tar.gz", hash = "sha256:a82bb225a4e9e4d653dd2fb7b8b2d36e4fb25bc0165422a11e48b88e9e6f78fb", size = 190651, upload-time = "2025-12-04T14:49:44.05Z" } wheels = [ { url = 
"https://files.pythonhosted.org/packages/32/6a/33d1702184d94106d3cdd7bfb788e19723206fce152e303473ca3b946c7b/greenlet-3.3.0-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:6f8496d434d5cb2dce025773ba5597f71f5410ae499d5dd9533e0653258cdb3d", size = 273658, upload-time = "2025-12-04T14:23:37.494Z" }, { url = "https://files.pythonhosted.org/packages/d6/b7/2b5805bbf1907c26e434f4e448cd8b696a0b71725204fa21a211ff0c04a7/greenlet-3.3.0-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b96dc7eef78fd404e022e165ec55327f935b9b52ff355b067eb4a0267fc1cffb", size = 574810, upload-time = "2025-12-04T14:50:04.154Z" }, { url = "https://files.pythonhosted.org/packages/94/38/343242ec12eddf3d8458c73f555c084359883d4ddc674240d9e61ec51fd6/greenlet-3.3.0-cp310-cp310-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:73631cd5cccbcfe63e3f9492aaa664d278fda0ce5c3d43aeda8e77317e38efbd", size = 586248, upload-time = "2025-12-04T14:57:39.35Z" }, { url = "https://files.pythonhosted.org/packages/f0/d0/0ae86792fb212e4384041e0ef8e7bc66f59a54912ce407d26a966ed2914d/greenlet-3.3.0-cp310-cp310-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b299a0cb979f5d7197442dccc3aee67fce53500cd88951b7e6c35575701c980b", size = 597403, upload-time = "2025-12-04T15:07:10.831Z" }, { url = "https://files.pythonhosted.org/packages/b6/a8/15d0aa26c0036a15d2659175af00954aaaa5d0d66ba538345bd88013b4d7/greenlet-3.3.0-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7dee147740789a4632cace364816046e43310b59ff8fb79833ab043aefa72fd5", size = 586910, upload-time = "2025-12-04T14:25:59.705Z" }, { url = "https://files.pythonhosted.org/packages/e1/9b/68d5e3b7ccaba3907e5532cf8b9bf16f9ef5056a008f195a367db0ff32db/greenlet-3.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:39b28e339fc3c348427560494e28d8a6f3561c8d2bcf7d706e1c624ed8d822b9", size = 1547206, upload-time = "2025-12-04T15:04:21.027Z" }, { url = 
"https://files.pythonhosted.org/packages/66/bd/e3086ccedc61e49f91e2cfb5ffad9d8d62e5dc85e512a6200f096875b60c/greenlet-3.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b3c374782c2935cc63b2a27ba8708471de4ad1abaa862ffdb1ef45a643ddbb7d", size = 1613359, upload-time = "2025-12-04T14:27:26.548Z" }, { url = "https://files.pythonhosted.org/packages/f4/6b/d4e73f5dfa888364bbf02efa85616c6714ae7c631c201349782e5b428925/greenlet-3.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:b49e7ed51876b459bd645d83db257f0180e345d3f768a35a85437a24d5a49082", size = 300740, upload-time = "2025-12-04T14:47:52.773Z" }, { url = "https://files.pythonhosted.org/packages/1f/cb/48e964c452ca2b92175a9b2dca037a553036cb053ba69e284650ce755f13/greenlet-3.3.0-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e29f3018580e8412d6aaf5641bb7745d38c85228dacf51a73bd4e26ddf2a6a8e", size = 274908, upload-time = "2025-12-04T14:23:26.435Z" }, { url = "https://files.pythonhosted.org/packages/28/da/38d7bff4d0277b594ec557f479d65272a893f1f2a716cad91efeb8680953/greenlet-3.3.0-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a687205fb22794e838f947e2194c0566d3812966b41c78709554aa883183fb62", size = 577113, upload-time = "2025-12-04T14:50:05.493Z" }, { url = "https://files.pythonhosted.org/packages/3c/f2/89c5eb0faddc3ff014f1c04467d67dee0d1d334ab81fadbf3744847f8a8a/greenlet-3.3.0-cp311-cp311-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4243050a88ba61842186cb9e63c7dfa677ec146160b0efd73b855a3d9c7fcf32", size = 590338, upload-time = "2025-12-04T14:57:41.136Z" }, { url = "https://files.pythonhosted.org/packages/80/d7/db0a5085035d05134f8c089643da2b44cc9b80647c39e93129c5ef170d8f/greenlet-3.3.0-cp311-cp311-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:670d0f94cd302d81796e37299bcd04b95d62403883b24225c6b5271466612f45", size = 601098, upload-time = "2025-12-04T15:07:11.898Z" }, { url = 
"https://files.pythonhosted.org/packages/dc/a6/e959a127b630a58e23529972dbc868c107f9d583b5a9f878fb858c46bc1a/greenlet-3.3.0-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6cb3a8ec3db4a3b0eb8a3c25436c2d49e3505821802074969db017b87bc6a948", size = 590206, upload-time = "2025-12-04T14:26:01.254Z" }, { url = "https://files.pythonhosted.org/packages/48/60/29035719feb91798693023608447283b266b12efc576ed013dd9442364bb/greenlet-3.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2de5a0b09eab81fc6a382791b995b1ccf2b172a9fec934747a7a23d2ff291794", size = 1550668, upload-time = "2025-12-04T15:04:22.439Z" }, { url = "https://files.pythonhosted.org/packages/0a/5f/783a23754b691bfa86bd72c3033aa107490deac9b2ef190837b860996c9f/greenlet-3.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4449a736606bd30f27f8e1ff4678ee193bc47f6ca810d705981cfffd6ce0d8c5", size = 1615483, upload-time = "2025-12-04T14:27:28.083Z" }, { url = "https://files.pythonhosted.org/packages/1d/d5/c339b3b4bc8198b7caa4f2bd9fd685ac9f29795816d8db112da3d04175bb/greenlet-3.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:7652ee180d16d447a683c04e4c5f6441bae7ba7b17ffd9f6b3aff4605e9e6f71", size = 301164, upload-time = "2025-12-04T14:42:51.577Z" }, { url = "https://files.pythonhosted.org/packages/f8/0a/a3871375c7b9727edaeeea994bfff7c63ff7804c9829c19309ba2e058807/greenlet-3.3.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:b01548f6e0b9e9784a2c99c5651e5dc89ffcbe870bc5fb2e5ef864e9cc6b5dcb", size = 276379, upload-time = "2025-12-04T14:23:30.498Z" }, { url = "https://files.pythonhosted.org/packages/43/ab/7ebfe34dce8b87be0d11dae91acbf76f7b8246bf9d6b319c741f99fa59c6/greenlet-3.3.0-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:349345b770dc88f81506c6861d22a6ccd422207829d2c854ae2af8025af303e3", size = 597294, upload-time = "2025-12-04T14:50:06.847Z" }, { url = 
"https://files.pythonhosted.org/packages/a4/39/f1c8da50024feecd0793dbd5e08f526809b8ab5609224a2da40aad3a7641/greenlet-3.3.0-cp312-cp312-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e8e18ed6995e9e2c0b4ed264d2cf89260ab3ac7e13555b8032b25a74c6d18655", size = 607742, upload-time = "2025-12-04T14:57:42.349Z" }, { url = "https://files.pythonhosted.org/packages/77/cb/43692bcd5f7a0da6ec0ec6d58ee7cddb606d055ce94a62ac9b1aa481e969/greenlet-3.3.0-cp312-cp312-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c024b1e5696626890038e34f76140ed1daf858e37496d33f2af57f06189e70d7", size = 622297, upload-time = "2025-12-04T15:07:13.552Z" }, { url = "https://files.pythonhosted.org/packages/75/b0/6bde0b1011a60782108c01de5913c588cf51a839174538d266de15e4bf4d/greenlet-3.3.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:047ab3df20ede6a57c35c14bf5200fcf04039d50f908270d3f9a7a82064f543b", size = 609885, upload-time = "2025-12-04T14:26:02.368Z" }, { url = "https://files.pythonhosted.org/packages/49/0e/49b46ac39f931f59f987b7cd9f34bfec8ef81d2a1e6e00682f55be5de9f4/greenlet-3.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2d9ad37fc657b1102ec880e637cccf20191581f75c64087a549e66c57e1ceb53", size = 1567424, upload-time = "2025-12-04T15:04:23.757Z" }, { url = "https://files.pythonhosted.org/packages/05/f5/49a9ac2dff7f10091935def9165c90236d8f175afb27cbed38fb1d61ab6b/greenlet-3.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:83cd0e36932e0e7f36a64b732a6f60c2fc2df28c351bae79fbaf4f8092fe7614", size = 1636017, upload-time = "2025-12-04T14:27:29.688Z" }, { url = "https://files.pythonhosted.org/packages/6c/79/3912a94cf27ec503e51ba493692d6db1e3cd8ac7ac52b0b47c8e33d7f4f9/greenlet-3.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:a7a34b13d43a6b78abf828a6d0e87d3385680eaf830cd60d20d52f249faabf39", size = 301964, upload-time = "2025-12-04T14:36:58.316Z" }, { url = 
"https://files.pythonhosted.org/packages/02/2f/28592176381b9ab2cafa12829ba7b472d177f3acc35d8fbcf3673d966fff/greenlet-3.3.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:a1e41a81c7e2825822f4e068c48cb2196002362619e2d70b148f20a831c00739", size = 275140, upload-time = "2025-12-04T14:23:01.282Z" }, { url = "https://files.pythonhosted.org/packages/2c/80/fbe937bf81e9fca98c981fe499e59a3f45df2a04da0baa5c2be0dca0d329/greenlet-3.3.0-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9f515a47d02da4d30caaa85b69474cec77b7929b2e936ff7fb853d42f4bf8808", size = 599219, upload-time = "2025-12-04T14:50:08.309Z" }, { url = "https://files.pythonhosted.org/packages/c2/ff/7c985128f0514271b8268476af89aee6866df5eec04ac17dcfbc676213df/greenlet-3.3.0-cp313-cp313-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7d2d9fd66bfadf230b385fdc90426fcd6eb64db54b40c495b72ac0feb5766c54", size = 610211, upload-time = "2025-12-04T14:57:43.968Z" }, { url = "https://files.pythonhosted.org/packages/79/07/c47a82d881319ec18a4510bb30463ed6891f2ad2c1901ed5ec23d3de351f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30a6e28487a790417d036088b3bcb3f3ac7d8babaa7d0139edbaddebf3af9492", size = 624311, upload-time = "2025-12-04T15:07:14.697Z" }, { url = "https://files.pythonhosted.org/packages/fd/8e/424b8c6e78bd9837d14ff7df01a9829fc883ba2ab4ea787d4f848435f23f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:087ea5e004437321508a8d6f20efc4cfec5e3c30118e1417ea96ed1d93950527", size = 612833, upload-time = "2025-12-04T14:26:03.669Z" }, { url = "https://files.pythonhosted.org/packages/b5/ba/56699ff9b7c76ca12f1cdc27a886d0f81f2189c3455ff9f65246780f713d/greenlet-3.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ab97cf74045343f6c60a39913fa59710e4bd26a536ce7ab2397adf8b27e67c39", size = 1567256, upload-time = "2025-12-04T15:04:25.276Z" }, { url = 
"https://files.pythonhosted.org/packages/1e/37/f31136132967982d698c71a281a8901daf1a8fbab935dce7c0cf15f942cc/greenlet-3.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5375d2e23184629112ca1ea89a53389dddbffcf417dad40125713d88eb5f96e8", size = 1636483, upload-time = "2025-12-04T14:27:30.804Z" }, { url = "https://files.pythonhosted.org/packages/7e/71/ba21c3fb8c5dce83b8c01f458a42e99ffdb1963aeec08fff5a18588d8fd7/greenlet-3.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:9ee1942ea19550094033c35d25d20726e4f1c40d59545815e1128ac58d416d38", size = 301833, upload-time = "2025-12-04T14:32:23.929Z" }, { url = "https://files.pythonhosted.org/packages/d7/7c/f0a6d0ede2c7bf092d00bc83ad5bafb7e6ec9b4aab2fbdfa6f134dc73327/greenlet-3.3.0-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:60c2ef0f578afb3c8d92ea07ad327f9a062547137afe91f38408f08aacab667f", size = 275671, upload-time = "2025-12-04T14:23:05.267Z" }, { url = "https://files.pythonhosted.org/packages/44/06/dac639ae1a50f5969d82d2e3dd9767d30d6dbdbab0e1a54010c8fe90263c/greenlet-3.3.0-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a5d554d0712ba1de0a6c94c640f7aeba3f85b3a6e1f2899c11c2c0428da9365", size = 646360, upload-time = "2025-12-04T14:50:10.026Z" }, { url = "https://files.pythonhosted.org/packages/e0/94/0fb76fe6c5369fba9bf98529ada6f4c3a1adf19e406a47332245ef0eb357/greenlet-3.3.0-cp314-cp314-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3a898b1e9c5f7307ebbde4102908e6cbfcb9ea16284a3abe15cab996bee8b9b3", size = 658160, upload-time = "2025-12-04T14:57:45.41Z" }, { url = "https://files.pythonhosted.org/packages/93/79/d2c70cae6e823fac36c3bbc9077962105052b7ef81db2f01ec3b9bf17e2b/greenlet-3.3.0-cp314-cp314-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:dcd2bdbd444ff340e8d6bdf54d2f206ccddbb3ccfdcd3c25bf4afaa7b8f0cf45", size = 671388, upload-time = "2025-12-04T15:07:15.789Z" }, { url = 
"https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5773edda4dc00e173820722711d043799d3adb4f01731f40619e07ea2750b955", size = 660166, upload-time = "2025-12-04T14:26:05.099Z" }, { url = "https://files.pythonhosted.org/packages/4b/d2/91465d39164eaa0085177f61983d80ffe746c5a1860f009811d498e7259c/greenlet-3.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ac0549373982b36d5fd5d30beb8a7a33ee541ff98d2b502714a09f1169f31b55", size = 1615193, upload-time = "2025-12-04T15:04:27.041Z" }, { url = "https://files.pythonhosted.org/packages/42/1b/83d110a37044b92423084d52d5d5a3b3a73cafb51b547e6d7366ff62eff1/greenlet-3.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d198d2d977460358c3b3a4dc844f875d1adb33817f0613f663a656f463764ccc", size = 1683653, upload-time = "2025-12-04T14:27:32.366Z" }, { url = "https://files.pythonhosted.org/packages/7c/9a/9030e6f9aa8fd7808e9c31ba4c38f87c4f8ec324ee67431d181fe396d705/greenlet-3.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:73f51dd0e0bdb596fb0417e475fa3c5e32d4c83638296e560086b8d7da7c4170", size = 305387, upload-time = "2025-12-04T14:26:51.063Z" }, { url = "https://files.pythonhosted.org/packages/a0/66/bd6317bc5932accf351fc19f177ffba53712a202f9df10587da8df257c7e/greenlet-3.3.0-cp314-cp314t-macosx_11_0_universal2.whl", hash = "sha256:d6ed6f85fae6cdfdb9ce04c9bf7a08d666cfcfb914e7d006f44f840b46741931", size = 282638, upload-time = "2025-12-04T14:25:20.941Z" }, { url = "https://files.pythonhosted.org/packages/30/cf/cc81cb030b40e738d6e69502ccbd0dd1bced0588e958f9e757945de24404/greenlet-3.3.0-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d9125050fcf24554e69c4cacb086b87b3b55dc395a8b3ebe6487b045b2614388", size = 651145, upload-time = "2025-12-04T14:50:11.039Z" }, { url = 
"https://files.pythonhosted.org/packages/9c/ea/1020037b5ecfe95ca7df8d8549959baceb8186031da83d5ecceff8b08cd2/greenlet-3.3.0-cp314-cp314t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:87e63ccfa13c0a0f6234ed0add552af24cc67dd886731f2261e46e241608bee3", size = 654236, upload-time = "2025-12-04T14:57:47.007Z" }, { url = "https://files.pythonhosted.org/packages/69/cc/1e4bae2e45ca2fa55299f4e85854606a78ecc37fead20d69322f96000504/greenlet-3.3.0-cp314-cp314t-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2662433acbca297c9153a4023fe2161c8dcfdcc91f10433171cf7e7d94ba2221", size = 662506, upload-time = "2025-12-04T15:07:16.906Z" }, { url = "https://files.pythonhosted.org/packages/57/b9/f8025d71a6085c441a7eaff0fd928bbb275a6633773667023d19179fe815/greenlet-3.3.0-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3c6e9b9c1527a78520357de498b0e709fb9e2f49c3a513afd5a249007261911b", size = 653783, upload-time = "2025-12-04T14:26:06.225Z" }, { url = "https://files.pythonhosted.org/packages/f6/c7/876a8c7a7485d5d6b5c6821201d542ef28be645aa024cfe1145b35c120c1/greenlet-3.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:286d093f95ec98fdd92fcb955003b8a3d054b4e2cab3e2707a5039e7b50520fd", size = 1614857, upload-time = "2025-12-04T15:04:28.484Z" }, { url = "https://files.pythonhosted.org/packages/4f/dc/041be1dff9f23dac5f48a43323cd0789cb798342011c19a248d9c9335536/greenlet-3.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c10513330af5b8ae16f023e8ddbfb486ab355d04467c4679c5cfe4659975dd9", size = 1676034, upload-time = "2025-12-04T14:27:33.531Z" }, ] [[package]] name = "gunicorn" source = { editable = "." 
} dependencies = [ { name = "packaging" }, ] [package.optional-dependencies] eventlet = [ { name = "eventlet" }, ] gevent = [ { name = "gevent" }, ] setproctitle = [ { name = "setproctitle" }, ] testing = [ { name = "coverage" }, { name = "eventlet" }, { name = "gevent" }, { name = "pytest" }, { name = "pytest-asyncio" }, { name = "pytest-cov" }, ] tornado = [ { name = "tornado" }, ] [package.metadata] requires-dist = [ { name = "coverage", marker = "extra == 'testing'" }, { name = "eventlet", marker = "extra == 'eventlet'", specifier = ">=0.40.3" }, { name = "eventlet", marker = "extra == 'testing'", specifier = ">=0.40.3" }, { name = "gevent", marker = "extra == 'gevent'", specifier = ">=23.9.0" }, { name = "gevent", marker = "extra == 'testing'", specifier = ">=23.9.0" }, { name = "packaging" }, { name = "pytest", marker = "extra == 'testing'" }, { name = "pytest-asyncio", marker = "extra == 'testing'" }, { name = "pytest-cov", marker = "extra == 'testing'" }, { name = "setproctitle", marker = "extra == 'setproctitle'" }, { name = "tornado", marker = "extra == 'tornado'", specifier = ">=6.5.0" }, ] provides-extras = ["gevent", "eventlet", "tornado", "gthread", "setproctitle", "testing"] [[package]] name = "iniconfig" version = "2.3.0" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, ] [[package]] name = "packaging" version = "26.0" source = { registry = "https://pypi.org/simple" } 
sdist = { url = "https://files.pythonhosted.org/packages/65/ee/299d360cdc32edc7d2cf530f3accf79c4fca01e96ffc950d8a52213bd8e4/packaging-26.0.tar.gz", hash = "sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4", size = 143416, upload-time = "2026-01-21T20:50:39.064Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529", size = 74366, upload-time = "2026-01-21T20:50:37.788Z" }, ] [[package]] name = "pluggy" version = "1.6.0" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, ] [[package]] name = "pycparser" version = "3.0" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/1b/7d/92392ff7815c21062bea51aa7b87d45576f649f16458d78b7cf94b9ab2e6/pycparser-3.0.tar.gz", hash = "sha256:600f49d217304a5902ac3c37e1281c9fe94e4d0489de643a9504c5cdfdfc6b29", size = 103492, upload-time = "2026-01-21T14:26:51.89Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/0c/c3/44f3fbbfa403ea2a7c779186dc20772604442dde72947e7d01069cbe98e3/pycparser-3.0-py3-none-any.whl", hash = "sha256:b727414169a36b7d524c1c3e31839a521725078d7b2ff038656844266160a992", size = 48172, upload-time = "2026-01-21T14:26:50.693Z" }, ] [[package]] name = "pygments" version = 
"2.19.2" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, ] [[package]] name = "pytest" version = "9.0.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, { name = "iniconfig" }, { name = "packaging" }, { name = "pluggy" }, { name = "pygments" }, { name = "tomli", marker = "python_full_version < '3.11'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/d1/db/7ef3487e0fb0049ddb5ce41d3a49c235bf9ad299b6a25d5780a89f19230f/pytest-9.0.2.tar.gz", hash = "sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11", size = 1568901, upload-time = "2025-12-06T21:30:51.014Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b", size = 374801, upload-time = "2025-12-06T21:30:49.154Z" }, ] [[package]] name = "pytest-asyncio" version = "1.3.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "backports-asyncio-runner", marker = "python_full_version < '3.11'" }, { name = "pytest" }, { name = "typing-extensions", marker = "python_full_version < '3.13'" }, ] sdist = { url = 
"https://files.pythonhosted.org/packages/90/2c/8af215c0f776415f3590cac4f9086ccefd6fd463befeae41cd4d3f193e5a/pytest_asyncio-1.3.0.tar.gz", hash = "sha256:d7f52f36d231b80ee124cd216ffb19369aa168fc10095013c6b014a34d3ee9e5", size = 50087, upload-time = "2025-11-10T16:07:47.256Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/e5/35/f8b19922b6a25bc0880171a2f1a003eaeb93657475193ab516fd87cac9da/pytest_asyncio-1.3.0-py3-none-any.whl", hash = "sha256:611e26147c7f77640e6d0a92a38ed17c3e9848063698d5c93d5aa7aa11cebff5", size = 15075, upload-time = "2025-11-10T16:07:45.537Z" }, ] [[package]] name = "pytest-cov" version = "7.0.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "coverage", extra = ["toml"] }, { name = "pluggy" }, { name = "pytest" }, ] sdist = { url = "https://files.pythonhosted.org/packages/5e/f7/c933acc76f5208b3b00089573cf6a2bc26dc80a8aece8f52bb7d6b1855ca/pytest_cov-7.0.0.tar.gz", hash = "sha256:33c97eda2e049a0c5298e91f519302a1334c26ac65c1a483d6206fd458361af1", size = 54328, upload-time = "2025-09-09T10:57:02.113Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/ee/49/1377b49de7d0c1ce41292161ea0f721913fa8722c19fb9c1e3aa0367eecb/pytest_cov-7.0.0-py3-none-any.whl", hash = "sha256:3b8e9558b16cc1479da72058bdecf8073661c7f57f7d3c5f22a1c23507f2d861", size = 22424, upload-time = "2025-09-09T10:57:00.695Z" }, ] [[package]] name = "setproctitle" version = "1.3.7" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/8d/48/49393a96a2eef1ab418b17475fb92b8fcfad83d099e678751b05472e69de/setproctitle-1.3.7.tar.gz", hash = "sha256:bc2bc917691c1537d5b9bca1468437176809c7e11e5694ca79a9ca12345dcb9e", size = 27002, upload-time = "2025-09-05T12:51:25.278Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/f2/48/fb401ec8c4953d519d05c87feca816ad668b8258448ff60579ac7a1c1386/setproctitle-1.3.7-cp310-cp310-macosx_10_9_universal2.whl", hash = 
"sha256:cf555b6299f10a6eb44e4f96d2f5a3884c70ce25dc5c8796aaa2f7b40e72cb1b", size = 18079, upload-time = "2025-09-05T12:49:07.732Z" }, { url = "https://files.pythonhosted.org/packages/cc/a3/c2b0333c2716fb3b4c9a973dd113366ac51b4f8d56b500f4f8f704b4817a/setproctitle-1.3.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:690b4776f9c15aaf1023bb07d7c5b797681a17af98a4a69e76a1d504e41108b7", size = 13099, upload-time = "2025-09-05T12:49:09.222Z" }, { url = "https://files.pythonhosted.org/packages/0e/f8/17bda581c517678260e6541b600eeb67745f53596dc077174141ba2f6702/setproctitle-1.3.7-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:00afa6fc507967d8c9d592a887cdc6c1f5742ceac6a4354d111ca0214847732c", size = 31793, upload-time = "2025-09-05T12:49:10.297Z" }, { url = "https://files.pythonhosted.org/packages/27/d1/76a33ae80d4e788ecab9eb9b53db03e81cfc95367ec7e3fbf4989962fedd/setproctitle-1.3.7-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9e02667f6b9fc1238ba753c0f4b0a37ae184ce8f3bbbc38e115d99646b3f4cd3", size = 32779, upload-time = "2025-09-05T12:49:12.157Z" }, { url = "https://files.pythonhosted.org/packages/59/27/1a07c38121967061564f5e0884414a5ab11a783260450172d4fc68c15621/setproctitle-1.3.7-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:83fcd271567d133eb9532d3b067c8a75be175b2b3b271e2812921a05303a693f", size = 34578, upload-time = "2025-09-05T12:49:13.393Z" }, { url = "https://files.pythonhosted.org/packages/d8/d4/725e6353935962d8bb12cbf7e7abba1d0d738c7f6935f90239d8e1ccf913/setproctitle-1.3.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:13fe37951dda1a45c35d77d06e3da5d90e4f875c4918a7312b3b4556cfa7ff64", size = 32030, upload-time = "2025-09-05T12:49:15.362Z" }, { url = 
"https://files.pythonhosted.org/packages/67/24/e4677ae8e1cb0d549ab558b12db10c175a889be0974c589c428fece5433e/setproctitle-1.3.7-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:a05509cfb2059e5d2ddff701d38e474169e9ce2a298cf1b6fd5f3a213a553fe5", size = 33363, upload-time = "2025-09-05T12:49:16.829Z" }, { url = "https://files.pythonhosted.org/packages/55/d4/69ce66e4373a48fdbb37489f3ded476bb393e27f514968c3a69a67343ae0/setproctitle-1.3.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6da835e76ae18574859224a75db6e15c4c2aaa66d300a57efeaa4c97ca4c7381", size = 31508, upload-time = "2025-09-05T12:49:18.032Z" }, { url = "https://files.pythonhosted.org/packages/4b/5a/42c1ed0e9665d068146a68326529b5686a1881c8b9197c2664db4baf6aeb/setproctitle-1.3.7-cp310-cp310-win32.whl", hash = "sha256:9e803d1b1e20240a93bac0bc1025363f7f80cb7eab67dfe21efc0686cc59ad7c", size = 12558, upload-time = "2025-09-05T12:49:19.742Z" }, { url = "https://files.pythonhosted.org/packages/dc/fe/dd206cc19a25561921456f6cb12b405635319299b6f366e0bebe872abc18/setproctitle-1.3.7-cp310-cp310-win_amd64.whl", hash = "sha256:a97200acc6b64ec4cada52c2ecaf1fba1ef9429ce9c542f8a7db5bcaa9dcbd95", size = 13245, upload-time = "2025-09-05T12:49:21.023Z" }, { url = "https://files.pythonhosted.org/packages/04/cd/1b7ba5cad635510720ce19d7122154df96a2387d2a74217be552887c93e5/setproctitle-1.3.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a600eeb4145fb0ee6c287cb82a2884bd4ec5bbb076921e287039dcc7b7cc6dd0", size = 18085, upload-time = "2025-09-05T12:49:22.183Z" }, { url = "https://files.pythonhosted.org/packages/8f/1a/b2da0a620490aae355f9d72072ac13e901a9fec809a6a24fc6493a8f3c35/setproctitle-1.3.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:97a090fed480471bb175689859532709e28c085087e344bca45cf318034f70c4", size = 13097, upload-time = "2025-09-05T12:49:23.322Z" }, { url = 
"https://files.pythonhosted.org/packages/18/2e/bd03ff02432a181c1787f6fc2a678f53b7dacdd5ded69c318fe1619556e8/setproctitle-1.3.7-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1607b963e7b53e24ec8a2cb4e0ab3ae591d7c6bf0a160feef0551da63452b37f", size = 32191, upload-time = "2025-09-05T12:49:24.567Z" }, { url = "https://files.pythonhosted.org/packages/28/78/1e62fc0937a8549f2220445ed2175daacee9b6764c7963b16148119b016d/setproctitle-1.3.7-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a20fb1a3974e2dab857870cf874b325b8705605cb7e7e8bcbb915bca896f52a9", size = 33203, upload-time = "2025-09-05T12:49:25.871Z" }, { url = "https://files.pythonhosted.org/packages/a0/3c/65edc65db3fa3df400cf13b05e9d41a3c77517b4839ce873aa6b4043184f/setproctitle-1.3.7-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f8d961bba676e07d77665204f36cffaa260f526e7b32d07ab3df6a2c1dfb44ba", size = 34963, upload-time = "2025-09-05T12:49:27.044Z" }, { url = "https://files.pythonhosted.org/packages/a1/32/89157e3de997973e306e44152522385f428e16f92f3cf113461489e1e2ee/setproctitle-1.3.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:db0fd964fbd3a9f8999b502f65bd2e20883fdb5b1fae3a424e66db9a793ed307", size = 32398, upload-time = "2025-09-05T12:49:28.909Z" }, { url = "https://files.pythonhosted.org/packages/4a/18/77a765a339ddf046844cb4513353d8e9dcd8183da9cdba6e078713e6b0b2/setproctitle-1.3.7-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:db116850fcf7cca19492030f8d3b4b6e231278e8fe097a043957d22ce1bdf3ee", size = 33657, upload-time = "2025-09-05T12:49:30.323Z" }, { url = "https://files.pythonhosted.org/packages/6b/63/f0b6205c64d74d2a24a58644a38ec77bdbaa6afc13747e75973bf8904932/setproctitle-1.3.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:316664d8b24a5c91ee244460bdaf7a74a707adaa9e14fbe0dc0a53168bb9aba1", size = 31836, upload-time = 
"2025-09-05T12:49:32.309Z" }, { url = "https://files.pythonhosted.org/packages/ba/51/e1277f9ba302f1a250bbd3eedbbee747a244b3cc682eb58fb9733968f6d8/setproctitle-1.3.7-cp311-cp311-win32.whl", hash = "sha256:b74774ca471c86c09b9d5037c8451fff06bb82cd320d26ae5a01c758088c0d5d", size = 12556, upload-time = "2025-09-05T12:49:33.529Z" }, { url = "https://files.pythonhosted.org/packages/b6/7b/822a23f17e9003dfdee92cd72758441ca2a3680388da813a371b716fb07f/setproctitle-1.3.7-cp311-cp311-win_amd64.whl", hash = "sha256:acb9097213a8dd3410ed9f0dc147840e45ca9797785272928d4be3f0e69e3be4", size = 13243, upload-time = "2025-09-05T12:49:34.553Z" }, { url = "https://files.pythonhosted.org/packages/fb/f0/2dc88e842077719d7384d86cc47403e5102810492b33680e7dadcee64cd8/setproctitle-1.3.7-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:2dc99aec591ab6126e636b11035a70991bc1ab7a261da428491a40b84376654e", size = 18049, upload-time = "2025-09-05T12:49:36.241Z" }, { url = "https://files.pythonhosted.org/packages/f0/b4/50940504466689cda65680c9e9a1e518e5750c10490639fa687489ac7013/setproctitle-1.3.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cdd8aa571b7aa39840fdbea620e308a19691ff595c3a10231e9ee830339dd798", size = 13079, upload-time = "2025-09-05T12:49:38.088Z" }, { url = "https://files.pythonhosted.org/packages/d0/99/71630546b9395b095f4082be41165d1078204d1696c2d9baade3de3202d0/setproctitle-1.3.7-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:2906b6c7959cdb75f46159bf0acd8cc9906cf1361c9e1ded0d065fe8f9039629", size = 32932, upload-time = "2025-09-05T12:49:39.271Z" }, { url = "https://files.pythonhosted.org/packages/50/22/cee06af4ffcfb0e8aba047bd44f5262e644199ae7527ae2c1f672b86495c/setproctitle-1.3.7-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6915964a6dda07920a1159321dcd6d94fc7fc526f815ca08a8063aeca3c204f1", size = 33736, upload-time = "2025-09-05T12:49:40.565Z" }, { url = 
"https://files.pythonhosted.org/packages/5c/00/a5949a8bb06ef5e7df214fc393bb2fb6aedf0479b17214e57750dfdd0f24/setproctitle-1.3.7-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:cff72899861c765bd4021d1ff1c68d60edc129711a2fdba77f9cb69ef726a8b6", size = 35605, upload-time = "2025-09-05T12:49:42.362Z" }, { url = "https://files.pythonhosted.org/packages/b0/3a/50caca532a9343828e3bf5778c7a84d6c737a249b1796d50dd680290594d/setproctitle-1.3.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b7cb05bd446687ff816a3aaaf831047fc4c364feff7ada94a66024f1367b448c", size = 33143, upload-time = "2025-09-05T12:49:43.515Z" }, { url = "https://files.pythonhosted.org/packages/ca/14/b843a251296ce55e2e17c017d6b9f11ce0d3d070e9265de4ecad948b913d/setproctitle-1.3.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:3a57b9a00de8cae7e2a1f7b9f0c2ac7b69372159e16a7708aa2f38f9e5cc987a", size = 34434, upload-time = "2025-09-05T12:49:45.31Z" }, { url = "https://files.pythonhosted.org/packages/c8/b7/06145c238c0a6d2c4bc881f8be230bb9f36d2bf51aff7bddcb796d5eed67/setproctitle-1.3.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d8828b356114f6b308b04afe398ed93803d7fca4a955dd3abe84430e28d33739", size = 32795, upload-time = "2025-09-05T12:49:46.419Z" }, { url = "https://files.pythonhosted.org/packages/ef/dc/ef76a81fac9bf27b84ed23df19c1f67391a753eed6e3c2254ebcb5133f56/setproctitle-1.3.7-cp312-cp312-win32.whl", hash = "sha256:b0304f905efc845829ac2bc791ddebb976db2885f6171f4a3de678d7ee3f7c9f", size = 12552, upload-time = "2025-09-05T12:49:47.635Z" }, { url = "https://files.pythonhosted.org/packages/e2/5b/a9fe517912cd6e28cf43a212b80cb679ff179a91b623138a99796d7d18a0/setproctitle-1.3.7-cp312-cp312-win_amd64.whl", hash = "sha256:9888ceb4faea3116cf02a920ff00bfbc8cc899743e4b4ac914b03625bdc3c300", size = 13247, upload-time = "2025-09-05T12:49:49.16Z" }, { url = 
"https://files.pythonhosted.org/packages/5d/2f/fcedcade3b307a391b6e17c774c6261a7166aed641aee00ed2aad96c63ce/setproctitle-1.3.7-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c3736b2a423146b5e62230502e47e08e68282ff3b69bcfe08a322bee73407922", size = 18047, upload-time = "2025-09-05T12:49:50.271Z" }, { url = "https://files.pythonhosted.org/packages/23/ae/afc141ca9631350d0a80b8f287aac79a76f26b6af28fd8bf92dae70dc2c5/setproctitle-1.3.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3384e682b158d569e85a51cfbde2afd1ab57ecf93ea6651fe198d0ba451196ee", size = 13073, upload-time = "2025-09-05T12:49:51.46Z" }, { url = "https://files.pythonhosted.org/packages/87/ed/0a4f00315bc02510395b95eec3d4aa77c07192ee79f0baae77ea7b9603d8/setproctitle-1.3.7-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:0564a936ea687cd24dffcea35903e2a20962aa6ac20e61dd3a207652401492dd", size = 33284, upload-time = "2025-09-05T12:49:52.741Z" }, { url = "https://files.pythonhosted.org/packages/fc/e4/adf3c4c0a2173cb7920dc9df710bcc67e9bcdbf377e243b7a962dc31a51a/setproctitle-1.3.7-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a5d1cb3f81531f0eb40e13246b679a1bdb58762b170303463cb06ecc296f26d0", size = 34104, upload-time = "2025-09-05T12:49:54.416Z" }, { url = "https://files.pythonhosted.org/packages/52/4f/6daf66394152756664257180439d37047aa9a1cfaa5e4f5ed35e93d1dc06/setproctitle-1.3.7-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a7d159e7345f343b44330cbba9194169b8590cb13dae940da47aa36a72aa9929", size = 35982, upload-time = "2025-09-05T12:49:56.295Z" }, { url = "https://files.pythonhosted.org/packages/1b/62/f2c0595403cf915db031f346b0e3b2c0096050e90e0be658a64f44f4278a/setproctitle-1.3.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0b5074649797fd07c72ca1f6bff0406f4a42e1194faac03ecaab765ce605866f", size = 33150, upload-time = 
"2025-09-05T12:49:58.025Z" }, { url = "https://files.pythonhosted.org/packages/a0/29/10dd41cde849fb2f9b626c846b7ea30c99c81a18a5037a45cc4ba33c19a7/setproctitle-1.3.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:61e96febced3f61b766115381d97a21a6265a0f29188a791f6df7ed777aef698", size = 34463, upload-time = "2025-09-05T12:49:59.424Z" }, { url = "https://files.pythonhosted.org/packages/71/3c/cedd8eccfaf15fb73a2c20525b68c9477518917c9437737fa0fda91e378f/setproctitle-1.3.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:047138279f9463f06b858e579cc79580fbf7a04554d24e6bddf8fe5dddbe3d4c", size = 32848, upload-time = "2025-09-05T12:50:01.107Z" }, { url = "https://files.pythonhosted.org/packages/d1/3e/0a0e27d1c9926fecccfd1f91796c244416c70bf6bca448d988638faea81d/setproctitle-1.3.7-cp313-cp313-win32.whl", hash = "sha256:7f47accafac7fe6535ba8ba9efd59df9d84a6214565108d0ebb1199119c9cbbd", size = 12544, upload-time = "2025-09-05T12:50:15.81Z" }, { url = "https://files.pythonhosted.org/packages/36/1b/6bf4cb7acbbd5c846ede1c3f4d6b4ee52744d402e43546826da065ff2ab7/setproctitle-1.3.7-cp313-cp313-win_amd64.whl", hash = "sha256:fe5ca35aeec6dc50cabab9bf2d12fbc9067eede7ff4fe92b8f5b99d92e21263f", size = 13235, upload-time = "2025-09-05T12:50:16.89Z" }, { url = "https://files.pythonhosted.org/packages/e6/a4/d588d3497d4714750e3eaf269e9e8985449203d82b16b933c39bd3fc52a1/setproctitle-1.3.7-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:10e92915c4b3086b1586933a36faf4f92f903c5554f3c34102d18c7d3f5378e9", size = 18058, upload-time = "2025-09-05T12:50:02.501Z" }, { url = "https://files.pythonhosted.org/packages/05/77/7637f7682322a7244e07c373881c7e982567e2cb1dd2f31bd31481e45500/setproctitle-1.3.7-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:de879e9c2eab637f34b1a14c4da1e030c12658cdc69ee1b3e5be81b380163ce5", size = 13072, upload-time = "2025-09-05T12:50:03.601Z" }, { url = 
"https://files.pythonhosted.org/packages/52/09/f366eca0973cfbac1470068d1313fa3fe3de4a594683385204ec7f1c4101/setproctitle-1.3.7-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:c18246d88e227a5b16248687514f95642505000442165f4b7db354d39d0e4c29", size = 34490, upload-time = "2025-09-05T12:50:04.948Z" }, { url = "https://files.pythonhosted.org/packages/71/36/611fc2ed149fdea17c3677e1d0df30d8186eef9562acc248682b91312706/setproctitle-1.3.7-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7081f193dab22df2c36f9fc6d113f3793f83c27891af8fe30c64d89d9a37e152", size = 35267, upload-time = "2025-09-05T12:50:06.015Z" }, { url = "https://files.pythonhosted.org/packages/88/a4/64e77d0671446bd5a5554387b69e1efd915274686844bea733714c828813/setproctitle-1.3.7-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:9cc9b901ce129350637426a89cfd650066a4adc6899e47822e2478a74023ff7c", size = 37376, upload-time = "2025-09-05T12:50:07.484Z" }, { url = "https://files.pythonhosted.org/packages/89/bc/ad9c664fe524fb4a4b2d3663661a5c63453ce851736171e454fa2cdec35c/setproctitle-1.3.7-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:80e177eff2d1ec172188d0d7fd9694f8e43d3aab76a6f5f929bee7bf7894e98b", size = 33963, upload-time = "2025-09-05T12:50:09.056Z" }, { url = "https://files.pythonhosted.org/packages/ab/01/a36de7caf2d90c4c28678da1466b47495cbbad43badb4e982d8db8167ed4/setproctitle-1.3.7-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:23e520776c445478a67ee71b2a3c1ffdafbe1f9f677239e03d7e2cc635954e18", size = 35550, upload-time = "2025-09-05T12:50:10.791Z" }, { url = "https://files.pythonhosted.org/packages/dd/68/17e8aea0ed5ebc17fbf03ed2562bfab277c280e3625850c38d92a7b5fcd9/setproctitle-1.3.7-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5fa1953126a3b9bd47049d58c51b9dac72e78ed120459bd3aceb1bacee72357c", size = 33727, upload-time = 
"2025-09-05T12:50:12.032Z" }, { url = "https://files.pythonhosted.org/packages/b2/33/90a3bf43fe3a2242b4618aa799c672270250b5780667898f30663fd94993/setproctitle-1.3.7-cp313-cp313t-win32.whl", hash = "sha256:4a5e212bf438a4dbeece763f4962ad472c6008ff6702e230b4f16a037e2f6f29", size = 12549, upload-time = "2025-09-05T12:50:13.074Z" }, { url = "https://files.pythonhosted.org/packages/0b/0e/50d1f07f3032e1f23d814ad6462bc0a138f369967c72494286b8a5228e40/setproctitle-1.3.7-cp313-cp313t-win_amd64.whl", hash = "sha256:cf2727b733e90b4f874bac53e3092aa0413fe1ea6d4f153f01207e6ce65034d9", size = 13243, upload-time = "2025-09-05T12:50:14.146Z" }, { url = "https://files.pythonhosted.org/packages/89/c7/43ac3a98414f91d1b86a276bc2f799ad0b4b010e08497a95750d5bc42803/setproctitle-1.3.7-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:80c36c6a87ff72eabf621d0c79b66f3bdd0ecc79e873c1e9f0651ee8bf215c63", size = 18052, upload-time = "2025-09-05T12:50:17.928Z" }, { url = "https://files.pythonhosted.org/packages/cd/2c/dc258600a25e1a1f04948073826bebc55e18dbd99dc65a576277a82146fa/setproctitle-1.3.7-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:b53602371a52b91c80aaf578b5ada29d311d12b8a69c0c17fbc35b76a1fd4f2e", size = 13071, upload-time = "2025-09-05T12:50:19.061Z" }, { url = "https://files.pythonhosted.org/packages/ab/26/8e3bb082992f19823d831f3d62a89409deb6092e72fc6940962983ffc94f/setproctitle-1.3.7-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fcb966a6c57cf07cc9448321a08f3be6b11b7635be502669bc1d8745115d7e7f", size = 33180, upload-time = "2025-09-05T12:50:20.395Z" }, { url = "https://files.pythonhosted.org/packages/f1/af/ae692a20276d1159dd0cf77b0bcf92cbb954b965655eb4a69672099bb214/setproctitle-1.3.7-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:46178672599b940368d769474fe13ecef1b587d58bb438ea72b9987f74c56ea5", size = 34043, upload-time = "2025-09-05T12:50:22.454Z" }, { url = 
"https://files.pythonhosted.org/packages/34/b2/6a092076324dd4dac1a6d38482bedebbff5cf34ef29f58585ec76e47bc9d/setproctitle-1.3.7-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7f9e9e3ff135cbcc3edd2f4cf29b139f4aca040d931573102742db70ff428c17", size = 35892, upload-time = "2025-09-05T12:50:23.937Z" }, { url = "https://files.pythonhosted.org/packages/1c/1a/8836b9f28cee32859ac36c3df85aa03e1ff4598d23ea17ca2e96b5845a8f/setproctitle-1.3.7-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:14c7eba8d90c93b0e79c01f0bd92a37b61983c27d6d7d5a3b5defd599113d60e", size = 32898, upload-time = "2025-09-05T12:50:25.617Z" }, { url = "https://files.pythonhosted.org/packages/ef/22/8fabdc24baf42defb599714799d8445fe3ae987ec425a26ec8e80ea38f8e/setproctitle-1.3.7-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:9e64e98077fb30b6cf98073d6c439cd91deb8ebbf8fc62d9dbf52bd38b0c6ac0", size = 34308, upload-time = "2025-09-05T12:50:26.827Z" }, { url = "https://files.pythonhosted.org/packages/15/1b/b9bee9de6c8cdcb3b3a6cb0b3e773afdb86bbbc1665a3bfa424a4294fda2/setproctitle-1.3.7-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b91387cc0f02a00ac95dcd93f066242d3cca10ff9e6153de7ee07069c6f0f7c8", size = 32536, upload-time = "2025-09-05T12:50:28.5Z" }, { url = "https://files.pythonhosted.org/packages/37/0c/75e5f2685a5e3eda0b39a8b158d6d8895d6daf3ba86dec9e3ba021510272/setproctitle-1.3.7-cp314-cp314-win32.whl", hash = "sha256:52b054a61c99d1b72fba58b7f5486e04b20fefc6961cd76722b424c187f362ed", size = 12731, upload-time = "2025-09-05T12:50:43.955Z" }, { url = "https://files.pythonhosted.org/packages/d2/ae/acddbce90d1361e1786e1fb421bc25baeb0c22ef244ee5d0176511769ec8/setproctitle-1.3.7-cp314-cp314-win_amd64.whl", hash = "sha256:5818e4080ac04da1851b3ec71e8a0f64e3748bf9849045180566d8b736702416", size = 13464, upload-time = "2025-09-05T12:50:45.057Z" }, { url = 
"https://files.pythonhosted.org/packages/01/6d/20886c8ff2e6d85e3cabadab6aab9bb90acaf1a5cfcb04d633f8d61b2626/setproctitle-1.3.7-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:6fc87caf9e323ac426910306c3e5d3205cd9f8dcac06d233fcafe9337f0928a3", size = 18062, upload-time = "2025-09-05T12:50:29.78Z" }, { url = "https://files.pythonhosted.org/packages/9a/60/26dfc5f198715f1343b95c2f7a1c16ae9ffa45bd89ffd45a60ed258d24ea/setproctitle-1.3.7-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6134c63853d87a4897ba7d5cc0e16abfa687f6c66fc09f262bb70d67718f2309", size = 13075, upload-time = "2025-09-05T12:50:31.604Z" }, { url = "https://files.pythonhosted.org/packages/21/9c/980b01f50d51345dd513047e3ba9e96468134b9181319093e61db1c47188/setproctitle-1.3.7-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1403d2abfd32790b6369916e2313dffbe87d6b11dca5bbd898981bcde48e7a2b", size = 34744, upload-time = "2025-09-05T12:50:32.777Z" }, { url = "https://files.pythonhosted.org/packages/86/b4/82cd0c86e6d1c4538e1a7eb908c7517721513b801dff4ba3f98ef816a240/setproctitle-1.3.7-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e7c5bfe4228ea22373e3025965d1a4116097e555ee3436044f5c954a5e63ac45", size = 35589, upload-time = "2025-09-05T12:50:34.13Z" }, { url = "https://files.pythonhosted.org/packages/8a/4f/9f6b2a7417fd45673037554021c888b31247f7594ff4bd2239918c5cd6d0/setproctitle-1.3.7-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:585edf25e54e21a94ccb0fe81ad32b9196b69ebc4fc25f81da81fb8a50cca9e4", size = 37698, upload-time = "2025-09-05T12:50:35.524Z" }, { url = "https://files.pythonhosted.org/packages/20/92/927b7d4744aac214d149c892cb5fa6dc6f49cfa040cb2b0a844acd63dcaf/setproctitle-1.3.7-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:96c38cdeef9036eb2724c2210e8d0b93224e709af68c435d46a4733a3675fee1", size = 34201, upload-time = 
"2025-09-05T12:50:36.697Z" }, { url = "https://files.pythonhosted.org/packages/0a/0c/fd4901db5ba4b9d9013e62f61d9c18d52290497f956745cd3e91b0d80f90/setproctitle-1.3.7-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:45e3ef48350abb49cf937d0a8ba15e42cee1e5ae13ca41a77c66d1abc27a5070", size = 35801, upload-time = "2025-09-05T12:50:38.314Z" }, { url = "https://files.pythonhosted.org/packages/e7/e3/54b496ac724e60e61cc3447f02690105901ca6d90da0377dffe49ff99fc7/setproctitle-1.3.7-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:1fae595d032b30dab4d659bece20debd202229fce12b55abab978b7f30783d73", size = 33958, upload-time = "2025-09-05T12:50:39.841Z" }, { url = "https://files.pythonhosted.org/packages/ea/a8/c84bb045ebf8c6fdc7f7532319e86f8380d14bbd3084e6348df56bdfe6fd/setproctitle-1.3.7-cp314-cp314t-win32.whl", hash = "sha256:02432f26f5d1329ab22279ff863c83589894977063f59e6c4b4845804a08f8c2", size = 12745, upload-time = "2025-09-05T12:50:41.377Z" }, { url = "https://files.pythonhosted.org/packages/08/b6/3a5a4f9952972791a9114ac01dfc123f0df79903577a3e0a7a404a695586/setproctitle-1.3.7-cp314-cp314t-win_amd64.whl", hash = "sha256:cbc388e3d86da1f766d8fc2e12682e446064c01cea9f88a88647cfe7c011de6a", size = 13469, upload-time = "2025-09-05T12:50:42.67Z" }, { url = "https://files.pythonhosted.org/packages/34/8a/aff5506ce89bc3168cb492b18ba45573158d528184e8a9759a05a09088a9/setproctitle-1.3.7-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:eb440c5644a448e6203935ed60466ec8d0df7278cd22dc6cf782d07911bcbea6", size = 12654, upload-time = "2025-09-05T12:51:17.141Z" }, { url = "https://files.pythonhosted.org/packages/41/89/5b6f2faedd6ced3d3c085a5efbd91380fb1f61f4c12bc42acad37932f4e9/setproctitle-1.3.7-pp310-pypy310_pp73-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:502b902a0e4c69031b87870ff4986c290ebbb12d6038a70639f09c331b18efb2", size = 14284, upload-time = "2025-09-05T12:51:18.393Z" }, { url = 
"https://files.pythonhosted.org/packages/0a/c0/4312fed3ca393a29589603fd48f17937b4ed0638b923bac75a728382e730/setproctitle-1.3.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f6f268caeabb37ccd824d749e7ce0ec6337c4ed954adba33ec0d90cc46b0ab78", size = 13282, upload-time = "2025-09-05T12:51:19.703Z" }, { url = "https://files.pythonhosted.org/packages/c3/5b/5e1c117ac84e3cefcf8d7a7f6b2461795a87e20869da065a5c087149060b/setproctitle-1.3.7-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:b1cac6a4b0252b8811d60b6d8d0f157c0fdfed379ac89c25a914e6346cf355a1", size = 12587, upload-time = "2025-09-05T12:51:21.195Z" }, { url = "https://files.pythonhosted.org/packages/73/02/b9eadc226195dcfa90eed37afe56b5dd6fa2f0e5220ab8b7867b8862b926/setproctitle-1.3.7-pp311-pypy311_pp73-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f1704c9e041f2b1dc38f5be4552e141e1432fba3dd52c72eeffd5bc2db04dc65", size = 14286, upload-time = "2025-09-05T12:51:22.61Z" }, { url = "https://files.pythonhosted.org/packages/28/26/1be1d2a53c2a91ec48fa2ff4a409b395f836798adf194d99de9c059419ea/setproctitle-1.3.7-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:b08b61976ffa548bd5349ce54404bf6b2d51bd74d4f1b241ed1b0f25bce09c3a", size = 13282, upload-time = "2025-09-05T12:51:24.094Z" }, ] [[package]] name = "tomli" version = "2.4.0" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/82/30/31573e9457673ab10aa432461bee537ce6cef177667deca369efb79df071/tomli-2.4.0.tar.gz", hash = "sha256:aa89c3f6c277dd275d8e243ad24f3b5e701491a860d5121f2cdd399fbb31fc9c", size = 17477, upload-time = "2026-01-11T11:22:38.165Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/3c/d9/3dc2289e1f3b32eb19b9785b6a006b28ee99acb37d1d47f78d4c10e28bf8/tomli-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b5ef256a3fd497d4973c11bf142e9ed78b150d36f5773f1ca6088c230ffc5867", size = 153663, upload-time = "2026-01-11T11:21:45.27Z" }, { url = 
"https://files.pythonhosted.org/packages/51/32/ef9f6845e6b9ca392cd3f64f9ec185cc6f09f0a2df3db08cbe8809d1d435/tomli-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5572e41282d5268eb09a697c89a7bee84fae66511f87533a6f88bd2f7b652da9", size = 148469, upload-time = "2026-01-11T11:21:46.873Z" }, { url = "https://files.pythonhosted.org/packages/d6/c2/506e44cce89a8b1b1e047d64bd495c22c9f71f21e05f380f1a950dd9c217/tomli-2.4.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:551e321c6ba03b55676970b47cb1b73f14a0a4dce6a3e1a9458fd6d921d72e95", size = 236039, upload-time = "2026-01-11T11:21:48.503Z" }, { url = "https://files.pythonhosted.org/packages/b3/40/e1b65986dbc861b7e986e8ec394598187fa8aee85b1650b01dd925ca0be8/tomli-2.4.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e3f639a7a8f10069d0e15408c0b96a2a828cfdec6fca05296ebcdcc28ca7c76", size = 243007, upload-time = "2026-01-11T11:21:49.456Z" }, { url = "https://files.pythonhosted.org/packages/9c/6f/6e39ce66b58a5b7ae572a0f4352ff40c71e8573633deda43f6a379d56b3e/tomli-2.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1b168f2731796b045128c45982d3a4874057626da0e2ef1fdd722848b741361d", size = 240875, upload-time = "2026-01-11T11:21:50.755Z" }, { url = "https://files.pythonhosted.org/packages/aa/ad/cb089cb190487caa80204d503c7fd0f4d443f90b95cf4ef5cf5aa0f439b0/tomli-2.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:133e93646ec4300d651839d382d63edff11d8978be23da4cc106f5a18b7d0576", size = 246271, upload-time = "2026-01-11T11:21:51.81Z" }, { url = "https://files.pythonhosted.org/packages/0b/63/69125220e47fd7a3a27fd0de0c6398c89432fec41bc739823bcc66506af6/tomli-2.4.0-cp311-cp311-win32.whl", hash = "sha256:b6c78bdf37764092d369722d9946cb65b8767bfa4110f902a1b2542d8d173c8a", size = 96770, upload-time = "2026-01-11T11:21:52.647Z" }, { url = 
"https://files.pythonhosted.org/packages/1e/0d/a22bb6c83f83386b0008425a6cd1fa1c14b5f3dd4bad05e98cf3dbbf4a64/tomli-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:d3d1654e11d724760cdb37a3d7691f0be9db5fbdaef59c9f532aabf87006dbaa", size = 107626, upload-time = "2026-01-11T11:21:53.459Z" }, { url = "https://files.pythonhosted.org/packages/2f/6d/77be674a3485e75cacbf2ddba2b146911477bd887dda9d8c9dfb2f15e871/tomli-2.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:cae9c19ed12d4e8f3ebf46d1a75090e4c0dc16271c5bce1c833ac168f08fb614", size = 94842, upload-time = "2026-01-11T11:21:54.831Z" }, { url = "https://files.pythonhosted.org/packages/3c/43/7389a1869f2f26dba52404e1ef13b4784b6b37dac93bac53457e3ff24ca3/tomli-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:920b1de295e72887bafa3ad9f7a792f811847d57ea6b1215154030cf131f16b1", size = 154894, upload-time = "2026-01-11T11:21:56.07Z" }, { url = "https://files.pythonhosted.org/packages/e9/05/2f9bf110b5294132b2edf13fe6ca6ae456204f3d749f623307cbb7a946f2/tomli-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7d6d9a4aee98fac3eab4952ad1d73aee87359452d1c086b5ceb43ed02ddb16b8", size = 149053, upload-time = "2026-01-11T11:21:57.467Z" }, { url = "https://files.pythonhosted.org/packages/e8/41/1eda3ca1abc6f6154a8db4d714a4d35c4ad90adc0bcf700657291593fbf3/tomli-2.4.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36b9d05b51e65b254ea6c2585b59d2c4cb91c8a3d91d0ed0f17591a29aaea54a", size = 243481, upload-time = "2026-01-11T11:21:58.661Z" }, { url = "https://files.pythonhosted.org/packages/d2/6d/02ff5ab6c8868b41e7d4b987ce2b5f6a51d3335a70aa144edd999e055a01/tomli-2.4.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1c8a885b370751837c029ef9bc014f27d80840e48bac415f3412e6593bbc18c1", size = 251720, upload-time = "2026-01-11T11:22:00.178Z" }, { url = 
"https://files.pythonhosted.org/packages/7b/57/0405c59a909c45d5b6f146107c6d997825aa87568b042042f7a9c0afed34/tomli-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8768715ffc41f0008abe25d808c20c3d990f42b6e2e58305d5da280ae7d1fa3b", size = 247014, upload-time = "2026-01-11T11:22:01.238Z" }, { url = "https://files.pythonhosted.org/packages/2c/0e/2e37568edd944b4165735687cbaf2fe3648129e440c26d02223672ee0630/tomli-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b438885858efd5be02a9a133caf5812b8776ee0c969fea02c45e8e3f296ba51", size = 251820, upload-time = "2026-01-11T11:22:02.727Z" }, { url = "https://files.pythonhosted.org/packages/5a/1c/ee3b707fdac82aeeb92d1a113f803cf6d0f37bdca0849cb489553e1f417a/tomli-2.4.0-cp312-cp312-win32.whl", hash = "sha256:0408e3de5ec77cc7f81960c362543cbbd91ef883e3138e81b729fc3eea5b9729", size = 97712, upload-time = "2026-01-11T11:22:03.777Z" }, { url = "https://files.pythonhosted.org/packages/69/13/c07a9177d0b3bab7913299b9278845fc6eaaca14a02667c6be0b0a2270c8/tomli-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:685306e2cc7da35be4ee914fd34ab801a6acacb061b6a7abca922aaf9ad368da", size = 108296, upload-time = "2026-01-11T11:22:04.86Z" }, { url = "https://files.pythonhosted.org/packages/18/27/e267a60bbeeee343bcc279bb9e8fbed0cbe224bc7b2a3dc2975f22809a09/tomli-2.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:5aa48d7c2356055feef06a43611fc401a07337d5b006be13a30f6c58f869e3c3", size = 94553, upload-time = "2026-01-11T11:22:05.854Z" }, { url = "https://files.pythonhosted.org/packages/34/91/7f65f9809f2936e1f4ce6268ae1903074563603b2a2bd969ebbda802744f/tomli-2.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:84d081fbc252d1b6a982e1870660e7330fb8f90f676f6e78b052ad4e64714bf0", size = 154915, upload-time = "2026-01-11T11:22:06.703Z" }, { url = "https://files.pythonhosted.org/packages/20/aa/64dd73a5a849c2e8f216b755599c511badde80e91e9bc2271baa7b2cdbb1/tomli-2.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:9a08144fa4cba33db5255f9b74f0b89888622109bd2776148f2597447f92a94e", size = 149038, upload-time = "2026-01-11T11:22:07.56Z" }, { url = "https://files.pythonhosted.org/packages/9e/8a/6d38870bd3d52c8d1505ce054469a73f73a0fe62c0eaf5dddf61447e32fa/tomli-2.4.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c73add4bb52a206fd0c0723432db123c0c75c280cbd67174dd9d2db228ebb1b4", size = 242245, upload-time = "2026-01-11T11:22:08.344Z" }, { url = "https://files.pythonhosted.org/packages/59/bb/8002fadefb64ab2669e5b977df3f5e444febea60e717e755b38bb7c41029/tomli-2.4.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1fb2945cbe303b1419e2706e711b7113da57b7db31ee378d08712d678a34e51e", size = 250335, upload-time = "2026-01-11T11:22:09.951Z" }, { url = "https://files.pythonhosted.org/packages/a5/3d/4cdb6f791682b2ea916af2de96121b3cb1284d7c203d97d92d6003e91c8d/tomli-2.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bbb1b10aa643d973366dc2cb1ad94f99c1726a02343d43cbc011edbfac579e7c", size = 245962, upload-time = "2026-01-11T11:22:11.27Z" }, { url = "https://files.pythonhosted.org/packages/f2/4a/5f25789f9a460bd858ba9756ff52d0830d825b458e13f754952dd15fb7bb/tomli-2.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4cbcb367d44a1f0c2be408758b43e1ffb5308abe0ea222897d6bfc8e8281ef2f", size = 250396, upload-time = "2026-01-11T11:22:12.325Z" }, { url = "https://files.pythonhosted.org/packages/aa/2f/b73a36fea58dfa08e8b3a268750e6853a6aac2a349241a905ebd86f3047a/tomli-2.4.0-cp313-cp313-win32.whl", hash = "sha256:7d49c66a7d5e56ac959cb6fc583aff0651094ec071ba9ad43df785abc2320d86", size = 97530, upload-time = "2026-01-11T11:22:13.865Z" }, { url = "https://files.pythonhosted.org/packages/3b/af/ca18c134b5d75de7e8dc551c5234eaba2e8e951f6b30139599b53de9c187/tomli-2.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:3cf226acb51d8f1c394c1b310e0e0e61fecdd7adcb78d01e294ac297dd2e7f87", size = 
108227, upload-time = "2026-01-11T11:22:15.224Z" }, { url = "https://files.pythonhosted.org/packages/22/c3/b386b832f209fee8073c8138ec50f27b4460db2fdae9ffe022df89a57f9b/tomli-2.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:d20b797a5c1ad80c516e41bc1fb0443ddb5006e9aaa7bda2d71978346aeb9132", size = 94748, upload-time = "2026-01-11T11:22:16.009Z" }, { url = "https://files.pythonhosted.org/packages/f3/c4/84047a97eb1004418bc10bdbcfebda209fca6338002eba2dc27cc6d13563/tomli-2.4.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:26ab906a1eb794cd4e103691daa23d95c6919cc2fa9160000ac02370cc9dd3f6", size = 154725, upload-time = "2026-01-11T11:22:17.269Z" }, { url = "https://files.pythonhosted.org/packages/a8/5d/d39038e646060b9d76274078cddf146ced86dc2b9e8bbf737ad5983609a0/tomli-2.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:20cedb4ee43278bc4f2fee6cb50daec836959aadaf948db5172e776dd3d993fc", size = 148901, upload-time = "2026-01-11T11:22:18.287Z" }, { url = "https://files.pythonhosted.org/packages/73/e5/383be1724cb30f4ce44983d249645684a48c435e1cd4f8b5cded8a816d3c/tomli-2.4.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39b0b5d1b6dd03684b3fb276407ebed7090bbec989fa55838c98560c01113b66", size = 243375, upload-time = "2026-01-11T11:22:19.154Z" }, { url = "https://files.pythonhosted.org/packages/31/f0/bea80c17971c8d16d3cc109dc3585b0f2ce1036b5f4a8a183789023574f2/tomli-2.4.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a26d7ff68dfdb9f87a016ecfd1e1c2bacbe3108f4e0f8bcd2228ef9a766c787d", size = 250639, upload-time = "2026-01-11T11:22:20.168Z" }, { url = "https://files.pythonhosted.org/packages/2c/8f/2853c36abbb7608e3f945d8a74e32ed3a74ee3a1f468f1ffc7d1cb3abba6/tomli-2.4.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:20ffd184fb1df76a66e34bd1b36b4a4641bd2b82954befa32fe8163e79f1a702", size = 246897, upload-time = "2026-01-11T11:22:21.544Z" }, { url = 
"https://files.pythonhosted.org/packages/49/f0/6c05e3196ed5337b9fe7ea003e95fd3819a840b7a0f2bf5a408ef1dad8ed/tomli-2.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:75c2f8bbddf170e8effc98f5e9084a8751f8174ea6ccf4fca5398436e0320bc8", size = 254697, upload-time = "2026-01-11T11:22:23.058Z" }, { url = "https://files.pythonhosted.org/packages/f3/f5/2922ef29c9f2951883525def7429967fc4d8208494e5ab524234f06b688b/tomli-2.4.0-cp314-cp314-win32.whl", hash = "sha256:31d556d079d72db7c584c0627ff3a24c5d3fb4f730221d3444f3efb1b2514776", size = 98567, upload-time = "2026-01-11T11:22:24.033Z" }, { url = "https://files.pythonhosted.org/packages/7b/31/22b52e2e06dd2a5fdbc3ee73226d763b184ff21fc24e20316a44ccc4d96b/tomli-2.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:43e685b9b2341681907759cf3a04e14d7104b3580f808cfde1dfdb60ada85475", size = 108556, upload-time = "2026-01-11T11:22:25.378Z" }, { url = "https://files.pythonhosted.org/packages/48/3d/5058dff3255a3d01b705413f64f4306a141a8fd7a251e5a495e3f192a998/tomli-2.4.0-cp314-cp314-win_arm64.whl", hash = "sha256:3d895d56bd3f82ddd6faaff993c275efc2ff38e52322ea264122d72729dca2b2", size = 96014, upload-time = "2026-01-11T11:22:26.138Z" }, { url = "https://files.pythonhosted.org/packages/b8/4e/75dab8586e268424202d3a1997ef6014919c941b50642a1682df43204c22/tomli-2.4.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:5b5807f3999fb66776dbce568cc9a828544244a8eb84b84b9bafc080c99597b9", size = 163339, upload-time = "2026-01-11T11:22:27.143Z" }, { url = "https://files.pythonhosted.org/packages/06/e3/b904d9ab1016829a776d97f163f183a48be6a4deb87304d1e0116a349519/tomli-2.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c084ad935abe686bd9c898e62a02a19abfc9760b5a79bc29644463eaf2840cb0", size = 159490, upload-time = "2026-01-11T11:22:28.399Z" }, { url = 
"https://files.pythonhosted.org/packages/e3/5a/fc3622c8b1ad823e8ea98a35e3c632ee316d48f66f80f9708ceb4f2a0322/tomli-2.4.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f2e3955efea4d1cfbcb87bc321e00dc08d2bcb737fd1d5e398af111d86db5df", size = 269398, upload-time = "2026-01-11T11:22:29.345Z" }, { url = "https://files.pythonhosted.org/packages/fd/33/62bd6152c8bdd4c305ad9faca48f51d3acb2df1f8791b1477d46ff86e7f8/tomli-2.4.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e0fe8a0b8312acf3a88077a0802565cb09ee34107813bba1c7cd591fa6cfc8d", size = 276515, upload-time = "2026-01-11T11:22:30.327Z" }, { url = "https://files.pythonhosted.org/packages/4b/ff/ae53619499f5235ee4211e62a8d7982ba9e439a0fb4f2f351a93d67c1dd2/tomli-2.4.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:413540dce94673591859c4c6f794dfeaa845e98bf35d72ed59636f869ef9f86f", size = 273806, upload-time = "2026-01-11T11:22:32.56Z" }, { url = "https://files.pythonhosted.org/packages/47/71/cbca7787fa68d4d0a9f7072821980b39fbb1b6faeb5f5cf02f4a5559fa28/tomli-2.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0dc56fef0e2c1c470aeac5b6ca8cc7b640bb93e92d9803ddaf9ea03e198f5b0b", size = 281340, upload-time = "2026-01-11T11:22:33.505Z" }, { url = "https://files.pythonhosted.org/packages/f5/00/d595c120963ad42474cf6ee7771ad0d0e8a49d0f01e29576ee9195d9ecdf/tomli-2.4.0-cp314-cp314t-win32.whl", hash = "sha256:d878f2a6707cc9d53a1be1414bbb419e629c3d6e67f69230217bb663e76b5087", size = 108106, upload-time = "2026-01-11T11:22:34.451Z" }, { url = "https://files.pythonhosted.org/packages/de/69/9aa0c6a505c2f80e519b43764f8b4ba93b5a0bbd2d9a9de6e2b24271b9a5/tomli-2.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2add28aacc7425117ff6364fe9e06a183bb0251b03f986df0e78e974047571fd", size = 120504, upload-time = "2026-01-11T11:22:35.764Z" }, { url = 
"https://files.pythonhosted.org/packages/b3/9f/f1668c281c58cfae01482f7114a4b88d345e4c140386241a1a24dcc9e7bc/tomli-2.4.0-cp314-cp314t-win_arm64.whl", hash = "sha256:2b1e3b80e1d5e52e40e9b924ec43d81570f0e7d09d11081b797bc4692765a3d4", size = 99561, upload-time = "2026-01-11T11:22:36.624Z" }, { url = "https://files.pythonhosted.org/packages/23/d1/136eb2cb77520a31e1f64cbae9d33ec6df0d78bdf4160398e86eec8a8754/tomli-2.4.0-py3-none-any.whl", hash = "sha256:1f776e7d669ebceb01dee46484485f43a4048746235e683bcdffacdf1fb4785a", size = 14477, upload-time = "2026-01-11T11:22:37.446Z" }, ] [[package]] name = "tornado" version = "6.5.4" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/37/1d/0a336abf618272d53f62ebe274f712e213f5a03c0b2339575430b8362ef2/tornado-6.5.4.tar.gz", hash = "sha256:a22fa9047405d03260b483980635f0b041989d8bcc9a313f8fe18b411d84b1d7", size = 513632, upload-time = "2025-12-15T19:21:03.836Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/ab/a9/e94a9d5224107d7ce3cc1fab8d5dc97f5ea351ccc6322ee4fb661da94e35/tornado-6.5.4-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d6241c1a16b1c9e4cc28148b1cda97dd1c6cb4fb7068ac1bedc610768dff0ba9", size = 443909, upload-time = "2025-12-15T19:20:48.382Z" }, { url = "https://files.pythonhosted.org/packages/db/7e/f7b8d8c4453f305a51f80dbb49014257bb7d28ccb4bbb8dd328ea995ecad/tornado-6.5.4-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:2d50f63dda1d2cac3ae1fa23d254e16b5e38153758470e9956cbc3d813d40843", size = 442163, upload-time = "2025-12-15T19:20:49.791Z" }, { url = "https://files.pythonhosted.org/packages/ba/b5/206f82d51e1bfa940ba366a8d2f83904b15942c45a78dd978b599870ab44/tornado-6.5.4-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1cf66105dc6acb5af613c054955b8137e34a03698aa53272dbda4afe252be17", size = 445746, upload-time = "2025-12-15T19:20:51.491Z" }, { url = 
"https://files.pythonhosted.org/packages/8e/9d/1a3338e0bd30ada6ad4356c13a0a6c35fbc859063fa7eddb309183364ac1/tornado-6.5.4-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50ff0a58b0dc97939d29da29cd624da010e7f804746621c78d14b80238669335", size = 445083, upload-time = "2025-12-15T19:20:52.778Z" }, { url = "https://files.pythonhosted.org/packages/50/d4/e51d52047e7eb9a582da59f32125d17c0482d065afd5d3bc435ff2120dc5/tornado-6.5.4-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5fb5e04efa54cf0baabdd10061eb4148e0be137166146fff835745f59ab9f7f", size = 445315, upload-time = "2025-12-15T19:20:53.996Z" }, { url = "https://files.pythonhosted.org/packages/27/07/2273972f69ca63dbc139694a3fc4684edec3ea3f9efabf77ed32483b875c/tornado-6.5.4-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9c86b1643b33a4cd415f8d0fe53045f913bf07b4a3ef646b735a6a86047dda84", size = 446003, upload-time = "2025-12-15T19:20:56.101Z" }, { url = "https://files.pythonhosted.org/packages/d1/83/41c52e47502bf7260044413b6770d1a48dda2f0246f95ee1384a3cd9c44a/tornado-6.5.4-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:6eb82872335a53dd063a4f10917b3efd28270b56a33db69009606a0312660a6f", size = 445412, upload-time = "2025-12-15T19:20:57.398Z" }, { url = "https://files.pythonhosted.org/packages/10/c7/bc96917f06cbee182d44735d4ecde9c432e25b84f4c2086143013e7b9e52/tornado-6.5.4-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6076d5dda368c9328ff41ab5d9dd3608e695e8225d1cd0fd1e006f05da3635a8", size = 445392, upload-time = "2025-12-15T19:20:58.692Z" }, { url = "https://files.pythonhosted.org/packages/0c/1a/d7592328d037d36f2d2462f4bc1fbb383eec9278bc786c1b111cbbd44cfa/tornado-6.5.4-cp39-abi3-win32.whl", hash = "sha256:1768110f2411d5cd281bac0a090f707223ce77fd110424361092859e089b38d1", size = 446481, upload-time = "2025-12-15T19:21:00.008Z" }, { url = 
"https://files.pythonhosted.org/packages/d6/6d/c69be695a0a64fd37a97db12355a035a6d90f79067a3cf936ec2b1dc38cd/tornado-6.5.4-cp39-abi3-win_amd64.whl", hash = "sha256:fa07d31e0cd85c60713f2b995da613588aa03e1303d75705dca6af8babc18ddc", size = 446886, upload-time = "2025-12-15T19:21:01.287Z" }, { url = "https://files.pythonhosted.org/packages/50/49/8dc3fd90902f70084bd2cd059d576ddb4f8bb44c2c7c0e33a11422acb17e/tornado-6.5.4-cp39-abi3-win_arm64.whl", hash = "sha256:053e6e16701eb6cbe641f308f4c1a9541f91b6261991160391bfc342e8a551a1", size = 445910, upload-time = "2025-12-15T19:21:02.571Z" }, ] [[package]] name = "typing-extensions" version = "4.15.0" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, ] [[package]] name = "zope-event" version = "6.1" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/46/33/d3eeac228fc14de76615612ee208be2d8a5b5b0fada36bf9b62d6b40600c/zope_event-6.1.tar.gz", hash = "sha256:6052a3e0cb8565d3d4ef1a3a7809336ac519bc4fe38398cb8d466db09adef4f0", size = 18739, upload-time = "2025-11-07T08:05:49.934Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/c2/b0/956902e5e1302f8c5d124e219c6bf214e2649f92ad5fce85b05c039a04c9/zope_event-6.1-py3-none-any.whl", hash = "sha256:0ca78b6391b694272b23ec1335c0294cc471065ed10f7f606858fc54566c25a0", size = 6414, upload-time = "2025-11-07T08:05:48.874Z" }, ] 
[[package]] name = "zope-interface" version = "8.2" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/86/a4/77daa5ba398996d16bb43fc721599d27d03eae68fe3c799de1963c72e228/zope_interface-8.2.tar.gz", hash = "sha256:afb20c371a601d261b4f6edb53c3c418c249db1a9717b0baafc9a9bb39ba1224", size = 254019, upload-time = "2026-01-09T07:51:07.253Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/b1/fa/6d9eb3a33998a3019d7eb4fa1802d01d6602fad90e0aea443e6e0fe8e49a/zope_interface-8.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:788c293f3165964ec6527b2d861072c68eef53425213f36d3893ebee89a89623", size = 207541, upload-time = "2026-01-09T08:04:55.378Z" }, { url = "https://files.pythonhosted.org/packages/19/8c/ad23c96fdee84cb1f768f6695dac187cc26e9038e01c69713ba0f7dc46ab/zope_interface-8.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9a4e785097e741a1c953b3970ce28f2823bd63c00adc5d276f2981dd66c96c15", size = 208075, upload-time = "2026-01-09T08:04:57.118Z" }, { url = "https://files.pythonhosted.org/packages/dd/35/1bfd5fec31a307f0cf4065ee74ade63858ded3e2a71e248f1508118fcc95/zope_interface-8.2-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:16c69da19a06566664ddd4785f37cad5693a51d48df1515d264c20d005d322e2", size = 249528, upload-time = "2026-01-09T08:04:59.074Z" }, { url = "https://files.pythonhosted.org/packages/c6/3a/5d50b5fdb0f8226a2edff6adb7efdd3762ec95dff827dbab1761cb9a9e85/zope_interface-8.2-cp310-cp310-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:c31acfa3d7cde48bec45701b0e1f4698daffc378f559bfb296837d8c834732f6", size = 254646, upload-time = "2026-01-09T08:05:00.964Z" }, { url = "https://files.pythonhosted.org/packages/2f/2a/ee7d675e151578eaf77828b8faac2b7ed9a69fead350bf5cf0e4afe7c73d/zope_interface-8.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = 
"sha256:0723507127f8269b8f3f22663168f717e9c9742107d1b6c9f419df561b71aa6d", size = 255083, upload-time = "2026-01-09T08:05:02.857Z" }, { url = "https://files.pythonhosted.org/packages/5d/07/99e2342f976c3700e142eddc01524e375a9e9078869a6885d9c72f3a3659/zope_interface-8.2-cp310-cp310-win_amd64.whl", hash = "sha256:3bf73a910bb27344def2d301a03329c559a79b308e1e584686b74171d736be4e", size = 211924, upload-time = "2026-01-09T08:05:04.702Z" }, { url = "https://files.pythonhosted.org/packages/98/97/9c2aa8caae79915ed64eb114e18816f178984c917aa9adf2a18345e4f2e5/zope_interface-8.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c65ade7ea85516e428651048489f5e689e695c79188761de8c622594d1e13322", size = 208081, upload-time = "2026-01-09T08:05:06.623Z" }, { url = "https://files.pythonhosted.org/packages/34/86/4e2fcb01a8f6780ac84923748e450af0805531f47c0956b83065c99ab543/zope_interface-8.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1ef4b43659e1348f35f38e7d1a6bbc1682efde239761f335ffc7e31e798b65b", size = 208522, upload-time = "2026-01-09T08:05:07.986Z" }, { url = "https://files.pythonhosted.org/packages/f6/eb/08e277da32ddcd4014922854096cf6dcb7081fad415892c2da1bedefbf02/zope_interface-8.2-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:dfc4f44e8de2ff4eba20af4f0a3ca42d3c43ab24a08e49ccd8558b7a4185b466", size = 255198, upload-time = "2026-01-09T08:05:09.532Z" }, { url = "https://files.pythonhosted.org/packages/ea/a1/b32484f3281a5dc83bc713ad61eca52c543735cdf204543172087a074a74/zope_interface-8.2-cp311-cp311-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8f094bfb49179ec5dc9981cb769af1275702bd64720ef94874d9e34da1390d4c", size = 259970, upload-time = "2026-01-09T08:05:11.477Z" }, { url = 
"https://files.pythonhosted.org/packages/f6/81/bca0e8ae1e487d4093a8a7cfed2118aa2d4758c8cfd66e59d2af09d71f1c/zope_interface-8.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d2bb8e7364e18f083bf6744ccf30433b2a5f236c39c95df8514e3c13007098ce", size = 261153, upload-time = "2026-01-09T08:05:13.402Z" }, { url = "https://files.pythonhosted.org/packages/40/1e/e3ff2a708011e56b10b271b038d4cb650a8ad5b7d24352fe2edf6d6b187a/zope_interface-8.2-cp311-cp311-win_amd64.whl", hash = "sha256:6f4b4dfcfdfaa9177a600bb31cebf711fdb8c8e9ed84f14c61c420c6aa398489", size = 212330, upload-time = "2026-01-09T08:05:15.267Z" }, { url = "https://files.pythonhosted.org/packages/e0/a0/1e1fabbd2e9c53ef92b69df6d14f4adc94ec25583b1380336905dc37e9a0/zope_interface-8.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:624b6787fc7c3e45fa401984f6add2c736b70a7506518c3b537ffaacc4b29d4c", size = 208785, upload-time = "2026-01-09T08:05:17.348Z" }, { url = "https://files.pythonhosted.org/packages/c3/2a/88d098a06975c722a192ef1fb7d623d1b57c6a6997cf01a7aabb45ab1970/zope_interface-8.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bc9ded9e97a0ed17731d479596ed1071e53b18e6fdb2fc33af1e43f5fd2d3aaa", size = 208976, upload-time = "2026-01-09T08:05:18.792Z" }, { url = "https://files.pythonhosted.org/packages/e9/e8/757398549fdfd2f8c89f32c82ae4d2f0537ae2a5d2f21f4a2f711f5a059f/zope_interface-8.2-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:532367553e4420c80c0fc0cabcc2c74080d495573706f66723edee6eae53361d", size = 259411, upload-time = "2026-01-09T08:05:20.567Z" }, { url = "https://files.pythonhosted.org/packages/91/af/502601f0395ce84dff622f63cab47488657a04d0065547df42bee3a680ff/zope_interface-8.2-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:2bf9cf275468bafa3c72688aad8cfcbe3d28ee792baf0b228a1b2d93bd1d541a", size = 264859, upload-time = 
"2026-01-09T08:05:22.234Z" }, { url = "https://files.pythonhosted.org/packages/89/0c/d2f765b9b4814a368a7c1b0ac23b68823c6789a732112668072fe596945d/zope_interface-8.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0009d2d3c02ea783045d7804da4fd016245e5c5de31a86cebba66dd6914d59a2", size = 264398, upload-time = "2026-01-09T08:05:23.853Z" }, { url = "https://files.pythonhosted.org/packages/4a/81/2f171fbc4222066957e6b9220c4fb9146792540102c37e6d94e5d14aad97/zope_interface-8.2-cp312-cp312-win_amd64.whl", hash = "sha256:845d14e580220ae4544bd4d7eb800f0b6034fe5585fc2536806e0a26c2ee6640", size = 212444, upload-time = "2026-01-09T08:05:25.148Z" }, { url = "https://files.pythonhosted.org/packages/66/47/45188fb101fa060b20e6090e500682398ab415e516a0c228fbb22bc7def2/zope_interface-8.2-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:6068322004a0158c80dfd4708dfb103a899635408c67c3b10e9acec4dbacefec", size = 209170, upload-time = "2026-01-09T08:05:26.616Z" }, { url = "https://files.pythonhosted.org/packages/09/03/f6b9336c03c2b48403c4eb73a1ec961d94dc2fb5354c583dfb5fa05fd41f/zope_interface-8.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2499de92e8275d0dd68f84425b3e19e9268cd1fa8507997900fa4175f157733c", size = 209229, upload-time = "2026-01-09T08:05:28.521Z" }, { url = "https://files.pythonhosted.org/packages/07/b1/65fe1dca708569f302ade02e6cdca309eab6752bc9f80105514f5b708651/zope_interface-8.2-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:f777e68c76208503609c83ca021a6864902b646530a1a39abb9ed310d1100664", size = 259393, upload-time = "2026-01-09T08:05:29.897Z" }, { url = "https://files.pythonhosted.org/packages/eb/a5/97b49cfceb6ed53d3dcfb3f3ebf24d83b5553194f0337fbbb3a9fec6cf78/zope_interface-8.2-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:9b05a919fdb0ed6ea942e5a7800e09a8b6cdae6f98fee1bef1c9d1a3fc43aaa0", size = 264863, 
upload-time = "2026-01-09T08:05:31.501Z" }, { url = "https://files.pythonhosted.org/packages/cb/02/0b7a77292810efe3a0586a505b077ebafd5114e10c6e6e659f0c8e387e1f/zope_interface-8.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ccc62b5712dd7bd64cfba3ee63089fb11e840f5914b990033beeae3b2180b6cb", size = 264369, upload-time = "2026-01-09T08:05:32.941Z" }, { url = "https://files.pythonhosted.org/packages/fb/1d/0d1ff3846302ed1b5bbf659316d8084b30106770a5f346b7ff4e9f540f80/zope_interface-8.2-cp313-cp313-win_amd64.whl", hash = "sha256:34f877d1d3bb7565c494ed93828fa6417641ca26faf6e8f044e0d0d500807028", size = 212447, upload-time = "2026-01-09T08:05:35.064Z" }, { url = "https://files.pythonhosted.org/packages/1a/da/3c89de3917751446728b8898b4d53318bc2f8f6bf8196e150a063c59905e/zope_interface-8.2-cp314-cp314-macosx_10_9_x86_64.whl", hash = "sha256:46c7e4e8cbc698398a67e56ca985d19cb92365b4aafbeb6a712e8c101090f4cb", size = 209223, upload-time = "2026-01-09T08:05:36.449Z" }, { url = "https://files.pythonhosted.org/packages/00/7f/62d00ec53f0a6e5df0c984781e6f3999ed265129c4c3413df8128d1e0207/zope_interface-8.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:a87fc7517f825a97ff4a4ca4c8a950593c59e0f8e7bfe1b6f898a38d5ba9f9cf", size = 209366, upload-time = "2026-01-09T08:05:38.197Z" }, { url = "https://files.pythonhosted.org/packages/ef/a2/f241986315174be8e00aabecfc2153cf8029c1327cab8ed53a9d979d7e08/zope_interface-8.2-cp314-cp314-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:ccf52f7d44d669203c2096c1a0c2c15d52e36b2e7a9413df50f48392c7d4d080", size = 261037, upload-time = "2026-01-09T08:05:39.568Z" }, { url = "https://files.pythonhosted.org/packages/02/cc/b321c51d6936ede296a1b8860cf173bee2928357fe1fff7f97234899173f/zope_interface-8.2-cp314-cp314-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:aae807efc7bd26302eb2fea05cd6de7d59269ed6ae23a6de1ee47add6de99b8c", 
size = 264219, upload-time = "2026-01-09T08:05:41.624Z" }, { url = "https://files.pythonhosted.org/packages/ab/fb/5f5e7b40a2f4efd873fe173624795ca47eaa22e29051270c981361b45209/zope_interface-8.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:05a0e42d6d830f547e114de2e7cd15750dc6c0c78f8138e6c5035e51ddfff37c", size = 264390, upload-time = "2026-01-09T08:05:42.936Z" }, { url = "https://files.pythonhosted.org/packages/f9/82/3f2bc594370bc3abd58e5f9085d263bf682a222f059ed46275cde0570810/zope_interface-8.2-cp314-cp314-win_amd64.whl", hash = "sha256:561ce42390bee90bae51cf1c012902a8033b2aaefbd0deed81e877562a116d48", size = 212585, upload-time = "2026-01-09T08:05:44.419Z" }, ]