pax_global_header00006660000000000000000000000064146322737600014524gustar00rootroot0000000000000052 comment=1ced441eabd969bc3ca44bfc8f27c70e24c75563 wurlitzer-3.1.1/000077500000000000000000000000001463227376000135755ustar00rootroot00000000000000wurlitzer-3.1.1/.auto-changelog000066400000000000000000000001561463227376000164750ustar00rootroot00000000000000{ "unreleased": true, "commitLimit": 0, "ignoreCommitPattern": "(Bump version|pre-commit|dependabot)" } wurlitzer-3.1.1/.flake8000066400000000000000000000007601463227376000147530ustar00rootroot00000000000000[flake8] # Ignore style and complexity # E: style errors # W: style warnings # C: complexity # F401: module imported but unused # F403: import * # F811: redefinition of unused `name` from line `N` # F841: local variable assigned but never used # E402: module level import not at top of file # I100: Import statements are in the wrong order # I101: Imported names are in the wrong order ignore = E, C, W, F401, F403, F811, F841, E402, I100, I101, D400 exclude = .cache, .github, build wurlitzer-3.1.1/.github/000077500000000000000000000000001463227376000151355ustar00rootroot00000000000000wurlitzer-3.1.1/.github/workflows/000077500000000000000000000000001463227376000171725ustar00rootroot00000000000000wurlitzer-3.1.1/.github/workflows/publish.yaml000066400000000000000000000015531463227376000215300ustar00rootroot00000000000000# Build releases and (on tags) publish to PyPI name: Release # always build releases (to make sure wheel-building works) # but only publish to PyPI on tags on: push: tags: - "*" branches: - main pull_request: jobs: build-release: runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: python-version: 3.8 - name: install build package run: | pip install --upgrade pip pip install build pip freeze - name: build release run: | python -m build --sdist --wheel . ls -l dist - name: publish to pypi uses: pypa/gh-action-pypi-publish@v1.4.1 if: startsWith(github.ref, 'refs/tags/') with: user: __token__ password: ${{ secrets.pypi_password }} wurlitzer-3.1.1/.github/workflows/test-vm.yaml000066400000000000000000000026261463227376000214630ustar00rootroot00000000000000name: test-vm # run tests in a VM via vagrant # GHA doesn't support freebsd, but we can test there on: pull_request: push: branches: - "main" jobs: test: # ref: https://github.com/jonashackt/vagrant-github-actions # MIT License # after macos-12, vagrant stops working runs-on: macos-12 strategy: fail-fast: true matrix: box: - freebsd steps: - uses: actions/checkout@v2 - name: Cache Vagrant boxes uses: actions/cache@v2 with: path: ~/.vagrant.d/boxes key: ${{ runner.os }}-vagrant-${{ hashFiles('Vagrantfile') }} restore-keys: | ${{ runner.os }}-vagrant- - name: Show Vagrant version run: vagrant --version - name: Run vagrant up run: vagrant up ${{ matrix.box }} - name: install dependencies run: | vagrant ssh ${{ matrix.box }} -c " cd /vagrant pip install --upgrade pip pip install --upgrade --pre -r dev-requirements.txt . 
pip freeze " - name: Run tests run: | vagrant ssh ${{ matrix.box }} -c " cd /vagrant pytest -v --color=yes --cov=wurlitzer test.py " - name: Submit codecov report run: | vagrant ssh ${{ matrix.box }} -c " cd /vagrant codecov " wurlitzer-3.1.1/.github/workflows/test.yaml000066400000000000000000000026021463227376000210350ustar00rootroot00000000000000name: Test on: pull_request: push: branches: - main jobs: test: runs-on: ${{ matrix.os }} strategy: # Keep running even if one variation of the job fail fail-fast: false matrix: os: - ubuntu-20.04 python: - "3.5" - "3.6" - "3.7" - "3.8" - "3.9" - "3.10" - "3.11" - "3.12" include: - os: macos-latest python: "3.12" steps: - uses: actions/checkout@v2 - name: Install Python ${{ matrix.python }} uses: actions/setup-python@v2 with: python-version: ${{ matrix.python }} # preserve pip cache to speed up installation - name: Cache pip uses: actions/cache@v2 with: path: ~/.cache/pip # Look to see if there is a cache hit for the corresponding requirements file key: ${{ runner.os }}-pip-${{ hashFiles('*requirements.txt') }} restore-keys: | ${{ runner.os }}-pip- - name: Install Python dependencies run: | pip install --upgrade pip pip install --upgrade --pre -r dev-requirements.txt . pip freeze - name: Run tests run: | pytest -v --color=yes --cov=wurlitzer test.py - name: Submit codecov report run: | codecov wurlitzer-3.1.1/.gitignore000066400000000000000000000014411463227376000155650ustar00rootroot00000000000000# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging MANIFEST .Python env/ build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ *.egg-info/ .installed.cfg *.egg # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache .pytest_cache nosetests.xml coverage.xml *,cover .hypothesis/ # Translations *.mo *.pot # Django stuff: *.log # Sphinx documentation docs/_build/ # PyBuilder target/ # Notebooks .ipynb_checkpoints # vagrant .vagrant wurlitzer-3.1.1/.pre-commit-config.yaml000066400000000000000000000012021463227376000200510ustar00rootroot00000000000000ci: autoupdate_schedule: quarterly repos: - repo: https://github.com/pycqa/isort rev: 5.13.2 hooks: - id: isort - repo: https://github.com/psf/black rev: 24.3.0 hooks: - id: black - repo: https://github.com/pre-commit/mirrors-prettier rev: v4.0.0-alpha.8 hooks: - id: prettier - repo: https://github.com/PyCQA/flake8 rev: "7.0.0" hooks: - id: flake8 - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.5.0 hooks: - id: end-of-file-fixer - id: check-case-conflict - id: check-executables-have-shebangs - id: requirements-txt-fixer wurlitzer-3.1.1/CHANGELOG.md000066400000000000000000000147461463227376000154220ustar00rootroot00000000000000### Changelog All notable changes to this project will be documented in this file. Dates are displayed in UTC. Generated by [`auto-changelog`](https://github.com/CookPete/auto-changelog). 
#### [3.1.1](https://github.com/minrk/wurlitzer/compare/3.1.0...3.1.1)

- fix restore after capture with optimized file pipes [`#89`](https://github.com/minrk/wurlitzer/pull/89)
- Minor readme clarification [`#86`](https://github.com/minrk/wurlitzer/pull/86)
- sys_pipes: check that sys.stdout isn't being forwarded to itself [`#85`](https://github.com/minrk/wurlitzer/pull/85)

#### [3.1.0](https://github.com/minrk/wurlitzer/compare/3.0.3...3.1.0)

> 29 April 2024

- accept fileno-having objects for GIL-less capture [`#83`](https://github.com/minrk/wurlitzer/pull/83)
- update Python versions in test matrix [`#82`](https://github.com/minrk/wurlitzer/pull/82)
- accept Logger objects in pipes [`#81`](https://github.com/minrk/wurlitzer/pull/81)
- Update `pipes`'s docstring [`#78`](https://github.com/minrk/wurlitzer/pull/78)

#### [3.0.3](https://github.com/minrk/wurlitzer/compare/3.0.2...3.0.3)

> 1 December 2022

- ci: update packages in freebsd [`#70`](https://github.com/minrk/wurlitzer/pull/70)
- Include STDOUT and PIPE in public API [`#69`](https://github.com/minrk/wurlitzer/pull/69)
- use 2\*\*18 for bufsize test [`#61`](https://github.com/minrk/wurlitzer/pull/61)
- Try running tests on freebsd [`#60`](https://github.com/minrk/wurlitzer/pull/60)
- Add details to the failed-to-set-pipe-buffer-size warning [`#58`](https://github.com/minrk/wurlitzer/pull/58)
- Add details to the failed-to-set-pipe-buffer-size warning, closes #5757 [`#5757`](https://github.com/minrk/wurlitzer/issues/5757)

#### [3.0.2](https://github.com/minrk/wurlitzer/compare/3.0.1...3.0.2)

> 25 August 2021

- import F_GETPIPE_SZ along with F_SETPIPE_SZ [`#54`](https://github.com/minrk/wurlitzer/pull/54)

#### [3.0.1](https://github.com/minrk/wurlitzer/compare/3.0.0...3.0.1)

> 24 August 2021

- fix setting pipe buffer size [`#52`](https://github.com/minrk/wurlitzer/pull/52)

### [3.0.0](https://github.com/minrk/wurlitzer/compare/2.1.1...3.0.0)

> 19 August 2021

- fallback on cffi to detect stdout/stderr pointers [`#50`](https://github.com/minrk/wurlitzer/pull/50)
- use F_SETPIPE_SZ ot set large pipe page size, if available [`#51`](https://github.com/minrk/wurlitzer/pull/51)
- use Bytes/StringIO for pipes [`#49`](https://github.com/minrk/wurlitzer/pull/49)

#### [2.1.1](https://github.com/minrk/wurlitzer/compare/2.1.0...2.1.1)

> 21 July 2021

- explicitly close the poller when we are done with it [`#47`](https://github.com/minrk/wurlitzer/pull/47)
- fix typo in importing unittest.mock [`#45`](https://github.com/minrk/wurlitzer/pull/45)

#### [2.1.0](https://github.com/minrk/wurlitzer/compare/2.0.1...2.1.0)

> 30 March 2021

- Don't enable extension if sys.**stdout|err** is None [`#44`](https://github.com/minrk/wurlitzer/pull/44)
- github actions, pre-commit [`#43`](https://github.com/minrk/wurlitzer/pull/43)
- Fix crash in check for terminal ipython [`#42`](https://github.com/minrk/wurlitzer/pull/42)
- call stop_sys_pipes explicitly after unregister [`#40`](https://github.com/minrk/wurlitzer/pull/40)

#### [2.0.1](https://github.com/minrk/wurlitzer/compare/2.0.0...2.0.1)

> 6 July 2020

- regen changelog [`b0b028f`](https://github.com/minrk/wurlitzer/commit/b0b028f12e129844d813718d94656ab7e2a89678)
- add autocommit config file [`49227f1`](https://github.com/minrk/wurlitzer/commit/49227f18edaac240602f4dd37fc8ed79e372825c)
- Merge pull request #38 from minrk/flush-on-exit [`d24f50c`](https://github.com/minrk/wurlitzer/commit/d24f50c611164a3468622ca2ed80efc3abec8641)

### [2.0.0](https://github.com/minrk/wurlitzer/compare/1.0.3...2.0.0)

> 25 October
2019

- use selectors instead of select.poll [`#34`](https://github.com/minrk/wurlitzer/pull/34)

#### [1.0.3](https://github.com/minrk/wurlitzer/compare/1.0.2...1.0.3)

> 13 June 2019

- PR: Add thread lock [`#30`](https://github.com/minrk/wurlitzer/pull/30)
- update packages on travis [`#31`](https://github.com/minrk/wurlitzer/pull/31)
- test on mac [`#25`](https://github.com/minrk/wurlitzer/pull/25)
- select.poll timeout is in milliseconds [`#26`](https://github.com/minrk/wurlitzer/pull/26)
- using poll instead of select in forwarder [`#24`](https://github.com/minrk/wurlitzer/pull/24)
- setup.py improvements [`#19`](https://github.com/minrk/wurlitzer/pull/19)
- Link blogpost about redirecting stdout/stderr [`#18`](https://github.com/minrk/wurlitzer/pull/18)
- fixes #27? [`#27`](https://github.com/minrk/wurlitzer/issues/27)

#### [1.0.2](https://github.com/minrk/wurlitzer/compare/1.0.1...1.0.2)

> 20 May 2018

- move fflush to a thread [`#16`](https://github.com/minrk/wurlitzer/pull/16)

#### [1.0.1](https://github.com/minrk/wurlitzer/compare/1.0.0...1.0.1)

> 22 January 2018

- Test more Pythons [`#13`](https://github.com/minrk/wurlitzer/pull/13)
- avoid unnecessary close of original FDs [`#11`](https://github.com/minrk/wurlitzer/pull/11)

### [1.0.0](https://github.com/minrk/wurlitzer/compare/0.2.0...1.0.0)

> 22 June 2017

- use control pipe to signal closure [`#8`](https://github.com/minrk/wurlitzer/pull/8)
- import warnings [`#3`](https://github.com/minrk/wurlitzer/pull/3)
- Do nothing if loaded in terminal IPython [`#2`](https://github.com/minrk/wurlitzer/pull/2)

#### [0.2.0](https://github.com/minrk/wurlitzer/compare/0.1.2...0.2.0)

> 14 March 2016

- Make it an IPython extension [`5aa2237`](https://github.com/minrk/wurlitzer/commit/5aa22375de5516915bb1cb9168e04430933e86a6)

#### [0.1.2](https://github.com/minrk/wurlitzer/compare/0.1.1...0.1.2)

> 13 March 2016

- readme more [`e905543`](https://github.com/minrk/wurlitzer/commit/e9055432933b29a70246299f2534e44af01c7edb)
- flush before entering wurlitzer [`a8b3a85`](https://github.com/minrk/wurlitzer/commit/a8b3a856a576fe50e8771fddad7fcf3b21ae3285)
- bump patch on release [`841cf92`](https://github.com/minrk/wurlitzer/commit/841cf922a77fd1a954ff968530d096ecfc1879aa)

#### [0.1.1](https://github.com/minrk/wurlitzer/compare/0.1.0...0.1.1)

> 9 March 2016

- fix names in README, long_description [`7f95a69`](https://github.com/minrk/wurlitzer/commit/7f95a690985e9ff2e7360c2c433fa9b9187f8758)

#### 0.1.0

> 9 March 2016

- init package [`88e28b7`](https://github.com/minrk/wurlitzer/commit/88e28b7685806006fdd3c9a2021705be1b9fbbed)
- Add demo notebook [`ea70e0e`](https://github.com/minrk/wurlitzer/commit/ea70e0e1f82ccb2e3283b6baf2c1d91c0b05ac8a)
- Initial commit [`9646cf2`](https://github.com/minrk/wurlitzer/commit/9646cf2417cc46c61d1f6437f8f76efa56ccf2d8)

wurlitzer-3.1.1/Demo.ipynb
{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Capturing C-level stdout/stderr with `wurlitzer`\n", "\n", "Sometimes in Python you are calling some C code.\n", "Sometimes that C code makes calls to `printf`,\n", "or otherwise writes to the stdout/stderr of the process."
] }, { "cell_type": "code", "execution_count": 1, "metadata": { "collapsed": false }, "outputs": [], "source": [ "import ctypes\n", "libc = ctypes.CDLL(None)\n", "\n", "try:\n", " c_stderr_p = ctypes.c_void_p.in_dll(libc, 'stderr')\n", "except ValueError:\n", " # libc.stdout is has a funny name on OS X\n", " c_stderr_p = ctypes.c_void_p.in_dll(libc, '__stderrp')\n", "\n", "\n", "def printf(msg):\n", " \"\"\"Call C printf\"\"\"\n", " libc.printf((msg + '\\n').encode('utf8'))\n", "\n", "def printf_err(msg):\n", " \"\"\"Cal C fprintf on stderr\"\"\"\n", " libc.fprintf(c_stderr_p, (msg + '\\n').encode('utf8'))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "IPython forwards the Python-level `sys.stdout` and `sys.stderr`,\n", "but it leaves the process-level file descriptors that C code will write to untouched.\n", "That means that in a context like this notebook, these functions will print to the terminal, because they are not captured:" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "collapsed": false }, "outputs": [], "source": [ "printf(\"Hello?\")\n", "printf_err(\"Stderr? Anybody?\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "With wurlitzer, we can capture these C-level functions:" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "collapsed": true }, "outputs": [], "source": [ "from wurlitzer import pipes, sys_pipes, STDOUT, PIPE" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "collapsed": true }, "outputs": [], "source": [ "with pipes() as (stdout, stderr):\n", " printf(\"Hello, stdout!\")\n", " printf_err(\"Hello, stderr!\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "and redisplay them if we like:" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Hello, stdout!\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Hello, stderr!\n" ] } ], "source": [ "import sys\n", "sys.stdout.write(stdout.read())\n", "sys.stderr.write(stderr.read())" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Some tools, such as the IPython kernel for Jupyter,\n", "capture the Python-level `sys.stdout` and `sys.stderr` and forward them somewhere.\n", "In the case of Jupyter, this is over a network socket, so that it ends up in the browser.\n", "\n", "If we know that's going on, we can easily hook up the C outputs to the Python-forwarded ones with a single call:" ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Hello from C, 0!\n", "Hello from C, 1!\n", "Hello from C, 2!\n", "Hello from C, 3!\n", "Hello from C, 4!\n" ] } ], "source": [ "import time\n", "\n", "with sys_pipes():\n", " for i in range(5):\n", " time.sleep(1)\n", " printf(\"Hello from C, %i!\" % i)\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We can also capture the pipes to any writeable streams, such as a `StringIO` object:" ] }, { "cell_type": "code", "execution_count": 7, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Hello, stdout!\n", "Hello, stderr!\n", "\n" ] } ], "source": [ "import io\n", "\n", "stdout = io.StringIO()\n", "with pipes(stdout=stdout, stderr=STDOUT):\n", " printf(\"Hello, stdout!\")\n", " printf_err(\"Hello, stderr!\")\n", "\n", "print(stdout.getvalue())" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 
IPython extension\n", "\n", "You can also enable wurlitzer as an IPython extension,\n", "so that it always forwards C-level output during execution:" ] }, { "cell_type": "code", "execution_count": 8, "metadata": { "collapsed": true }, "outputs": [], "source": [ "%load_ext wurlitzer" ] }, { "cell_type": "code", "execution_count": 9, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Hello from C, 0!\n", "Hello from C, 1!\n", "Hello from C, 2!\n", "Hello from C, 3!\n", "Hello from C, 4!\n" ] } ], "source": [ "for i in range(5):\n", " time.sleep(1)\n", " printf(\"Hello from C, %i!\" % i)" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.5.1" } }, "nbformat": 4, "nbformat_minor": 1 } wurlitzer-3.1.1/LICENSE000066400000000000000000000020611463227376000146010ustar00rootroot00000000000000The MIT License (MIT) Copyright (c) 2016 Min RK Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. wurlitzer-3.1.1/MANIFEST.in000066400000000000000000000001751463227376000153360ustar00rootroot00000000000000include README.md include test.py include LICENSE include Demo.ipynb include dev-requirements.txt prune htmlcov prune build wurlitzer-3.1.1/README.md000066400000000000000000000037371463227376000150660ustar00rootroot00000000000000# Wurlitzer Capture C-level stdout/stderr pipes in Python via `os.dup2`. For more details on why this is needed, please read [this blog post](https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/). ## Install pip install wurlitzer ## Usage Capture stdout/stderr in pipes: ```python from wurlitzer import pipes with pipes() as (out, err): call_some_c_function() stdout = out.read() ``` Capture both stdout and stderr in a single StringIO object: ```python from io import StringIO from wurlitzer import pipes, STDOUT out = StringIO() with pipes(stdout=out, stderr=STDOUT): call_some_c_function() stdout = out.getvalue() ``` Forward C-level stdout/stderr to Python sys.stdout/stderr, which may already be forwarded somewhere by the environment, e.g. IPython: ```python from wurlitzer import sys_pipes with sys_pipes(): call_some_c_function() ``` Forward C-level output to Python Logger objects (new in 3.1). Each line of output will be a log message. 
```python from wurlitzer import pipes, STDOUT import logging logger = logging.getLogger("my.log") logger.setLevel(logging.INFO) logger.addHandler(logging.FileHandler("mycode.log")) with pipes(logger, stderr=STDOUT): call_some_c_function() ``` Forward C-level output to a file (avoids GIL issues with a background thread, new in 3.1): ```python from wurlitzer import pipes, STDOUT with open("log.txt", "ab") as f, pipes(f, stderr=STDOUT): blocking_gil_holding_function() ``` Or even simpler, enable it as an IPython extension: ``` %load_ext wurlitzer ``` To forward all C-level output to IPython (e.g. Jupyter cell output) during execution. ## Acknowledgments This package is based on stuff we learned with @takluyver and @karies while working on capturing output from the [Cling Kernel](https://github.com/root-mirror/cling/tree/master/tools/Jupyter/kernel) for Jupyter. ## Wurlitzer?! [Wurlitzer](https://en.wikipedia.org/wiki/Wurlitzer) makes pipe organs. Get it? Pipes? Naming is hard. wurlitzer-3.1.1/Vagrantfile000066400000000000000000000014251463227376000157640ustar00rootroot00000000000000Vagrant.configure("2") do |config| config.vm.synced_folder ".", "/vagrant", type: "rsync" config.ssh.forward_env = [ # for codecov, from # https://github.com/codecov/uploader/blob/b561fe71e0262aa606b7391014ff01228adfac3d/src/ci_providers/provider_githubactions.ts#L113-L124 'GITHUB_ACTION', 'GITHUB_HEAD_REF', 'GITHUB_REF', 'GITHUB_REPOSITORY', 'GITHUB_RUN_ID', 'GITHUB_SERVER_URL', 'GITHUB_SHA', 'GITHUB_WORKFLOW', ] config.vm.define "freebsd" do |bsd| vm = bsd.vm vm.box = "generic/freebsd12" vm.provision "shell", inline: "pkg update; pkg install -y git py39-pip py39-sqlite3", privileged: true vm.provision "shell", inline: "echo 'export PATH=$HOME/.local/bin:$PATH' >> $HOME/.bash_profile", privileged: false end end wurlitzer-3.1.1/dev-requirements.txt000066400000000000000000000000751463227376000176370ustar00rootroot00000000000000codecov mock # python_version < '3.0' pytest>=2.8 pytest-cov wurlitzer-3.1.1/pyproject.toml000066400000000000000000000015341463227376000165140ustar00rootroot00000000000000[tool.isort] profile = "black" [tool.black] skip-string-normalization = true target_version = [ "py35", "py36", "py37", "py38", ] [tool.tbump] # Uncomment this if your project is hosted on GitHub: github_url = "https://github.com/minrk/wurlitzer" [tool.tbump.version] current = "3.1.1" # Example of a semver regexp. # Make sure this matches current_version before # using tbump regex = ''' (?P\d+) \. (?P\d+) \. (?P\d+) (?P
((a|b|rc)\d+)|)
  \.?
  (?P<dev>(?<=\.)dev\d*|)
  '''

[tool.tbump.git]
message_template = "Bump to {new_version}"
tag_template = "{new_version}"

# For each file to patch, add a [[tool.tbump.file]] config
# section containing the path of the file, relative to the
# pyproject.toml location.

[[tool.tbump.file]]
src = "wurlitzer.py"
search = "__version__ = '{current_version}'"
wurlitzer-3.1.1/release.sh
#!/bin/sh
set -eux

which tbump
before=$(git rev-parse HEAD)
auto-changelog -v $VERSION
pre-commit run --files CHANGELOG.md || true
git add CHANGELOG.md
git commit -m "changelog for $VERSION"
tbump $VERSION
wurlitzer-3.1.1/setup.py
#!/usr/bin/env python
import sys

from setuptools import setup
from setuptools.command.bdist_egg import bdist_egg

with open("README.md") as f:
    long_description = f.read()

version_ns = {}
with open("wurlitzer.py") as f:
    for line in f:
        if line.startswith("__version__"):
            exec(line, version_ns)


class bdist_egg_disabled(bdist_egg):
    """Disabled version of bdist_egg

    Prevents setup.py install from performing setuptools' default easy_install,
    which it should never ever do.
    """

    def run(self):
        sys.exit(
            "Aborting implicit building of eggs. Use `pip install .` to install from source."
        )


setup_args = dict(
    name="wurlitzer",
    version=version_ns["__version__"],
    author="Min RK",
    author_email="benjaminrk@gmail.com",
    description="Capture C-level output in context managers",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/minrk/wurlitzer",
    py_modules=["wurlitzer"],
    python_requires=">=3.5",
    license="MIT",
    cmdclass={
        "bdist_egg": bdist_egg if "bdist_egg" in sys.argv else bdist_egg_disabled
    },
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
    ],
)

if __name__ == "__main__":
    setup(**setup_args)
wurlitzer-3.1.1/test.py
# coding: utf-8
from __future__ import print_function

import io
import logging
import os
import platform
import sys
import time
from fcntl import fcntl
from tempfile import TemporaryFile
from unittest import mock

import pytest

import wurlitzer
from wurlitzer import (
    PIPE,
    STDOUT,
    Wurlitzer,
    c_stderr_p,
    c_stdout_p,
    libc,
    pipes,
    stop_sys_pipes,
    sys_pipes,
    sys_pipes_forever,
)


def printf(msg):
    """Call C printf"""
    libc.printf((msg + '\n').encode('utf8'))


def printf_err(msg):
    """Cal C fprintf on stderr"""
    libc.fprintf(c_stderr_p, (msg + '\n').encode('utf8'))


def test_pipes():
    with pipes(stdout=PIPE, stderr=PIPE) as (stdout, stderr):
        printf(u"Hellø")
        printf_err(u"Hi, stdérr")

    assert stdout.read() == u"Hellø\n"
    assert stderr.read() == u"Hi, stdérr\n"


def test_pipe_bytes():
    with pipes(encoding=None) as (stdout, stderr):
        printf(u"Hellø")
        printf_err(u"Hi, stdérr")

    assert stdout.read() == u"Hellø\n".encode('utf8')
    assert stderr.read() == u"Hi, stdérr\n".encode('utf8')


def test_forward():
    stdout = io.StringIO()
    stderr = io.StringIO()
    with pipes(stdout=stdout, stderr=stderr) as (_stdout, _stderr):
        printf(u"Hellø")
        printf_err(u"Hi, stdérr")
        assert _stdout is stdout
        assert _stderr is stderr

    assert stdout.getvalue() == u"Hellø\n"
    assert stderr.getvalue() == u"Hi, stdérr\n"


def test_pipes_stderr():
    stdout = io.StringIO()
    with pipes(stdout=stdout, stderr=STDOUT) as (_stdout, _stderr):
        printf(u"Hellø")
        libc.fflush(c_stdout_p)
        time.sleep(0.1)
        printf_err(u"Hi, stdérr")
        assert _stdout is stdout
        assert _stderr is None

    assert stdout.getvalue() == u"Hellø\nHi, stdérr\n"


def test_flush():
    stdout = io.StringIO()
    w = Wurlitzer(stdout=stdout, stderr=STDOUT)
    with w:
        printf_err(u"Hellø")
        time.sleep(0.5)
        assert stdout.getvalue().strip() == u"Hellø"


def test_sys_pipes():
    stdout = io.StringIO()
    stderr = io.StringIO()
    with mock.patch('sys.stdout', stdout), mock.patch(
        'sys.stderr', stderr
    ), sys_pipes():
        printf(u"Hellø")
        printf_err(u"Hi, stdérr")

    assert stdout.getvalue() == u"Hellø\n"
    assert stderr.getvalue() == u"Hi, stdérr\n"


def test_sys_pipes_check():
    # pytest redirects stdout; un-redirect it for the test
    with mock.patch('sys.stdout', sys.__stdout__), mock.patch(
        'sys.stderr', sys.__stderr__
    ):
        with pytest.raises(ValueError):
            with sys_pipes():
                pass


def test_redirect_everything():
    stdout = io.StringIO()
    stderr = io.StringIO()
    with mock.patch('sys.stdout', stdout), mock.patch('sys.stderr', stderr):
        sys_pipes_forever()
        printf(u"Hellø")
        printf_err(u"Hi, stdérr")
        stop_sys_pipes()
    assert stdout.getvalue() == u"Hellø\n"
    assert stderr.getvalue() == u"Hi, stdérr\n"


def count_fds():
    """utility for counting file descriptors"""
    proc_fds = '/proc/{}/fd'.format(os.getpid())
    if os.path.isdir(proc_fds):
        return len(os.listdir(proc_fds))
    else:
        # this is an approximate count,
        # but it should at least be stable if we aren't leaking
        with TemporaryFile() as tf:
            return tf.fileno()


def test_fd_leak():
    base_count = count_fds()
    with pipes():
        print('ok')
    assert count_fds() == base_count
    for i in range(10):
        with pipes():
            print('ok')
        assert count_fds() == base_count


def test_buffer_full():
    with pipes(stdout=None, stderr=io.StringIO()) as (stdout, stderr):
        long_string = "x" * 100000  # create a long string (longer than 65536)
        printf_err(long_string)

    # If output forwarding regresses (as before 3.0), the process hangs before reaching this assert.
    assert stderr.getvalue() == long_string + "\n"


def test_buffer_full_default():
    with pipes() as (stdout, stderr):
        long_string = "x" * 100000  # create a long string (longer than 65536)
        printf(long_string)

    # If output forwarding regresses (as before 3.0), the process hangs before reaching this assert.
    assert stdout.read() == long_string + "\n"


def test_pipe_max_size():
    max_pipe_size = wurlitzer._get_max_pipe_size()
    if platform.system() == 'Linux':
        assert 65535 <= max_pipe_size <= 1024 * 1024
    else:
        assert max_pipe_size is None


@pytest.mark.skipif(
    wurlitzer._get_max_pipe_size() is None, reason="requires _get_max_pipe_size"
)
def test_bufsize():
    default_bufsize = wurlitzer._get_max_pipe_size()
    with wurlitzer.pipes() as (stdout, stderr):
        assert fcntl(sys.__stdout__, wurlitzer.F_GETPIPE_SZ) == default_bufsize
        assert fcntl(sys.__stderr__, wurlitzer.F_GETPIPE_SZ) == default_bufsize

    bufsize = 2**18  # seems to only accept powers of two?
    with wurlitzer.pipes(bufsize=bufsize) as (stdout, stderr):
        assert fcntl(sys.__stdout__, wurlitzer.F_GETPIPE_SZ) == bufsize
        assert fcntl(sys.__stderr__, wurlitzer.F_GETPIPE_SZ) == bufsize


def test_log_pipes(caplog):
    with caplog.at_level(logging.INFO), wurlitzer.pipes(
        logging.getLogger("wurlitzer.stdout"), logging.getLogger("wurlitzer.stderr")
    ):
        printf("some stdout")
        printf_err("some stderr")

    stdout_logs = []
    stderr_logs = []
    for t in caplog.record_tuples:
        if "stdout" in t[0]:
            stdout_logs.append(t)
        else:
            stderr_logs.append(t)

    assert stdout_logs == [
        ("wurlitzer.stdout", logging.INFO, "some stdout"),
    ]
    assert stderr_logs == [
        ("wurlitzer.stderr", logging.ERROR, "some stderr"),
    ]

    for record in caplog.records:
        # check 'stream' extra
        assert record.stream
        assert record.name == "wurlitzer." + record.stream


def test_two_file_pipes(tmpdir):

    test_stdout = tmpdir / "stdout.txt"
    test_stderr = tmpdir / "stderr.txt"

    with test_stdout.open("ab") as stdout_f, test_stderr.open("ab") as stderr_f:
        w = Wurlitzer(stdout_f, stderr_f)
        with w:
            assert w.thread is None
            printf("some stdout")
            printf_err("some stderr")
        # make sure capture stopped
        printf("after stdout")
        printf_err("after stderr")

    with test_stdout.open() as f:
        assert f.read() == "some stdout\n"
    with test_stderr.open() as f:
        assert f.read() == "some stderr\n"


def test_one_file_pipe(tmpdir):

    test_stdout = tmpdir / "stdout.txt"

    with test_stdout.open("ab") as stdout_f:
        stderr = io.StringIO()
        w = Wurlitzer(stdout_f, stderr)
        with w as (stdout, stderr):
            assert w.thread is not None
            printf("some stdout")
            printf_err("some stderr")
        assert not w.thread.is_alive()

    with test_stdout.open() as f:
        assert f.read() == "some stdout\n"
    assert stderr.getvalue() == "some stderr\n"
wurlitzer-3.1.1/wurlitzer.py
"""Capture C-level FD output on pipes

Use `wurlitzer.pipes` or `wurlitzer.sys_pipes` as context managers.
"""

from __future__ import print_function

__version__ = '3.1.1'

__all__ = [
    'pipes',
    'sys_pipes',
    'sys_pipes_forever',
    'stop_sys_pipes',
    'PIPE',
    'STDOUT',
    'Wurlitzer',
]

import ctypes
import errno
import io
import logging
import os
import platform
import selectors
import sys
import threading
import time
import warnings
from contextlib import contextmanager
from fcntl import F_GETFL, F_SETFL, fcntl
from functools import lru_cache
from queue import Queue

try:
    from fcntl import F_GETPIPE_SZ, F_SETPIPE_SZ
except ImportError:
    # ref: linux uapi/linux/fcntl.h
    F_SETPIPE_SZ = 1024 + 7
    F_GETPIPE_SZ = 1024 + 8

libc = ctypes.CDLL(None)


def _get_streams_cffi():
    """Use CFFI to lookup stdout/stderr pointers

    Should work ~everywhere, but requires compilation
    """
    try:
        import cffi
    except ImportError:
        raise ImportError(
            "Failed to lookup stdout symbols in libc. Fallback requires cffi."
        )

    try:
        _ffi = cffi.FFI()
        _ffi.cdef("const size_t c_stdout_p();")
        _ffi.cdef("const size_t c_stderr_p();")
        _lib = _ffi.verify(
            '\n'.join(
                [
                    "#include ",
                    "const size_t c_stdout_p() { return (size_t) (void*) stdout; }",
                    "const size_t c_stderr_p() { return (size_t) (void*) stderr; }",
                ]
            )
        )
        c_stdout_p = ctypes.c_void_p(_lib.c_stdout_p())
        c_stderr_p = ctypes.c_void_p(_lib.c_stderr_p())
    except Exception as e:
        warnings.warn(
            "Failed to lookup stdout with cffi: {}.\nStreams may not be flushed.".format(
                e
            )
        )
        return (None, None)
    else:
        return c_stdout_p, c_stderr_p


c_stdout_p = c_stderr_p = None
try:
    c_stdout_p = ctypes.c_void_p.in_dll(libc, 'stdout')
    c_stderr_p = ctypes.c_void_p.in_dll(libc, 'stderr')
except ValueError:
    # libc.stdout has a funny name on macOS
    try:
        c_stdout_p = ctypes.c_void_p.in_dll(libc, '__stdoutp')
        c_stderr_p = ctypes.c_void_p.in_dll(libc, '__stderrp')
    except ValueError:
        c_stdout_p, c_stderr_p = _get_streams_cffi()


STDOUT = 2
PIPE = 3

_default_encoding = getattr(sys.stdin, 'encoding', None) or 'utf8'
if _default_encoding.lower() == 'ascii':
    # don't respect ascii
    _default_encoding = 'utf8'  # pragma: no cover


def dup2(a, b, timeout=3):
    """Like os.dup2, but retry on EBUSY"""
    dup_err = None
    # give FDs 3 seconds to not be busy anymore
    for i in range(int(10 * timeout)):
        try:
            return os.dup2(a, b)
        except OSError as e:
            dup_err = e
            if e.errno == errno.EBUSY:
                time.sleep(0.1)
            else:
                raise
    if dup_err:
        raise dup_err


@lru_cache()
def _get_max_pipe_size():
    """Get max pipe size

    Reads /proc/sys/fs/pipe-max-size on Linux.
    Always returns None elsewhere.

    Returns integer (up to 1MB),
    or None if no value can be determined.

    Adapted from wal-e, (c) 2018, WAL-E Contributors
    used under BSD-3-clause
    """
    if platform.system() != 'Linux':
        return

    # If Linux procfs (or something that looks like it) exposes its
    # maximum F_SETPIPE_SZ, adjust the default buffer sizes.
    try:
        with open('/proc/sys/fs/pipe-max-size', 'r') as f:
            # Figure out OS max pipe size
            pipe_max_size = int(f.read())
    except Exception:
        pass
    else:
        if pipe_max_size > 1024 * 1024:
            # avoid unusually large values, limit to 1MB
            return 1024 * 1024
        elif pipe_max_size <= 65536:
            # smaller than default, don't do anything
            return None
        else:
            return pipe_max_size


class Wurlitzer:
    """Class for Capturing Process-level FD output via dup2

    Typically used via `wurlitzer.pipes`
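
    A minimal sketch of direct use (``call_some_c_function`` is a placeholder,
    as in the README)::

        buf = io.StringIO()
        with Wurlitzer(stdout=buf, stderr=STDOUT):
            call_some_c_function()  # anything that writes to fd 1/2 at the C level
        print(buf.getvalue())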
    """

    flush_interval = 0.2

    def __init__(
        self,
        stdout=None,
        stderr=None,
        encoding=_default_encoding,
        bufsize=_get_max_pipe_size(),
    ):
        """
        Parameters
        ----------
        stdout: stream or None
            The stream for forwarding stdout.
        stderr: stream or None
            The stream for forwarding stderr.
        encoding: str or None
            The encoding to use, if streams should be interpreted as text.
        bufsize: int or None
            Set pipe buffer size using fcntl F_SETPIPE_SZ (linux only)
            default: use /proc/sys/fs/pipe-max-size up to a max of 1MB
            if 0, will do nothing.
        """
        # accept logger objects
        if stdout and isinstance(stdout, logging.Logger):
            stdout = _LogPipe(stdout, stream_name="stdout", level=logging.INFO)
        if stderr and isinstance(stderr, logging.Logger):
            stderr = _LogPipe(stderr, stream_name="stderr", level=logging.ERROR)

        self._stdout = stdout
        if stderr == STDOUT:
            self._stderr = self._stdout
        else:
            self._stderr = stderr
        self.encoding = encoding
        if bufsize is None:
            bufsize = _get_max_pipe_size()
        self._bufsize = bufsize
        self._save_fds = {}
        self._real_fds = {}
        self._handlers = {}
        self._handlers['stderr'] = self._handle_stderr
        self._handlers['stdout'] = self._handle_stdout

    def _setup_pipe(self, name):
        real_fd = getattr(sys, '__%s__' % name).fileno()
        save_fd = os.dup(real_fd)
        self._save_fds[name] = save_fd
        self._real_fds[name] = real_fd

        try:
            capture_fd = getattr(self, "_" + name).fileno()
        except Exception:
            pass
        else:
            # if it has a fileno(),
            # dup directly to capture file,
            # no pipes needed
            dup2(capture_fd, real_fd)
            return None

        pipe_out, pipe_in = os.pipe()
        # set max pipe buffer size (linux only)
        if self._bufsize:
            try:
                fcntl(pipe_in, F_SETPIPE_SZ, self._bufsize)
            except OSError as error:
                warnings.warn(
                    "Failed to set pipe buffer size: " + str(error), RuntimeWarning
                )

        dup2(pipe_in, real_fd)
        os.close(pipe_in)

        # make pipe_out non-blocking
        flags = fcntl(pipe_out, F_GETFL)
        fcntl(pipe_out, F_SETFL, flags | os.O_NONBLOCK)
        return pipe_out

    def _decode(self, data):
        """Decode data, if any

        Called before passing to stdout/stderr streams
        """
        if self.encoding:
            data = data.decode(self.encoding, 'replace')
        return data

    def _handle_stdout(self, data):
        if self._stdout:
            self._stdout.write(self._decode(data))

    def _handle_stderr(self, data):
        if self._stderr:
            self._stderr.write(self._decode(data))

    def _setup_handle(self):
        """Setup handle for output, if any"""
        self.handle = (self._stdout, self._stderr)

    def _finish_handle(self):
        """Finish handle, if anything should be done when it's all wrapped up."""
        pass

    def _flush(self):
        """flush sys.stdout/err and low-level FDs"""
        if self._stdout and sys.stdout:
            sys.stdout.flush()
        if self._stderr and sys.stderr:
            sys.stderr.flush()

        if c_stdout_p is not None:
            libc.fflush(c_stdout_p)

        if c_stderr_p is not None:
            libc.fflush(c_stderr_p)

    def __enter__(self):
        # flush anything out before starting
        self._flush()
        # setup handle
        self._setup_handle()

        # create pipe for stdout
        pipes = []
        names = {}
        if self._stdout:
            pipe = self._setup_pipe('stdout')
            if pipe:
                pipes.append(pipe)
                names[pipe] = 'stdout'
        if self._stderr:
            pipe = self._setup_pipe('stderr')
            if pipe:
                pipes.append(pipe)
                names[pipe] = 'stderr'

        if not pipes:
            # no pipes to handle (e.g. direct FD capture)
            # so no forwarder thread needed
            self.thread = None
            return self.handle

        # setup forwarder thread

        self._control_r, self._control_w = os.pipe()
        pipes.append(self._control_r)
        names[self._control_r] = "control"

        # flush pipes in a background thread to avoid blocking
        # the reader thread when the buffer is full
        flush_queue = Queue()

        def flush_main():
            while True:
                msg = flush_queue.get()
                if msg == 'stop':
                    return
                self._flush()

        flush_thread = threading.Thread(target=flush_main)
        flush_thread.daemon = True
        flush_thread.start()

        def forwarder():
            """Forward bytes on a pipe to stream messages"""
            draining = False
            flush_interval = 0
            poller = selectors.DefaultSelector()

            for pipe_ in pipes:
                poller.register(pipe_, selectors.EVENT_READ)

            while pipes:
                events = poller.select(flush_interval)
                if events:
                    # found something to read, don't block select until
                    # we run out of things to read
                    flush_interval = 0
                else:
                    # nothing to read
                    if draining:
                        # if we are draining and there's nothing to read, stop
                        break
                    else:
                        # nothing to read, get ready to wait.
                        # flush the streams in case there's something waiting
                        # to be written.
                        flush_queue.put('flush')
                        flush_interval = self.flush_interval
                        continue

                for selector_key, flags in events:
                    fd = selector_key.fd
                    if fd == self._control_r:
                        draining = True
                        pipes.remove(self._control_r)
                        poller.unregister(self._control_r)
                        os.close(self._control_r)
                        continue
                    name = names[fd]
                    data = os.read(fd, 1024)
                    if not data:
                        # pipe closed, stop polling it
                        pipes.remove(fd)
                        poller.unregister(fd)
                        os.close(fd)
                    else:
                        handler = getattr(self, '_handle_%s' % name)
                        handler(data)
                if not pipes:
                    # pipes closed, we are done
                    break
            # stop flush thread
            flush_queue.put('stop')
            flush_thread.join()
            # cleanup pipes
            [os.close(pipe) for pipe in pipes]
            poller.close()

        self.thread = threading.Thread(target=forwarder)
        self.thread.daemon = True
        self.thread.start()

        return self.handle

    def __exit__(self, exc_type, exc_value, traceback):
        # flush before exiting
        self._flush()
        if self.thread:
            # signal output is complete on control pipe
            os.write(self._control_w, b'\1')
            self.thread.join()
            os.close(self._control_w)

        # restore original state
        for name, real_fd in self._real_fds.items():
            save_fd = self._save_fds[name]
            dup2(save_fd, real_fd)
            os.close(save_fd)
        # finalize handle
        self._finish_handle()


@contextmanager
def pipes(stdout=PIPE, stderr=PIPE, encoding=_default_encoding, bufsize=None):
    """Capture C-level stdout/stderr in a context manager.

    The return value for the context manager is (stdout, stderr).

    Args:

    stdout (optional, default: PIPE): None or PIPE or Writable or Logger
    stderr (optional, default: PIPE): None or PIPE or STDOUT or Writable or Logger
    encoding (optional): probably 'utf-8'
    bufsize (optional): set explicit buffer size if the default doesn't work

    .. versionadded:: 3.1
        Accept Logger objects for stdout/stderr.
        If a Logger is specified, each line will produce a log message.
        stdout messages will be at INFO level, stderr messages at ERROR level.

    .. versionchanged:: 3.0

        when using `PIPE` (default), the type of captured output
        is `io.StringIO/BytesIO` instead of an OS pipe.
        This eliminates max buffer size issues (and hang when output exceeds 65536 bytes),
        but also means the buffer cannot be read with `.read()` methods
        until after the context exits.

    Examples
    --------

    >>> with pipes() as (stdout, stderr):
    ...     printf("C-level stdout")
    >>> output = stdout.read()
    """
    stdout_pipe = stderr_pipe = False
    if encoding:
        PipeIO = io.StringIO
    else:
        PipeIO = io.BytesIO

    # accept logger objects
    if stdout and isinstance(stdout, logging.Logger):
        stdout = _LogPipe(stdout, stream_name="stdout", level=logging.INFO)
    if stderr and isinstance(stderr, logging.Logger):
        stderr = _LogPipe(stderr, stream_name="stderr", level=logging.ERROR)

    # setup stdout
    if stdout == PIPE:
        stdout_r = stdout_w = PipeIO()
        stdout_pipe = True
    else:
        stdout_r = stdout_w = stdout
    # setup stderr
    if stderr == STDOUT:
        stderr_r = None
        stderr_w = stdout_w
    elif stderr == PIPE:
        stderr_r = stderr_w = PipeIO()
        stderr_pipe = True
    else:
        stderr_r = stderr_w = stderr
    w = Wurlitzer(stdout=stdout_w, stderr=stderr_w, encoding=encoding, bufsize=bufsize)
    try:
        with w:
            yield stdout_r, stderr_r
    finally:
        if stdout and isinstance(stdout, _LogPipe):
            stdout.flush()
        if stderr and isinstance(stderr, _LogPipe):
            stderr.flush()
        # close pipes
        if stdout_pipe:
            # seek to 0 so that it can be read after exit
            stdout_r.seek(0)
        if stderr_pipe:
            # seek to 0 so that it can be read after exit
            stderr_r.seek(0)


class _LogPipe(io.BufferedWriter):
    """Writeable that writes lines to a Logger object as they arrive from captured pipes"""

    def __init__(self, logger, stream_name, level=logging.INFO):
        self.logger = logger
        self.stream_name = stream_name
        self._buf = ""
        self.level = level

    def _log(self, line):
        """Log one line"""
        self.logger.log(self.level, line.rstrip(), extra={"stream": self.stream_name})

    def write(self, chunk):
        """Given chunk, split into lines

        Log each line as a discrete message

        If it ends with a partial line, save it until the next one
        """
        lines = chunk.splitlines(True)
        if self._buf:
            lines[0] = self._buf + lines[0]
        if lines[-1].endswith("\n"):
            self._buf = ""
        else:
            # last line is incomplete
            self._buf = lines[-1]
            lines = lines[:-1]

        for line in lines:
            self._log(line)

    def flush(self):
        """Write buffer as a last message if there is one"""
        if self._buf:
            self._log(self._buf)
            self._buf = ""

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.flush()


def sys_pipes(encoding=_default_encoding, bufsize=None):
    """Redirect C-level stdout/stderr to sys.stdout/stderr

    This is useful if sys.stdout/stderr are already being forwarded somewhere,
    e.g. in a Jupyter kernel.

    DO NOT USE THIS if sys.stdout and sys.stderr are not already being forwarded.
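
    A sketch of typical use (``call_some_c_function`` is a placeholder, as in
    the README)::

        with sys_pipes():
            call_some_c_function()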
    """
    # check that we aren't forwarding stdout to itself
    for name in ("stdout", "stderr"):
        stream = getattr(sys, name)
        capture_stream = getattr(sys, "__{}__".format(name))
        try:
            fd = stream.fileno()
            capture_fd = capture_stream.fileno()
        except Exception:
            # ignore errors - if sys.stdout doesn't need a fileno,
            # it's definitely not the original sys.__stdout__
            continue
        else:
            if fd == capture_fd:
                raise ValueError(
                    "Cannot forward sys.__{0}__ to sys.{0}: they are the same! Maybe you want wurlitzer.pipes()?".format(
                        name
                    )
                )
    return pipes(sys.stdout, sys.stderr, encoding=encoding, bufsize=bufsize)


_mighty_wurlitzer = None
_mighty_lock = threading.Lock()


def sys_pipes_forever(encoding=_default_encoding, bufsize=None):
    """Redirect all C output to sys.stdout/err

    This is not a context manager; it turns on C-forwarding permanently.
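
    Sketch of the intended pairing with :func:`stop_sys_pipes`
    (``run_application`` is a placeholder for the embedding application)::

        sys_pipes_forever()
        try:
            run_application()
        finally:
            stop_sys_pipes()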
    """
    global _mighty_wurlitzer
    with _mighty_lock:
        if _mighty_wurlitzer is None:
            _mighty_wurlitzer = sys_pipes(encoding, bufsize)
            _mighty_wurlitzer.__enter__()


def stop_sys_pipes():
    """Stop permanent redirection started by sys_pipes_forever"""
    global _mighty_wurlitzer
    with _mighty_lock:
        if _mighty_wurlitzer is not None:
            _mighty_wurlitzer.__exit__(None, None, None)
            _mighty_wurlitzer = None


_extension_enabled = False


def load_ipython_extension(ip):
    """Register me as an IPython extension

    Captures all C output during execution and forwards to sys.

    Does nothing on terminal IPython.

    Use: %load_ext wurlitzer
    """
    global _extension_enabled

    if not getattr(ip, 'kernel', None):
        warnings.warn("wurlitzer extension doesn't do anything in terminal IPython")
        return
    for name in ("__stdout__", "__stderr__"):
        if getattr(sys, name) is None:
            warnings.warn("sys.{} is None. Wurlitzer can't capture output without it.")
            return

    ip.events.register('pre_execute', sys_pipes_forever)
    ip.events.register('post_execute', stop_sys_pipes)
    _extension_enabled = True


def unload_ipython_extension(ip):
    """Unload me as an IPython extension

    Use: %unload_ext wurlitzer
    """
    global _extension_enabled
    if not _extension_enabled:
        return

    ip.events.unregister('pre_execute', sys_pipes_forever)
    ip.events.unregister('post_execute', stop_sys_pipes)
    # sys_pipes_forever was called in pre_execute
    # after unregister we need to call it explicitly:
    stop_sys_pipes()
    _extension_enabled = False