pax_global_header00006660000000000000000000000064144151671030014514gustar00rootroot0000000000000052 comment=e00005047e3e564e0d4757a9522bd9aff5eb960d qasync-0.24.1/000077500000000000000000000000001441516710300130765ustar00rootroot00000000000000qasync-0.24.1/.flake8000066400000000000000000000000651441516710300142520ustar00rootroot00000000000000[flake8] exclude = __pycache__ max-line-length = 120 qasync-0.24.1/.github/000077500000000000000000000000001441516710300144365ustar00rootroot00000000000000qasync-0.24.1/.github/workflows/000077500000000000000000000000001441516710300164735ustar00rootroot00000000000000qasync-0.24.1/.github/workflows/publish.yml000066400000000000000000000013151441516710300206640ustar00rootroot00000000000000# Upload a Python Package using Twine when a release is created name: Publish Python Package on: release: types: [published] permissions: contents: read jobs: deploy: runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v3 - name: Set up Python uses: actions/setup-python@v4 with: python-version: "3.10" - name: Install dependencies run: | python -m pip install --upgrade pip pip install build - name: Build package run: python -m build - name: Publish package uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 with: user: __token__ password: ${{ secrets.PYPI_API_TOKEN }} qasync-0.24.1/.github/workflows/test.yml000066400000000000000000000032001441516710300201700ustar00rootroot00000000000000name: Run Tests on: [push] jobs: ci: name: Python-${{ matrix.python }} ${{ matrix.os }} ${{ matrix.qt.qt_api }} runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: os: [ubuntu-20.04, windows-latest] python: ["3.6", "3.7", "3.8", "3.9"] qt: - package: PyQt5 qt_api: "pyqt5" - package: PyQt6 qt_api: "pyqt6" - package: PySide2 qt_api: "pyside2" - package: PySide6 qt_api: "pyside6" exclude: - os: windows-latest python: "3.6" qt: { package: PyQt6, qt_api: "pyqt6" } steps: - name: Checkout uses: actions/checkout@v3 - name: Set up Python uses: actions/setup-python@v4 with: python-version: ${{ matrix.python }} architecture: x64 - name: Install pipenv run: | python -m pip install --upgrade pipenv wheel - name: Install dependencies run: | pipenv install --python ${{ matrix.python }} --dev pipenv run --python ${{ matrix.python }} pip install ${{ matrix.qt.package }} pytest - name: Install Libxcb dependencies if: matrix.os == 'ubuntu-20.04' run: | sudo apt-get update sudo apt-get install '^libxcb.*-dev' libx11-xcb-dev libglu1-mesa-dev libxrender-dev libxi-dev libxkbcommon-dev libxkbcommon-x11-dev - name: Run headless test uses: coactions/setup-xvfb@v1 env: QT_API: ${{ matrix.qt.qt_api }} with: run: pipenv run --python ${{ matrix.python }} pytest -v qasync-0.24.1/.gitignore000066400000000000000000000067051441516710300150760ustar00rootroot00000000000000.idea/ .vscode/ Pipfile.lock # Created by https://www.gitignore.io/api/pycharm,python # Edit at https://www.gitignore.io/?templates=pycharm,python ### PyCharm ### # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 # User-specific stuff .idea/**/workspace.xml .idea/**/tasks.xml .idea/**/usage.statistics.xml .idea/**/dictionaries .idea/**/shelf # Generated files .idea/**/contentModel.xml # Sensitive or high-churn files .idea/**/dataSources/ .idea/**/dataSources.ids .idea/**/dataSources.local.xml .idea/**/sqlDataSources.xml .idea/**/dynamic.xml .idea/**/uiDesigner.xml 
.idea/**/dbnavigator.xml # Gradle .idea/**/gradle.xml .idea/**/libraries # Gradle and Maven with auto-import # When using Gradle or Maven with auto-import, you should exclude module files, # since they will be recreated, and may cause churn. Uncomment if using # auto-import. # .idea/modules.xml # .idea/*.iml # .idea/modules # *.iml # *.ipr # CMake cmake-build-*/ # Mongo Explorer plugin .idea/**/mongoSettings.xml # File-based project format *.iws # IntelliJ out/ # mpeltonen/sbt-idea plugin .idea_modules/ # JIRA plugin atlassian-ide-plugin.xml # Cursive Clojure plugin .idea/replstate.xml # Crashlytics plugin (for Android Studio and IntelliJ) com_crashlytics_export_strings.xml crashlytics.properties crashlytics-build.properties fabric.properties # Editor-based Rest Client .idea/httpRequests # Android studio 3.1+ serialized cache file .idea/caches/build_file_checksums.ser ### PyCharm Patch ### # Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 # *.iml # modules.xml # .idea/misc.xml # *.ipr # Sonarlint plugin .idea/**/sonarlint/ # SonarQube Plugin .idea/**/sonarIssues.xml # Markdown Navigator plugin .idea/**/markdown-navigator.xml .idea/**/markdown-navigator/ ### Python ### # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ pip-wheel-metadata/ share/python-wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .nox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover .hypothesis/ .pytest_cache/ # Translations *.mo *.pot # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # pyenv .python-version # pipenv # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. # However, in case of collaboration, if having platform-specific dependencies or dependencies # having no cross-platform support, pipenv may install dependencies that don't work, or not # install all needed dependencies. #Pipfile.lock # celery beat schedule file celerybeat-schedule # SageMath parsed files *.sage.py # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # Mr Developer .mr.developer.cfg .project .pydevproject # mkdocs documentation /site # mypy .mypy_cache/ .dmypy.json dmypy.json # Pyre type checker .pyre/ # End of https://www.gitignore.io/api/pycharm,python qasync-0.24.1/.pre-commit-config.yaml000066400000000000000000000002321441516710300173540ustar00rootroot00000000000000repos: # Black formats the Python code. - repo: https://github.com/psf/black rev: 23.3.0 hooks: - id: black language_version: python3 qasync-0.24.1/LICENSE000066400000000000000000000025541441516710300141110ustar00rootroot00000000000000Copyright (c) 2019, Sam McCormack Copyright (c) 2018, Gerard Marull-Paretas Copyright (c) 2014-2018, Mark Harviston, Arve Knudsen All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. qasync-0.24.1/MANIFEST.in000066400000000000000000000000331441516710300146300ustar00rootroot00000000000000include README.rst LICENSE qasync-0.24.1/Pipfile000066400000000000000000000003411441516710300144070ustar00rootroot00000000000000[[source]] url = "https://pypi.org/simple" name = "pypi" verify_ssl = true [dev-packages] atomicwrites = "*" pytest = "*" pytest-forked = "*" pytest-raises = "*" [packages] [dev-packages.qasync] editable = true path = "." qasync-0.24.1/README.md000066400000000000000000000025351441516710300143620ustar00rootroot00000000000000# qasync [![Maintenance](https://img.shields.io/maintenance/yes/2023)](https://pypi.org/project/qasync) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/qasync)](https://pypi.org/project/qasync) [![PyPI - License](https://img.shields.io/pypi/l/qasync)](/LICENSE) [![PyPI](https://img.shields.io/pypi/v/qasync)](https://pypi.org/project/qasync) [![test](https://github.com/CabbageDevelopment/qasync/actions/workflows/test.yml/badge.svg)](https://github.com/CabbageDevelopment/qasync/actions/workflows/test.yml) ## Introduction `qasync` allows coroutines to be used in PyQt/PySide applications by providing an implementation of the `PEP 3156` event-loop. `qasync` is a fork of [asyncqt](https://github.com/gmarull/asyncqt), which is a fork of [quamash](https://github.com/harvimt/quamash). May it live longer than its predecessors. #### The future of `qasync` `qasync` was created because `asyncqt` and `quamash` are no longer maintained. **`qasync` will continue to be maintained, and will still be accepting pull requests.** ## Requirements `qasync` requires Python >= 3.6, and PyQt5 or PySide2. ## Installation To install `qasync`, use `pip`: ``` pip install qasync ``` ## License You may use, modify and redistribute this software under the terms of the [BSD License](http://opensource.org/licenses/BSD-2-Clause). See [LICENSE](/LICENSE). 
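## Basic usage

A minimal sketch of driving a coroutine from the Qt event loop. PyQt5 is imported here only for illustration (PySide2, PyQt6 and PySide6 work the same way), and the `show_message` coroutine and `QLabel` widget are placeholders, not part of the qasync API:

```python
import asyncio
import sys

from PyQt5.QtWidgets import QApplication, QLabel  # or PySide2.QtWidgets, etc.

import qasync


async def show_message(label):
    # Illustrative coroutine: update the label after an asynchronous pause.
    await asyncio.sleep(1)
    label.setText("Hello from asyncio!")


app = QApplication(sys.argv)
loop = qasync.QEventLoop(app)
asyncio.set_event_loop(loop)

label = QLabel("Waiting...")
label.show()

with loop:
    loop.create_task(show_message(label))
    loop.run_forever()  # returns once the last window is closed
```

The `examples/` directory that follows contains fuller patterns, including an aiohttp fetcher built on `asyncSlot`/`asyncClose` and a `QThreadExecutor` progress-bar demo.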
qasync-0.24.1/examples/000077500000000000000000000000001441516710300147145ustar00rootroot00000000000000qasync-0.24.1/examples/aiohttp_fetch.py000066400000000000000000000045321441516710300201130ustar00rootroot00000000000000import asyncio import functools import sys import aiohttp # from PyQt5.QtWidgets import ( from PySide2.QtWidgets import ( QWidget, QLabel, QLineEdit, QTextEdit, QPushButton, QVBoxLayout, ) import qasync from qasync import asyncSlot, asyncClose, QApplication class MainWindow(QWidget): """Main window.""" _DEF_URL = "https://jsonplaceholder.typicode.com/todos/1" """str: Default URL.""" _SESSION_TIMEOUT = 1.0 """float: Session timeout.""" def __init__(self): super().__init__() self.setLayout(QVBoxLayout()) self.lblStatus = QLabel("Idle", self) self.layout().addWidget(self.lblStatus) self.editUrl = QLineEdit(self._DEF_URL, self) self.layout().addWidget(self.editUrl) self.editResponse = QTextEdit("", self) self.layout().addWidget(self.editResponse) self.btnFetch = QPushButton("Fetch", self) self.btnFetch.clicked.connect(self.on_btnFetch_clicked) self.layout().addWidget(self.btnFetch) self.session = aiohttp.ClientSession( loop=asyncio.get_event_loop(), timeout=aiohttp.ClientTimeout(total=self._SESSION_TIMEOUT), ) @asyncClose async def closeEvent(self, event): await self.session.close() @asyncSlot() async def on_btnFetch_clicked(self): self.btnFetch.setEnabled(False) self.lblStatus.setText("Fetching...") try: async with self.session.get(self.editUrl.text()) as r: self.editResponse.setText(await r.text()) except Exception as exc: self.lblStatus.setText("Error: {}".format(exc)) else: self.lblStatus.setText("Finished!") finally: self.btnFetch.setEnabled(True) async def main(): def close_future(future, loop): loop.call_later(10, future.cancel) future.cancel() loop = asyncio.get_event_loop() future = asyncio.Future() app = QApplication.instance() if hasattr(app, "aboutToQuit"): getattr(app, "aboutToQuit").connect( functools.partial(close_future, future, loop) ) mainWindow = MainWindow() mainWindow.show() await future return True if __name__ == "__main__": try: qasync.run(main()) except asyncio.exceptions.CancelledError: sys.exit(0) qasync-0.24.1/examples/executor_example.py000066400000000000000000000014121441516710300206350ustar00rootroot00000000000000import functools import sys import asyncio import time import qasync # from PyQt5.QtWidgets import ( from PySide2.QtWidgets import QApplication, QProgressBar from qasync import QEventLoop, QThreadExecutor async def master(): progress = QProgressBar() progress.setRange(0, 99) progress.show() await first_50(progress) loop = asyncio.get_running_loop() with QThreadExecutor(1) as exec: await loop.run_in_executor(exec, functools.partial(last_50, progress), loop) async def first_50(progress): for i in range(50): progress.setValue(i) await asyncio.sleep(0.1) def last_50(progress, loop): for i in range(50, 100): loop.call_soon_threadsafe(progress.setValue, i) time.sleep(0.1) qasync.run(master()) qasync-0.24.1/qasync/000077500000000000000000000000001441516710300143745ustar00rootroot00000000000000qasync-0.24.1/qasync/__init__.py000066400000000000000000000644251441516710300165200ustar00rootroot00000000000000""" Implementation of the PEP 3156 Event-Loop with Qt. 
Copyright (c) 2018 Gerard Marull-Paretas Copyright (c) 2014 Mark Harviston Copyright (c) 2014 Arve Knudsen BSD License """ __author__ = ( "Sam McCormack", "Gerard Marull-Paretas , " "Mark Harviston , " "Arve Knudsen ", ) __version__ = "0.24.0" __url__ = "https://github.com/CabbageDevelopment/qasync" __license__ = "BSD" __all__ = ["QEventLoop", "QThreadExecutor", "asyncSlot", "asyncClose"] import asyncio import contextlib import functools import importlib import inspect import itertools import logging import os import sys import time from concurrent.futures import Future from queue import Queue logger = logging.getLogger(__name__) QtModule = None # If QT_API env variable is given, use that or fail trying qtapi_env = os.getenv("QT_API", "").strip().lower() if qtapi_env: env_to_mod_map = { "pyqt5": "PyQt5", "pyqt6": "PyQt6", "pyqt": "PyQt4", "pyqt4": "PyQt4", "pyside6": "PySide6", "pyside2": "PySide2", "pyside": "PySide", } if qtapi_env in env_to_mod_map: QtModuleName = env_to_mod_map[qtapi_env] else: raise ImportError( "QT_API environment variable set ({}) but not one of [{}].".format( qtapi_env, ", ".join(env_to_mod_map.keys()) ) ) logger.info("Forcing use of {} as Qt Implementation".format(QtModuleName)) QtModule = importlib.import_module(QtModuleName) # If a Qt lib is already imported, use that if not QtModule: for QtModuleName in ("PyQt5", "PyQt6", "PySide2", "PySide6"): if QtModuleName in sys.modules: QtModule = sys.modules[QtModuleName] break # Try importing qt libs if not QtModule: for QtModuleName in ("PyQt5", "PyQt6", "PySide2", "PySide6"): try: QtModule = importlib.import_module(QtModuleName) except ImportError: continue else: break if not QtModule: raise ImportError("No Qt implementations found") logger.info("Using Qt Implementation: {}".format(QtModuleName)) QtCore = importlib.import_module(QtModuleName + ".QtCore", package=QtModuleName) QtGui = importlib.import_module(QtModuleName + ".QtGui", package=QtModuleName) if QtModuleName == "PyQt5": from PyQt5 import QtWidgets from PyQt5.QtCore import pyqtSlot as Slot QApplication = QtWidgets.QApplication elif QtModuleName == "PyQt6": from PyQt6 import QtWidgets from PyQt6.QtCore import pyqtSlot as Slot QApplication = QtWidgets.QApplication elif QtModuleName == "PySide2": from PySide2 import QtWidgets from PySide2.QtCore import Slot QApplication = QtWidgets.QApplication elif QtModuleName == "PySide6": from PySide6 import QtWidgets from PySide6.QtCore import Slot QApplication = QtWidgets.QApplication from ._common import with_logger # noqa @with_logger class _QThreadWorker(QtCore.QThread): """ Read jobs from the queue and then execute them. For use by the QThreadExecutor """ def __init__(self, queue, num, stackSize=None): self.__queue = queue self.__stop = False self.__num = num super().__init__() if stackSize is not None: self.setStackSize(stackSize) def run(self): queue = self.__queue while True: command = queue.get() if command is None: # Stopping... 
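                # A None command is the shutdown sentinel that QThreadExecutor.shutdown() puts on the queue for each worker.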
break future, callback, args, kwargs = command self._logger.debug( "#%s got callback %s with args %s and kwargs %s from queue", self.__num, callback, args, kwargs, ) if future.set_running_or_notify_cancel(): self._logger.debug("Invoking callback") try: r = callback(*args, **kwargs) except Exception as err: self._logger.debug("Setting Future exception: %s", err) future.set_exception(err) else: self._logger.debug("Setting Future result: %s", r) future.set_result(r) else: self._logger.debug("Future was canceled") self._logger.debug("Thread #%s stopped", self.__num) def wait(self): self._logger.debug("Waiting for thread #%s to stop...", self.__num) super().wait() @with_logger class QThreadExecutor: """ ThreadExecutor that produces QThreads. Same API as `concurrent.futures.Executor` >>> from qasync import QThreadExecutor >>> with QThreadExecutor(5) as executor: ... f = executor.submit(lambda x: 2 + x, 2) ... r = f.result() ... assert r == 4 """ def __init__(self, max_workers=10, stack_size=None): super().__init__() self.__max_workers = max_workers self.__queue = Queue() if stack_size is None: # Match cpython/Python/thread_pthread.h if sys.platform.startswith("darwin"): stack_size = 16 * 2**20 elif sys.platform.startswith("freebsd"): stack_size = 4 * 2**20 elif sys.platform.startswith("aix"): stack_size = 2 * 2**20 self.__workers = [ _QThreadWorker(self.__queue, i + 1, stack_size) for i in range(max_workers) ] self.__been_shutdown = False for w in self.__workers: w.start() def submit(self, callback, *args, **kwargs): if self.__been_shutdown: raise RuntimeError("QThreadExecutor has been shutdown") future = Future() self._logger.debug( "Submitting callback %s with args %s and kwargs %s to thread worker queue", callback, args, kwargs, ) self.__queue.put((future, callback, args, kwargs)) return future def map(self, func, *iterables, timeout=None): raise NotImplementedError("use as_completed on the event loop") def shutdown(self, wait=True): if self.__been_shutdown: raise RuntimeError("QThreadExecutor has been shutdown") self.__been_shutdown = True self._logger.debug("Shutting down") for i in range(len(self.__workers)): # Signal workers to stop self.__queue.put(None) if wait: for w in self.__workers: w.wait() def __enter__(self, *args): if self.__been_shutdown: raise RuntimeError("QThreadExecutor has been shutdown") return self def __exit__(self, *args): self.shutdown() def _make_signaller(qtimpl_qtcore, *args): class Signaller(qtimpl_qtcore.QObject): try: signal = qtimpl_qtcore.Signal(*args) except AttributeError: signal = qtimpl_qtcore.pyqtSignal(*args) return Signaller() @with_logger class _SimpleTimer(QtCore.QObject): def __init__(self): super().__init__() self.__callbacks = {} self._stopped = False self.__debug_enabled = False def add_callback(self, handle, delay=0): timerid = self.startTimer(int(delay * 1000)) self.__log_debug("Registering timer id %s", timerid) assert timerid not in self.__callbacks self.__callbacks[timerid] = handle return handle def timerEvent(self, event): # noqa: N802 timerid = event.timerId() self.__log_debug("Timer event on id %s", timerid) if self._stopped: self.__log_debug("Timer stopped, killing %s", timerid) self.killTimer(timerid) del self.__callbacks[timerid] else: try: handle = self.__callbacks[timerid] except KeyError as e: self.__log_debug(e) pass else: if handle._cancelled: self.__log_debug("Handle %s cancelled", handle) else: self.__log_debug("Calling handle %s", handle) handle._run() finally: del self.__callbacks[timerid] handle = None self.killTimer(timerid) 
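    # stop() only flags this timer group as stopped; any timer ids still pending are killed lazily in timerEvent().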
def stop(self): self.__log_debug("Stopping timers") self._stopped = True def set_debug(self, enabled): self.__debug_enabled = enabled def __log_debug(self, *args, **kwargs): if self.__debug_enabled: self._logger.debug(*args, **kwargs) def _fileno(fd): if isinstance(fd, int): return fd try: return int(fd.fileno()) except (AttributeError, TypeError, ValueError): raise ValueError(f"Invalid file object: {fd!r}") from None @with_logger class _QEventLoop: """ Implementation of asyncio event loop that uses the Qt Event loop. >>> import asyncio >>> >>> app = getfixture('application') >>> >>> async def xplusy(x, y): ... await asyncio.sleep(.1) ... assert x + y == 4 ... await asyncio.sleep(.1) >>> >>> loop = QEventLoop(app) >>> asyncio.set_event_loop(loop) >>> with loop: ... loop.run_until_complete(xplusy(2, 2)) If the event loop shall be used with an existing and already running QApplication it must be specified in the constructor via already_running=True In this case the user is responsible for loop cleanup with stop() and close() """ def __init__(self, app=None, set_running_loop=True, already_running=False): self.__app = app or QApplication.instance() assert self.__app is not None, "No QApplication has been instantiated" self.__is_running = False self.__debug_enabled = False self.__default_executor = None self.__exception_handler = None self._read_notifiers = {} self._write_notifiers = {} self._timer = _SimpleTimer() self.__call_soon_signaller = signaller = _make_signaller(QtCore, object, tuple) self.__call_soon_signal = signaller.signal signaller.signal.connect(lambda callback, args: self.call_soon(callback, *args)) assert self.__app is not None super().__init__() if set_running_loop: asyncio.events._set_running_loop(self) # We have to set __is_running to True after calling # super().__init__() because of a bug in BaseEventLoop. 
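        # When attaching to an already running QApplication, mark the loop as running up front and defer teardown to the aboutToQuit signal.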
if already_running: self.__is_running = True # it must be ensured that all pre- and # postprocessing for the eventloop is done self._before_run_forever() self.__app.aboutToQuit.connect(self._after_run_forever) def run_forever(self): """Run eventloop forever.""" if self.__is_running: raise RuntimeError("Event loop already running") self.__is_running = True self._before_run_forever() try: self.__log_debug("Starting Qt event loop") asyncio.events._set_running_loop(self) rslt = -1 try: rslt = self.__app.exec_() except AttributeError: rslt = self.__app.exec() self.__log_debug("Qt event loop ended with result %s", rslt) return rslt finally: asyncio.events._set_running_loop(None) self._after_run_forever() self.__is_running = False def run_until_complete(self, future): """Run until Future is complete.""" if self.__is_running: raise RuntimeError("Event loop already running") self.__log_debug("Running %s until complete", future) future = asyncio.ensure_future(future, loop=self) def stop(*args): self.stop() # noqa future.add_done_callback(stop) try: self.run_forever() finally: future.remove_done_callback(stop) self.__app.processEvents() # run loop one last time to process all the events if not future.done(): raise RuntimeError("Event loop stopped before Future completed.") self.__log_debug("Future %s finished running", future) return future.result() def stop(self): """Stop event loop.""" if not self.__is_running: self.__log_debug("Already stopped") return self.__log_debug("Stopping event loop...") self.__is_running = False self.__app.exit() self.__log_debug("Stopped event loop") def is_running(self): """Return True if the event loop is running, False otherwise.""" return self.__is_running def close(self): """ Release all resources used by the event loop. The loop cannot be restarted after it has been closed. 
""" if self.is_running(): raise RuntimeError("Cannot close a running event loop") if self.is_closed(): return self.__log_debug("Closing event loop...") if self.__default_executor is not None: self.__default_executor.shutdown() super().close() self._timer.stop() self.__app = None for notifier in itertools.chain( self._read_notifiers.values(), self._write_notifiers.values() ): notifier.setEnabled(False) self._read_notifiers = None self._write_notifiers = None def call_later(self, delay, callback, *args, context=None): """Register callback to be invoked after a certain delay.""" if asyncio.iscoroutinefunction(callback): raise TypeError("coroutines cannot be used with call_later") if not callable(callback): raise TypeError( "callback must be callable: {}".format(type(callback).__name__) ) self.__log_debug( "Registering callback %s to be invoked with arguments %s after %s second(s)", callback, args, delay, ) if sys.version_info >= (3, 7): return self._add_callback( asyncio.Handle(callback, args, self, context=context), delay ) return self._add_callback(asyncio.Handle(callback, args, self), delay) def _add_callback(self, handle, delay=0): return self._timer.add_callback(handle, delay) def call_soon(self, callback, *args, context=None): """Register a callback to be run on the next iteration of the event loop.""" return self.call_later(0, callback, *args, context=context) def call_at(self, when, callback, *args, context=None): """Register callback to be invoked at a certain time.""" return self.call_later(when - self.time(), callback, *args, context=context) def time(self): """Get time according to event loop's clock.""" return time.monotonic() def _add_reader(self, fd, callback, *args): """Register a callback for when a file descriptor is ready for reading.""" self._check_closed() try: existing = self._read_notifiers[fd] except KeyError: pass else: # this is necessary to avoid race condition-like issues existing.setEnabled(False) existing.activated["int"].disconnect() # will get overwritten by the assignment below anyways notifier = QtCore.QSocketNotifier(_fileno(fd), QtCore.QSocketNotifier.Type.Read) notifier.setEnabled(True) self.__log_debug("Adding reader callback for file descriptor %s", fd) notifier.activated["int"].connect( lambda: self.__on_notifier_ready( self._read_notifiers, notifier, fd, callback, args ) # noqa: C812 ) self._read_notifiers[fd] = notifier def _remove_reader(self, fd): """Remove reader callback.""" if self.is_closed(): return self.__log_debug("Removing reader callback for file descriptor %s", fd) try: notifier = self._read_notifiers.pop(fd) except KeyError: return False else: notifier.setEnabled(False) return True def _add_writer(self, fd, callback, *args): """Register a callback for when a file descriptor is ready for writing.""" self._check_closed() try: existing = self._write_notifiers[fd] except KeyError: pass else: # this is necessary to avoid race condition-like issues existing.setEnabled(False) existing.activated["int"].disconnect() # will get overwritten by the assignment below anyways notifier = QtCore.QSocketNotifier( _fileno(fd), QtCore.QSocketNotifier.Type.Write, ) notifier.setEnabled(True) self.__log_debug("Adding writer callback for file descriptor %s", fd) notifier.activated["int"].connect( lambda: self.__on_notifier_ready( self._write_notifiers, notifier, fd, callback, args ) # noqa: C812 ) self._write_notifiers[fd] = notifier def _remove_writer(self, fd): """Remove writer callback.""" if self.is_closed(): return self.__log_debug("Removing writer callback 
for file descriptor %s", fd) try: notifier = self._write_notifiers.pop(fd) except KeyError: return False else: notifier.setEnabled(False) return True def __notifier_cb_wrapper(self, notifiers, notifier, fd, callback, args): # This wrapper gets called with a certain delay. We cannot know # for sure that the notifier is still the current notifier for # the fd. if notifiers.get(fd, None) is not notifier: return try: callback(*args) finally: # The notifier might have been overriden by the # callback. We must not re-enable it in that case. if notifiers.get(fd, None) is notifier: notifier.setEnabled(True) else: notifier.activated["int"].disconnect() def __on_notifier_ready(self, notifiers, notifier, fd, callback, args): if fd not in notifiers: self._logger.warning( "Socket notifier for fd %s is ready, even though it should " "be disabled, not calling %s and disabling", fd, callback, ) notifier.setEnabled(False) return # It can be necessary to disable QSocketNotifier when e.g. checking # ZeroMQ sockets for events assert notifier.isEnabled() self.__log_debug("Socket notifier for fd %s is ready", fd) notifier.setEnabled(False) self.call_soon( self.__notifier_cb_wrapper, notifiers, notifier, fd, callback, args ) # Methods for interacting with threads. def call_soon_threadsafe(self, callback, *args, context=None): """Thread-safe version of call_soon.""" self.__call_soon_signal.emit(callback, args) def run_in_executor(self, executor, callback, *args): """Run callback in executor. If no executor is provided, the default executor will be used, which defers execution to a background thread. """ self.__log_debug("Running callback %s with args %s in executor", callback, args) if isinstance(callback, asyncio.Handle): assert not args assert not isinstance(callback, asyncio.TimerHandle) if callback._cancelled: f = asyncio.Future() f.set_result(None) return f callback, args = callback.callback, callback.args if executor is None: self.__log_debug("Using default executor") executor = self.__default_executor if executor is None: self.__log_debug("Creating default executor") executor = self.__default_executor = QThreadExecutor() return asyncio.wrap_future(executor.submit(callback, *args)) def set_default_executor(self, executor): self.__default_executor = executor # Error handlers. def set_exception_handler(self, handler): self.__exception_handler = handler def default_exception_handler(self, context): """Handle exceptions. This is the default exception handler. This is called when an exception occurs and no exception handler is set, and can be called by a custom exception handler that wants to defer to the default behavior. context parameter has the same meaning as in `call_exception_handler()`. """ self.__log_debug("Default exception handler executing") message = context.get("message") if not message: message = "Unhandled exception in event loop" try: exception = context["exception"] except KeyError: exc_info = False else: exc_info = (type(exception), exception, exception.__traceback__) log_lines = [message] for key in [k for k in sorted(context) if k not in {"message", "exception"}]: log_lines.append("{}: {!r}".format(key, context[key])) self.__log_error("\n".join(log_lines), exc_info=exc_info) def call_exception_handler(self, context): if self.__exception_handler is None: try: self.default_exception_handler(context) except Exception: # Second protection layer for unexpected errors # in the default implementation, as well as for subclassed # event loops with overloaded "default_exception_handler". 
self.__log_error( "Exception in default exception handler", exc_info=True ) return try: self.__exception_handler(self, context) except Exception as exc: # Exception in the user set custom exception handler. try: # Let's try the default handler. self.default_exception_handler( { "message": "Unhandled error in custom exception handler", "exception": exc, "context": context, } ) except Exception: # Guard 'default_exception_handler' in case it's # overloaded. self.__log_error( "Exception in default exception handler while handling an unexpected error " "in custom exception handler", exc_info=True, ) # Debug flag management. def get_debug(self): return self.__debug_enabled def set_debug(self, enabled): super().set_debug(enabled) self.__debug_enabled = enabled self._timer.set_debug(enabled) def __enter__(self): return self def __exit__(self, *args): self.stop() self.close() def __log_debug(self, *args, **kwargs): if self.__debug_enabled: self._logger.debug(*args, **kwargs) @classmethod def __log_error(cls, *args, **kwds): # In some cases, the error method itself fails, don't have a lot of options in that case try: cls._logger.error(*args, **kwds) except: # noqa E722 sys.stderr.write("{!r}, {!r}\n".format(args, kwds)) from ._unix import _SelectorEventLoop # noqa QSelectorEventLoop = type("QSelectorEventLoop", (_QEventLoop, _SelectorEventLoop), {}) if os.name == "nt": from ._windows import _ProactorEventLoop QIOCPEventLoop = type("QIOCPEventLoop", (_QEventLoop, _ProactorEventLoop), {}) QEventLoop = QIOCPEventLoop else: QEventLoop = QSelectorEventLoop class _Cancellable: def __init__(self, timer, loop): self.__timer = timer self.__loop = loop def cancel(self): self.__timer.stop() def asyncClose(fn): """Allow to run async code before application is closed.""" @functools.wraps(fn) def wrapper(*args, **kwargs): f = asyncio.ensure_future(fn(*args, **kwargs)) while not f.done(): QApplication.instance().processEvents() return wrapper def asyncSlot(*args, **kwargs): """Make a Qt async slot run on asyncio loop.""" def _error_handler(task): try: task.result() except Exception: sys.excepthook(*sys.exc_info()) def outer_decorator(fn): @Slot(*args, **kwargs) @functools.wraps(fn) def wrapper(*args, **kwargs): # Qt ignores trailing args from a signal but python does # not so inspect the slot signature and if it's not # callable try removing args until it is. task = None while len(args): try: inspect.signature(fn).bind(*args, **kwargs) except TypeError: if len(args): # Only convert args to a list if we need to pop() args = list(args) args.pop() continue else: task = asyncio.ensure_future(fn(*args, **kwargs)) task.add_done_callback(_error_handler) break if task is None: raise TypeError( "asyncSlot was not callable from Signal. Potential signature mismatch." 
) return task return wrapper return outer_decorator class QEventLoopPolicyMixin: def new_event_loop(self): return QEventLoop(QApplication.instance() or QApplication(sys.argv)) class DefaultQEventLoopPolicy( QEventLoopPolicyMixin, asyncio.DefaultEventLoopPolicy, ): pass @contextlib.contextmanager def _set_event_loop_policy(policy): old_policy = asyncio.get_event_loop_policy() asyncio.set_event_loop_policy(policy) try: yield finally: asyncio.set_event_loop_policy(old_policy) def run(*args, **kwargs): with _set_event_loop_policy(DefaultQEventLoopPolicy()): return asyncio.run(*args, **kwargs) qasync-0.24.1/qasync/_common.py000066400000000000000000000011321441516710300163720ustar00rootroot00000000000000# © 2018 Gerard Marull-Paretas # © 2014 Mark Harviston # © 2014 Arve Knudsen # BSD License """Mostly irrelevant, but useful utilities common to UNIX and Windows.""" import logging def with_logger(cls): """Class decorator to add a logger to a class.""" attr_name = '_logger' cls_name = cls.__qualname__ module = cls.__module__ if module is not None: cls_name = module + '.' + cls_name else: raise AssertionError setattr(cls, attr_name, logging.getLogger(cls_name)) return cls qasync-0.24.1/qasync/_unix.py000066400000000000000000000141761441516710300161010ustar00rootroot00000000000000# © 2018 Gerard Marull-Paretas # © 2014 Mark Harviston # © 2014 Arve Knudsen # BSD License """UNIX specific Quamash functionality.""" import asyncio import selectors import collections from . import QtCore, with_logger, _fileno EVENT_READ = (1 << 0) EVENT_WRITE = (1 << 1) class _SelectorMapping(collections.abc.Mapping): """Mapping of file objects to selector keys.""" def __init__(self, selector): self._selector = selector def __len__(self): return len(self._selector._fd_to_key) def __getitem__(self, fileobj): try: fd = self._selector._fileobj_lookup(fileobj) return self._selector._fd_to_key[fd] except KeyError: raise KeyError("{!r} is not registered".format(fileobj)) from None def __iter__(self): return iter(self._selector._fd_to_key) @with_logger class _Selector(selectors.BaseSelector): def __init__(self, parent): # this maps file descriptors to keys self._fd_to_key = {} # read-only mapping returned by get_map() self.__map = _SelectorMapping(self) self.__read_notifiers = {} self.__write_notifiers = {} self.__parent = parent def select(self, *args, **kwargs): """Implement abstract method even though we don't need it.""" raise NotImplementedError def _fileobj_lookup(self, fileobj): """Return a file descriptor from a file object. This wraps _fileno() to do an exhaustive search in case the object is invalid but we still have it in our map. This is used by unregister() so we can unregister an object that was previously registered even if it is closed. It is also used by _SelectorMapping. """ try: return _fileno(fileobj) except ValueError: # Do an exhaustive search. for key in self._fd_to_key.values(): if key.fileobj is fileobj: return key.fd # Raise ValueError after all. 
raise def register(self, fileobj, events, data=None): if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)): raise ValueError("Invalid events: {!r}".format(events)) key = selectors.SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data) if key.fd in self._fd_to_key: raise KeyError("{!r} (FD {}) is already registered".format(fileobj, key.fd)) self._fd_to_key[key.fd] = key if events & EVENT_READ: notifier = QtCore.QSocketNotifier(key.fd, QtCore.QSocketNotifier.Read) notifier.activated['int'].connect(self.__on_read_activated) self.__read_notifiers[key.fd] = notifier if events & EVENT_WRITE: notifier = QtCore.QSocketNotifier(key.fd, QtCore.QSocketNotifier.Write) notifier.activated['int'].connect(self.__on_write_activated) self.__write_notifiers[key.fd] = notifier return key def __on_read_activated(self, fd): self._logger.debug('File %s ready to read', fd) key = self._key_from_fd(fd) if key: self.__parent._process_event(key, EVENT_READ & key.events) def __on_write_activated(self, fd): self._logger.debug('File %s ready to write', fd) key = self._key_from_fd(fd) if key: self.__parent._process_event(key, EVENT_WRITE & key.events) def unregister(self, fileobj): def drop_notifier(notifiers): try: notifier = notifiers.pop(key.fd) except KeyError: pass else: notifier.activated['int'].disconnect() try: key = self._fd_to_key.pop(self._fileobj_lookup(fileobj)) except KeyError: raise KeyError("{!r} is not registered".format(fileobj)) from None drop_notifier(self.__read_notifiers) drop_notifier(self.__write_notifiers) return key def modify(self, fileobj, events, data=None): try: key = self._fd_to_key[self._fileobj_lookup(fileobj)] except KeyError: raise KeyError("{!r} is not registered".format(fileobj)) from None if events != key.events: self.unregister(fileobj) key = self.register(fileobj, events, data) elif data != key.data: # Use a shortcut to update the data. key = key._replace(data=data) self._fd_to_key[key.fd] = key return key def close(self): self._logger.debug('Closing') self._fd_to_key.clear() self.__read_notifiers.clear() self.__write_notifiers.clear() def get_map(self): return self.__map def _key_from_fd(self, fd): """ Return the key associated to a given file descriptor. 
Parameters: fd -- file descriptor Returns: corresponding key, or None if not found """ try: return self._fd_to_key[fd] except KeyError: return None class _SelectorEventLoop(asyncio.SelectorEventLoop): def __init__(self): self._signal_safe_callbacks = [] selector = _Selector(self) asyncio.SelectorEventLoop.__init__(self, selector) def _before_run_forever(self): pass def _after_run_forever(self): pass def _process_event(self, key, mask): """Selector has delivered us an event.""" self._logger.debug('Processing event with key %s and mask %s', key, mask) fileobj, (reader, writer) = key.fileobj, key.data if mask & selectors.EVENT_READ and reader is not None: if reader._cancelled: self.remove_reader(fileobj) else: self._logger.debug('Invoking reader callback: %s', reader) reader._run() if mask & selectors.EVENT_WRITE and writer is not None: if writer._cancelled: self.remove_writer(fileobj) else: self._logger.debug('Invoking writer callback: %s', writer) writer._run() qasync-0.24.1/qasync/_windows.py000066400000000000000000000136021441516710300166010ustar00rootroot00000000000000# © 2018 Gerard Marull-Paretas # © 2014 Mark Harviston # © 2014 Arve Knudsen # BSD License """Windows specific Quamash functionality.""" import asyncio import sys try: import _winapi from asyncio import windows_events import _overlapped except ImportError: # noqa pass # w/o guarding this import py.test can't gather doctests on platforms w/o _winapi import math from . import QtCore, _make_signaller from ._common import with_logger UINT32_MAX = 0xffffffff class _ProactorEventLoop(asyncio.ProactorEventLoop): """Proactor based event loop.""" def __init__(self): super().__init__(_IocpProactor()) self.__event_signaller = _make_signaller(QtCore, list) self.__event_signal = self.__event_signaller.signal self.__event_signal.connect(self._process_events) self.__event_poller = _EventPoller(self.__event_signal) def _process_events(self, events): """Process events from proactor.""" for f, callback, transferred, key, ov in events: try: self._logger.debug('Invoking event callback %s', callback) value = callback(transferred, key, ov) except OSError as e: self._logger.debug('Event callback failed', exc_info=sys.exc_info()) if not f.done(): f.set_exception(e) else: if not f.cancelled(): f.set_result(value) def _before_run_forever(self): self.__event_poller.start(self._proactor) def _after_run_forever(self): self.__event_poller.stop() @with_logger class _IocpProactor(windows_events.IocpProactor): def __init__(self): self.__events = [] super(_IocpProactor, self).__init__() self._lock = QtCore.QMutex() def select(self, timeout=None): """Override in order to handle events in a threadsafe manner.""" if not self.__events: self._poll(timeout) tmp = self.__events self.__events = [] return tmp def close(self): self._logger.debug('Closing') super(_IocpProactor, self).close() def recv(self, conn, nbytes, flags=0): with QtCore.QMutexLocker(self._lock): return super(_IocpProactor, self).recv(conn, nbytes, flags) def send(self, conn, buf, flags=0): with QtCore.QMutexLocker(self._lock): return super(_IocpProactor, self).send(conn, buf, flags) def _poll(self, timeout=None): """Override in order to handle events in a threadsafe manner.""" if timeout is None: ms = UINT32_MAX # wait for eternity elif timeout < 0: raise ValueError("negative timeout") else: # GetQueuedCompletionStatus() has a resolution of 1 millisecond, # round away from zero to wait *at least* timeout seconds. 
ms = math.ceil(timeout * 1e3) if ms >= UINT32_MAX: raise ValueError("timeout too big") with QtCore.QMutexLocker(self._lock): while True: # self._logger.debug('Polling IOCP with timeout {} ms in thread {}...'.format( # ms, threading.get_ident())) status = _overlapped.GetQueuedCompletionStatus(self._iocp, ms) if status is None: break err, transferred, key, address = status try: f, ov, obj, callback = self._cache.pop(address) except KeyError: # key is either zero, or it is used to return a pipe # handle which should be closed to avoid a leak. if key not in (0, _overlapped.INVALID_HANDLE_VALUE): _winapi.CloseHandle(key) ms = 0 continue if obj in self._stopped_serving: f.cancel() # Futures might already be resolved or cancelled elif not f.done(): self.__events.append((f, callback, transferred, key, ov)) ms = 0 def _wait_for_handle(self, handle, timeout, _is_cancel): with QtCore.QMutexLocker(self._lock): return super(_IocpProactor, self)._wait_for_handle(handle, timeout, _is_cancel) def accept(self, listener): with QtCore.QMutexLocker(self._lock): return super(_IocpProactor, self).accept(listener) def connect(self, conn, address): with QtCore.QMutexLocker(self._lock): return super(_IocpProactor, self).connect(conn, address) @with_logger class _EventWorker(QtCore.QThread): def __init__(self, proactor, parent): super().__init__() self.__stop = False self.__proactor = proactor self.__sig_events = parent.sig_events self.__semaphore = QtCore.QSemaphore() def start(self): super().start() self.__semaphore.acquire() def stop(self): self.__stop = True # Wait for thread to end self.wait() def run(self): self._logger.debug('Thread started') self.__semaphore.release() while not self.__stop: events = self.__proactor.select(0.01) if events: self._logger.debug('Got events from poll: %s', events) self.__sig_events.emit(events) self._logger.debug('Exiting thread') @with_logger class _EventPoller: """Polling of events in separate thread.""" def __init__(self, sig_events): self.sig_events = sig_events def start(self, proactor): self._logger.debug('Starting (proactor: %s)...', proactor) self.__worker = _EventWorker(proactor, self) self.__worker.start() def stop(self): self._logger.debug('Stopping worker thread...') self.__worker.stop() qasync-0.24.1/setup.py000066400000000000000000000025761441516710300146220ustar00rootroot00000000000000import os.path import re from setuptools import setup with open("qasync/__init__.py") as f: version = re.search(r'__version__\s+=\s+"(.*)"', f.read()).group(1) desc_path = os.path.join(os.path.dirname(__file__), "README.md") with open(desc_path, encoding="utf8") as desc_file: long_description = desc_file.read() setup( name="qasync", version=version, url="https://github.com/CabbageDevelopment/qasync", author=", ".join( ("Sam McCormack", "Gerard Marull-Paretas", "Mark Harviston", "Arve Knudsen") ), packages=["qasync"], python_requires="~=3.6", license="BSD", description="Implementation of the PEP 3156 Event-Loop with Qt.", long_description=long_description, long_description_content_type="text/markdown", keywords=["Qt", "asyncio"], classifiers=[ "Development Status :: 4 - Beta", "License :: OSI Approved :: BSD License", "Intended Audience :: Developers", "Operating System :: Microsoft :: Windows", "Operating System :: MacOS :: MacOS X", "Operating System :: POSIX", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3 :: Only", "Environment 
:: X11 Applications :: Qt", ], ) qasync-0.24.1/tests/000077500000000000000000000000001441516710300142405ustar00rootroot00000000000000qasync-0.24.1/tests/conftest.py000066400000000000000000000010551441516710300164400ustar00rootroot00000000000000# © 2018 Gerard Marull-Paretas # © 2014 Mark Harviston # © 2014 Arve Knudsen # BSD License import os import logging from pytest import fixture logging.basicConfig( level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ) if os.name == "nt": collect_ignore = ["qasync/_unix.py"] else: collect_ignore = ["qasync/_windows.py"] @fixture(scope="session") def application(): from qasync import QApplication return QApplication([]) qasync-0.24.1/tests/test_qeventloop.py000066400000000000000000000540751441516710300200600ustar00rootroot00000000000000# © 2018 Gerard Marull-Paretas # © 2014 Mark Harviston # © 2014 Arve Knudsen # BSD License import asyncio import logging import sys import os import ctypes import multiprocessing from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor import socket import subprocess import qasync import pytest @pytest.fixture def loop(request, application): lp = qasync.QEventLoop(application) asyncio.set_event_loop(lp) additional_exceptions = [] def fin(): sys.excepthook = orig_excepthook try: lp.close() finally: asyncio.set_event_loop(None) for exc in additional_exceptions: if ( os.name == "nt" and isinstance(exc["exception"], WindowsError) and exc["exception"].winerror == 6 ): # ignore Invalid Handle Errors continue raise exc["exception"] def except_handler(loop, ctx): additional_exceptions.append(ctx) def excepthook(type, *args): lp.stop() orig_excepthook(type, *args) orig_excepthook = sys.excepthook sys.excepthook = excepthook lp.set_exception_handler(except_handler) request.addfinalizer(fin) return lp @pytest.fixture( params=[None, qasync.QThreadExecutor, ThreadPoolExecutor, ProcessPoolExecutor], ) def executor(request): exc_cls = request.param if exc_cls is None: return None exc = exc_cls(1) # FIXME? fixed number of workers? request.addfinalizer(exc.shutdown) return exc ExceptionTester = type( "ExceptionTester", (Exception,), {} ) # to make flake8 not complain class TestCanRunTasksInExecutor: """ Test Cases Concerning running jobs in Executors. This needs to be a class because pickle can't serialize closures, but can serialize bound methods. multiprocessing can only handle pickleable functions. 
""" def test_can_run_tasks_in_executor(self, loop, executor): """Verify that tasks can be run in an executor.""" logging.debug("Loop: {!r}".format(loop)) logging.debug("Executor: {!r}".format(executor)) manager = multiprocessing.Manager() was_invoked = manager.Value(ctypes.c_int, 0) logging.debug("running until complete") loop.run_until_complete(self.blocking_task(loop, executor, was_invoked)) logging.debug("ran") assert was_invoked.value == 1 def test_can_handle_exception_in_executor(self, loop, executor): with pytest.raises(ExceptionTester) as excinfo: loop.run_until_complete( asyncio.wait_for( loop.run_in_executor(executor, self.blocking_failure), timeout=3.0, ) ) assert str(excinfo.value) == "Testing" def blocking_failure(self): logging.debug("raising") try: raise ExceptionTester("Testing") finally: logging.debug("raised!") def blocking_func(self, was_invoked): logging.debug("start blocking_func()") was_invoked.value = 1 logging.debug("end blocking_func()") async def blocking_task(self, loop, executor, was_invoked): logging.debug("start blocking task()") fut = loop.run_in_executor(executor, self.blocking_func, was_invoked) await asyncio.wait_for(fut, timeout=5.0) logging.debug("start blocking task()") def test_can_execute_subprocess(loop): """Verify that a subprocess can be executed.""" async def mycoro(): process = await asyncio.create_subprocess_exec( sys.executable or "python", "-c", "import sys; sys.exit(5)" ) await process.wait() assert process.returncode == 5 loop.run_until_complete(asyncio.wait_for(mycoro(), timeout=3)) def test_can_read_subprocess(loop): """Verify that a subprocess's data can be read from stdout.""" async def mycoro(): process = await asyncio.create_subprocess_exec( sys.executable or "python", "-c", 'print("Hello async world!")', stdout=subprocess.PIPE, ) received_stdout = await process.stdout.readexactly(len(b"Hello async world!\n")) await process.wait() assert process.returncode == 0 assert received_stdout.strip() == b"Hello async world!" loop.run_until_complete(asyncio.wait_for(mycoro(), timeout=3)) def test_can_communicate_subprocess(loop): """Verify that a subprocess's data can be passed in/out via stdin/stdout.""" async def mycoro(): process = await asyncio.create_subprocess_exec( sys.executable or "python", "-c", "print(input())", stdout=subprocess.PIPE, stdin=subprocess.PIPE, ) received_stdout, received_stderr = await process.communicate( b"Hello async world!\n" ) await process.wait() assert process.returncode == 0 assert received_stdout.strip() == b"Hello async world!" 
loop.run_until_complete(asyncio.wait_for(mycoro(), timeout=3)) def test_can_terminate_subprocess(loop): """Verify that a subprocess can be terminated.""" # Start a never-ending process async def mycoro(): process = await asyncio.create_subprocess_exec( sys.executable or "python", "-c", "import time\nwhile True: time.sleep(1)" ) process.terminate() await process.wait() assert process.returncode != 0 loop.run_until_complete(mycoro()) @pytest.mark.raises(ExceptionTester) def test_loop_callback_exceptions_bubble_up(loop): """Verify that test exceptions raised in event loop callbacks bubble up.""" def raise_test_exception(): raise ExceptionTester("Test Message") loop.call_soon(raise_test_exception) loop.run_until_complete(asyncio.sleep(0.1)) def test_loop_running(loop): """Verify that loop.is_running returns True when running.""" async def is_running(): nonlocal loop assert loop.is_running() loop.run_until_complete(is_running()) def test_loop_not_running(loop): """Verify that loop.is_running returns False when not running.""" assert not loop.is_running() def test_get_running_loop_fails_after_completion(loop): """Verify that after loop stops, asyncio._get_running_loop() correctly returns None.""" async def is_running_loop(): nonlocal loop assert asyncio._get_running_loop() == loop loop.run_until_complete(is_running_loop()) assert asyncio._get_running_loop() is None def test_loop_can_run_twice(loop): """Verify that loop is correctly reset as asyncio._get_running_loop() when restarted.""" async def is_running_loop(): nonlocal loop assert asyncio._get_running_loop() == loop loop.run_until_complete(is_running_loop()) loop.run_until_complete(is_running_loop()) def test_can_function_as_context_manager(application): """Verify that a QEventLoop can function as its own context manager.""" with qasync.QEventLoop(application) as loop: assert isinstance(loop, qasync.QEventLoop) loop.call_soon(loop.stop) loop.run_forever() def test_future_not_done_on_loop_shutdown(loop): """Verify RuntimError occurs when loop stopped before Future completed with run_until_complete.""" loop.call_later(0.1, loop.stop) fut = asyncio.Future() with pytest.raises(RuntimeError): loop.run_until_complete(fut) def test_call_later_must_not_coroutine(loop): """Verify TypeError occurs call_later is given a coroutine.""" async def mycoro(): pass with pytest.raises(TypeError): loop.call_soon(mycoro) def test_call_later_must_be_callable(loop): """Verify TypeError occurs call_later is not given a callable.""" not_callable = object() with pytest.raises(TypeError): loop.call_soon(not_callable) def test_call_at(loop): """Verify that loop.call_at works as expected.""" def mycallback(): nonlocal was_invoked was_invoked = True was_invoked = False loop.call_at(loop.time() + 0.05, mycallback) loop.run_until_complete(asyncio.sleep(0.1)) assert was_invoked def test_get_set_debug(loop): """Verify get_debug and set_debug work as expected.""" loop.set_debug(True) assert loop.get_debug() loop.set_debug(False) assert not loop.get_debug() @pytest.fixture def sock_pair(request): """Create socket pair. If socket.socketpair isn't available, we emulate it. """ def fin(): if client_sock is not None: client_sock.close() if srv_sock is not None: srv_sock.close() client_sock = srv_sock = None request.addfinalizer(fin) # See if socketpair() is available. 
have_socketpair = hasattr(socket, "socketpair") if have_socketpair: client_sock, srv_sock = socket.socketpair() return client_sock, srv_sock # Create a non-blocking temporary server socket temp_srv_sock = socket.socket() temp_srv_sock.setblocking(False) temp_srv_sock.bind(("", 0)) port = temp_srv_sock.getsockname()[1] temp_srv_sock.listen(1) # Create non-blocking client socket client_sock = socket.socket() client_sock.setblocking(False) try: client_sock.connect(("localhost", port)) except socket.error as err: # Error 10035 (operation would block) is not an error, as we're doing this with a # non-blocking socket. if err.errno != 10035: raise # Use select to wait for connect() to succeed. import select timeout = 1 readable = select.select([temp_srv_sock], [], [], timeout)[0] if temp_srv_sock not in readable: raise Exception("Client socket not connected in {} second(s)".format(timeout)) srv_sock, _ = temp_srv_sock.accept() return client_sock, srv_sock def test_can_add_reader(loop, sock_pair): """Verify that we can add a reader callback to an event loop.""" def can_read(): if fut.done(): return data = srv_sock.recv(1) if len(data) != 1: return nonlocal got_msg got_msg = data # Indicate that we're done fut.set_result(None) srv_sock.close() def write(): client_sock.send(ref_msg) client_sock.close() ref_msg = b"a" client_sock, srv_sock = sock_pair loop.call_soon(write) exp_num_notifiers = len(loop._read_notifiers) + 1 got_msg = None fut = asyncio.Future() loop._add_reader(srv_sock.fileno(), can_read) assert len(loop._read_notifiers) == exp_num_notifiers, "Notifier should be added" loop.run_until_complete(asyncio.wait_for(fut, timeout=1.0)) assert got_msg == ref_msg def test_can_remove_reader(loop, sock_pair): """Verify that we can remove a reader callback from an event loop.""" def can_read(): data = srv_sock.recv(1) if len(data) != 1: return nonlocal got_msg got_msg = data client_sock, srv_sock = sock_pair got_msg = None loop._add_reader(srv_sock.fileno(), can_read) exp_num_notifiers = len(loop._read_notifiers) - 1 loop._remove_reader(srv_sock.fileno()) assert len(loop._read_notifiers) == exp_num_notifiers, "Notifier should be removed" client_sock.send(b"a") client_sock.close() # Run for a short while to see if we get a read notification loop.call_later(0.1, loop.stop) loop.run_forever() assert got_msg is None, "Should not have received a read notification" def test_remove_reader_after_closing(loop, sock_pair): """Verify that we can remove a reader callback from an event loop.""" client_sock, srv_sock = sock_pair loop._add_reader(srv_sock.fileno(), lambda: None) loop.close() loop._remove_reader(srv_sock.fileno()) def test_remove_writer_after_closing(loop, sock_pair): """Verify that we can remove a reader callback from an event loop.""" client_sock, srv_sock = sock_pair loop._add_writer(client_sock.fileno(), lambda: None) loop.close() loop._remove_writer(client_sock.fileno()) def test_add_reader_after_closing(loop, sock_pair): """Verify that we can remove a reader callback from an event loop.""" client_sock, srv_sock = sock_pair loop.close() with pytest.raises(RuntimeError): loop._add_reader(srv_sock.fileno(), lambda: None) def test_add_writer_after_closing(loop, sock_pair): """Verify that we can remove a reader callback from an event loop.""" client_sock, srv_sock = sock_pair loop.close() with pytest.raises(RuntimeError): loop._add_writer(client_sock.fileno(), lambda: None) def test_can_add_writer(loop, sock_pair): """Verify that we can add a writer callback to an event loop.""" def can_write(): 
def test_can_add_writer(loop, sock_pair):
    """Verify that we can add a writer callback to an event loop."""

    def can_write():
        if not fut.done():
            # Indicate that we're done
            fut.set_result(None)
            client_sock.close()

    client_sock, _ = sock_pair
    fut = asyncio.Future()
    loop._add_writer(client_sock.fileno(), can_write)
    assert len(loop._write_notifiers) == 1, "Notifier should be added"

    loop.run_until_complete(asyncio.wait_for(fut, timeout=1.0))


def test_can_remove_writer(loop, sock_pair):
    """Verify that we can remove a writer callback from an event loop."""
    client_sock, _ = sock_pair

    loop._add_writer(client_sock.fileno(), lambda: None)
    loop._remove_writer(client_sock.fileno())
    assert not loop._write_notifiers, "Notifier should be removed"


def test_add_reader_should_disable_qsocket_notifier_on_callback(loop, sock_pair):
    """Verify that add_reader disables QSocketNotifier during callback."""

    def can_read():
        nonlocal num_calls
        num_calls += 1

        if num_calls == 2:
            # Since we get called again, the QSocketNotifier should've been re-enabled before
            # this call (although disabled during)
            assert not notifier.isEnabled()
            srv_sock.recv(1)
            fut.set_result(None)
            srv_sock.close()
            return

        assert not notifier.isEnabled()

    def write():
        client_sock.send(b"a")
        client_sock.close()

    num_calls = 0
    client_sock, srv_sock = sock_pair
    loop.call_soon(write)

    fut = asyncio.Future()
    loop._add_reader(srv_sock.fileno(), can_read)
    notifier = loop._read_notifiers[srv_sock.fileno()]
    loop.run_until_complete(asyncio.wait_for(fut, timeout=1.0))


def test_add_writer_should_disable_qsocket_notifier_on_callback(loop, sock_pair):
    """Verify that add_writer disables QSocketNotifier during callback."""

    def can_write():
        nonlocal num_calls
        num_calls += 1

        if num_calls == 2:
            # Since we get called again, the QSocketNotifier should've been re-enabled before
            # this call (although disabled during)
            assert not notifier.isEnabled()
            fut.set_result(None)
            client_sock.close()
            return

        assert not notifier.isEnabled()

    num_calls = 0
    client_sock, _ = sock_pair

    fut = asyncio.Future()
    loop._add_writer(client_sock.fileno(), can_write)
    notifier = loop._write_notifiers[client_sock.fileno()]
    loop.run_until_complete(asyncio.wait_for(fut, timeout=1.0))
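

# Design note added for clarity: the two tests above assert that a descriptor's
# QSocketNotifier is disabled while its callback runs and re-enabled once the
# callback returns. Disabling the notifier for the duration of the callback is
# presumably meant to avoid re-entrant or redundant activations while the
# callback is still servicing the socket; the tests only observe this behaviour
# and do not mandate a particular implementation.

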
def test_reader_writer_echo(loop, sock_pair):
    """Verify readers and writers can send data to each other."""
    c_sock, s_sock = sock_pair

    async def mycoro():
        c_reader, c_writer = await asyncio.open_connection(sock=c_sock)
        s_reader, s_writer = await asyncio.open_connection(sock=s_sock)

        data = b"Echo... Echo... Echo..."
        s_writer.write(data)
        await s_writer.drain()
        read_data = await c_reader.readexactly(len(data))
        assert data == read_data
        s_writer.close()

    loop.run_until_complete(asyncio.wait_for(mycoro(), timeout=1.0))


def test_regression_bug13(loop, sock_pair):
    """Verify that a simple handshake between client and server works as expected."""
    c_sock, s_sock = sock_pair
    client_done, server_done = asyncio.Future(), asyncio.Future()

    async def server_coro():
        s_reader, s_writer = await asyncio.open_connection(sock=s_sock)

        s_writer.write(b"1")
        await s_writer.drain()
        assert (await s_reader.readexactly(1)) == b"2"
        s_writer.write(b"3")
        await s_writer.drain()
        server_done.set_result(True)

    result1 = None
    result3 = None

    async def client_coro():
        def cb1():
            nonlocal result1
            assert result1 is None
            loop._remove_reader(c_sock.fileno())
            result1 = c_sock.recv(1)
            loop._add_writer(c_sock.fileno(), cb2)

        def cb2():
            nonlocal result3
            assert result3 is None
            c_sock.send(b"2")
            loop._remove_writer(c_sock.fileno())
            loop._add_reader(c_sock.fileno(), cb3)

        def cb3():
            nonlocal result3
            assert result3 is None
            result3 = c_sock.recv(1)
            client_done.set_result(True)

        loop._add_reader(c_sock.fileno(), cb1)

    asyncio.ensure_future(client_coro())
    asyncio.ensure_future(server_coro())

    both_done = asyncio.gather(client_done, server_done)
    loop.run_until_complete(asyncio.wait_for(both_done, timeout=1.0))

    assert result1 == b"1"
    assert result3 == b"3"


def test_add_reader_replace(loop, sock_pair):
    c_sock, s_sock = sock_pair
    callback_invoked = asyncio.Future()

    called1 = False
    called2 = False

    def any_callback():
        if not callback_invoked.done():
            callback_invoked.set_result(True)
        loop._remove_reader(c_sock.fileno())

    def callback1():
        # the "bad" callback: if this gets invoked, something went wrong
        nonlocal called1
        called1 = True
        any_callback()

    def callback2():
        # the "good" callback: this is the one which should get called
        nonlocal called2
        called2 = True
        any_callback()

    async def server_coro():
        s_reader, s_writer = await asyncio.open_connection(sock=s_sock)
        s_writer.write(b"foo")
        await s_writer.drain()

    async def client_coro():
        loop._add_reader(c_sock.fileno(), callback1)
        loop._add_reader(c_sock.fileno(), callback2)
        await callback_invoked
        loop._remove_reader(c_sock.fileno())
        assert (await loop.sock_recv(c_sock, 3)) == b"foo"

    client_done = asyncio.ensure_future(client_coro())
    server_done = asyncio.ensure_future(server_coro())

    both_done = asyncio.wait(
        [server_done, client_done], return_when=asyncio.FIRST_EXCEPTION
    )
    loop.run_until_complete(asyncio.wait_for(both_done, timeout=0.1))

    assert not called1
    assert called2


def test_add_writer_replace(loop, sock_pair):
    c_sock, s_sock = sock_pair
    callback_invoked = asyncio.Future()

    called1 = False
    called2 = False

    def any_callback():
        if not callback_invoked.done():
            callback_invoked.set_result(True)
        loop._remove_writer(c_sock.fileno())

    def callback1():
        # the "bad" callback: if this gets invoked, something went wrong
        nonlocal called1
        called1 = True
        any_callback()

    def callback2():
        # the "good" callback: this is the one which should get called
        nonlocal called2
        called2 = True
        any_callback()

    async def client_coro():
        loop._add_writer(c_sock.fileno(), callback1)
        loop._add_writer(c_sock.fileno(), callback2)
        await callback_invoked
        loop._remove_writer(c_sock.fileno())

    loop.run_until_complete(asyncio.wait_for(client_coro(), timeout=0.1))

    assert not called1
    assert called2
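

# Note added for clarity, based on the assertions in the two idempotence tests
# below: _remove_reader()/_remove_writer() appear to report whether a notifier
# was actually removed (truthy on the first removal, falsy when nothing was
# registered), so calling them repeatedly for the same descriptor is safe.
# Illustrative sketch only; `sock` stands in for a hypothetical connected socket:
#
#     was_watched = loop._remove_reader(sock.fileno())
#     if not was_watched:
#         pass  # nothing was registered for this file descriptor

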
def test_remove_reader_idempotence(loop, sock_pair):
    fd = sock_pair[0].fileno()

    def cb():
        pass

    removed0 = loop._remove_reader(fd)
    loop._add_reader(fd, cb)
    removed1 = loop._remove_reader(fd)
    removed2 = loop._remove_reader(fd)

    assert not removed0
    assert removed1
    assert not removed2


def test_remove_writer_idempotence(loop, sock_pair):
    fd = sock_pair[0].fileno()

    def cb():
        pass

    removed0 = loop._remove_writer(fd)
    loop._add_writer(fd, cb)
    removed1 = loop._remove_writer(fd)
    removed2 = loop._remove_writer(fd)

    assert not removed0
    assert removed1
    assert not removed2


def test_scheduling(loop, sock_pair):
    s1, s2 = sock_pair
    fd = s1.fileno()
    cb_called = asyncio.Future()

    def writer_cb(fut):
        if fut.done():
            cb_called.set_exception(ValueError("writer_cb called twice"))
        fut.set_result(None)

    def fut_cb(fut):
        loop._remove_writer(fd)
        cb_called.set_result(None)

    fut = asyncio.Future()
    fut.add_done_callback(fut_cb)
    loop._add_writer(fd, writer_cb, fut)
    loop.run_until_complete(cb_called)


@pytest.mark.xfail(
    "sys.version_info < (3,4)",
    reason="Doesn't work on python older than 3.4",
)
def test_exception_handler(loop):
    handler_called = False
    coro_run = False
    loop.set_debug(False)

    async def future_except():
        nonlocal coro_run
        coro_run = True
        loop.stop()
        raise ExceptionTester()

    def exct_handler(loop, data):
        nonlocal handler_called
        handler_called = True

    loop.set_exception_handler(exct_handler)
    asyncio.ensure_future(future_except())
    loop.run_forever()

    assert coro_run
    assert handler_called


def test_exception_handler_simple(loop):
    handler_called = False

    def exct_handler(loop, data):
        nonlocal handler_called
        handler_called = True

    loop.set_exception_handler(exct_handler)
    fut1 = asyncio.Future()
    fut1.set_exception(ExceptionTester())
    asyncio.ensure_future(fut1)
    del fut1
    loop.call_later(0.1, loop.stop)
    loop.run_forever()
    assert handler_called


def test_not_running_immediately_after_stopped(loop):
    async def mycoro():
        assert loop.is_running()
        await asyncio.sleep(0)
        loop.stop()
        assert not loop.is_running()

    assert not loop.is_running()
    loop.run_until_complete(mycoro())
    assert not loop.is_running()
qasync-0.24.1/tests/test_qthreadexec.py000066400000000000000000000023531441516710300201510ustar00rootroot00000000000000# © 2018 Gerard Marull-Paretas
# © 2014 Mark Harviston
# © 2014 Arve Knudsen
# BSD License

import pytest
import qasync


@pytest.fixture
def executor(request):
    exe = qasync.QThreadExecutor(5)
    request.addfinalizer(exe.shutdown)
    return exe


@pytest.fixture
def shutdown_executor():
    exe = qasync.QThreadExecutor(5)
    exe.shutdown()
    return exe


def test_shutdown_after_shutdown(shutdown_executor):
    with pytest.raises(RuntimeError):
        shutdown_executor.shutdown()


def test_ctx_after_shutdown(shutdown_executor):
    with pytest.raises(RuntimeError):
        with shutdown_executor:
            pass


def test_submit_after_shutdown(shutdown_executor):
    with pytest.raises(RuntimeError):
        shutdown_executor.submit(None)


def test_stack_recursion_limit(executor):
    # Test that worker threads have sufficient stack size for the default
    # sys.getrecursionlimit. If not, this should fail with SIGSEGV or SIGBUS
    # (or even SIGILL?)
    def rec(a, *args, **kwargs):
        rec(a, *args, **kwargs)

    fs = [executor.submit(rec, 1) for _ in range(10)]
    for f in fs:
        with pytest.raises(RecursionError):
            f.result()
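

# Usage sketch added for illustration (not part of the original test suite).
# It shows how QThreadExecutor is typically combined with an asyncio event
# loop: blocking work is handed to Qt-managed worker threads through
# loop.run_in_executor(). The helper names below are hypothetical.
async def _example_offload_blocking_call(loop):
    def blocking_work(x):
        # Stands in for any CPU-bound or otherwise blocking call.
        return x * 2

    with qasync.QThreadExecutor(2) as executor:
        # run_in_executor returns an awaitable; awaiting it keeps the
        # Qt/asyncio loop responsive while a worker thread does the work.
        return await loop.run_in_executor(executor, blocking_work, 21)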