pax_global_header00006660000000000000000000000064145605405670014526gustar00rootroot0000000000000052 comment=6aeb8a6816585a4bda67e9ea1b125568c18fa702 zigpy-0.62.3/000077500000000000000000000000001456054056700127605ustar00rootroot00000000000000zigpy-0.62.3/.github/000077500000000000000000000000001456054056700143205ustar00rootroot00000000000000zigpy-0.62.3/.github/workflows/000077500000000000000000000000001456054056700163555ustar00rootroot00000000000000zigpy-0.62.3/.github/workflows/ci.yml000066400000000000000000000004531456054056700174750ustar00rootroot00000000000000name: CI on: push: pull_request: ~ jobs: shared-ci: uses: zigpy/workflows/.github/workflows/ci.yml@main with: CODE_FOLDER: zigpy CACHE_VERSION: 2 PRE_COMMIT_CACHE_PATH: ~/.cache/pre-commit PYTHON_VERSION_DEFAULT: 3.9.15 MINIMUM_COVERAGE_PERCENTAGE: 99 zigpy-0.62.3/.github/workflows/matchers/000077500000000000000000000000001456054056700201635ustar00rootroot00000000000000zigpy-0.62.3/.github/workflows/matchers/codespell.json000066400000000000000000000004001456054056700230220ustar00rootroot00000000000000{ "problemMatcher": [ { "owner": "codespell", "severity": "warning", "pattern": [ { "regexp": "^(.+):(\\d+):\\s(.+)$", "file": 1, "line": 2, "message": 3 } ] } ] } zigpy-0.62.3/.github/workflows/matchers/flake8.json000066400000000000000000000011011456054056700222210ustar00rootroot00000000000000{ "problemMatcher": [ { "owner": "flake8-error", "severity": "error", "pattern": [ { "regexp": "^(.*):(\\d+):(\\d+):\\s([EF]\\d{3}\\s.*)$", "file": 1, "line": 2, "column": 3, "message": 4 } ] }, { "owner": "flake8-warning", "severity": "warning", "pattern": [ { "regexp": "^(.*):(\\d+):(\\d+):\\s([CDNW]\\d{3}\\s.*)$", "file": 1, "line": 2, "column": 3, "message": 4 } ] } ] } zigpy-0.62.3/.github/workflows/matchers/python.json000066400000000000000000000005201456054056700223740ustar00rootroot00000000000000{ "problemMatcher": [ { "owner": "python", "pattern": [ { "regexp": 
"^\\s*File\\s\\\"(.*)\\\",\\sline\\s(\\d+),\\sin\\s(.*)$", "file": 1, "line": 2 }, { "regexp": "^\\s*raise\\s(.*)\\(\\'(.*)\\'\\)$", "message": 2 } ] } ] } zigpy-0.62.3/.github/workflows/matchers/ruff.json000066400000000000000000000011661456054056700220240ustar00rootroot00000000000000{ "problemMatcher": [ { "owner": "ruff-error", "severity": "error", "pattern": [ { "regexp": "^(.*):(\\d+):(\\d+):\\s([EF]\\d{3}\\s.*)$", "file": 1, "line": 2, "column": 3, "message": 4 } ] }, { "owner": "ruff-warning", "severity": "warning", "pattern": [ { "regexp": "^(.*):(\\d+):(\\d+):\\s([CDNW]\\d{3}\\s.*)$", "file": 1, "line": 2, "column": 3, "message": 4 } ] } ] }zigpy-0.62.3/.github/workflows/publish-to-pypi.yml000066400000000000000000000003621456054056700221460ustar00rootroot00000000000000name: Publish distributions to PyPI on: release: types: - published jobs: shared-build-and-publish: uses: zigpy/workflows/.github/workflows/publish-to-pypi.yml@main secrets: PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }} zigpy-0.62.3/.github/workflows/stale.yml000066400000000000000000000057141456054056700202170ustar00rootroot00000000000000name: Stale # yamllint disable-line rule:truthy on: schedule: - cron: "0 * * * *" workflow_dispatch: jobs: stale: runs-on: ubuntu-latest steps: # The 180 day stale policy # Used for: # - Issues & PRs # - No PRs marked as no-stale # - No issues marked as no-stale or help-wanted - name: 180 days stale issues & PRs policy uses: actions/stale@v8 with: repo-token: ${{ secrets.GITHUB_TOKEN }} days-before-stale: 180 days-before-close: 7 operations-per-run: 150 remove-stale-when-updated: true stale-issue-label: "stale" exempt-issue-labels: "no stale,help wanted" stale-issue-message: > There hasn't been any activity on this issue recently. Due to the high number of incoming GitHub notifications, we have to clean some of the old issues, as many of them have already been resolved with the latest updates. 
Please make sure to update to the latest version and check if that solves the issue. Let us know if that works for you by adding a comment 👍 This issue has now been marked as stale and will be closed if no further activity occurs. Thank you for your contributions. stale-pr-label: "stale" exempt-pr-labels: "no stale" stale-pr-message: > There hasn't been any activity on this pull request recently. This pull request has been automatically marked as stale because of that and will be closed if no further activity occurs within 7 days. Thank you for your contributions. # The 60 day stale policy for issues # Used for: # - Issues that are pending more information (incomplete issues) # - No Issues marked as no-stale or help-wanted # - No PRs (-1) - name: Needs more information stale issues policy uses: actions/stale@v8 with: repo-token: ${{ secrets.GITHUB_TOKEN }} only-labels: "needs more information" days-before-stale: 60 days-before-close: 7 days-before-pr-close: -1 operations-per-run: 50 remove-stale-when-updated: true stale-issue-label: "stale" exempt-issue-labels: "no stale,help wanted" stale-issue-message: > There hasn't been any activity on this issue recently. Due to the high number of incoming GitHub notifications, we have to clean some of the old issues, as many of them have already been resolved with the latest updates. Please make sure to update to the latest version and check if that solves the issue. Let us know if that works for you by adding a comment 👍 This issue has now been marked as stale and will be closed if no further activity occurs. Thank you for your contributions. 
zigpy-0.62.3/.gitignore000066400000000000000000000015101456054056700147450ustar00rootroot00000000000000# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python env/ build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ *.egg-info/ .installed.cfg *.egg # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache coverage.xml *,cover .pytest_cache/ # Translations *.mo *.pot # Sphinx documentation docs/_build/ # PyBuilder target/ # pyenv .python-version # dotenv .env # virtualenv .venv/ venv/ ENV/ # Editor temp files .*.swp # Visual Studio Code .vscode .DS_Store zigpy-0.62.3/.pre-commit-config.yaml000066400000000000000000000021731456054056700172440ustar00rootroot00000000000000repos: - repo: https://github.com/asottile/pyupgrade rev: v3.8.0 hooks: - id: pyupgrade args: [--py38-plus] - repo: https://github.com/PyCQA/autoflake rev: v2.2.0 hooks: - id: autoflake - repo: https://github.com/psf/black rev: 23.3.0 hooks: - id: black args: - --quiet - repo: https://github.com/pycqa/flake8 rev: 6.0.0 hooks: - id: flake8 entry: pflake8 additional_dependencies: - pyproject-flake8==6.0.0.post1 - repo: https://github.com/PyCQA/isort rev: 5.12.0 hooks: - id: isort - repo: https://github.com/codespell-project/codespell rev: v2.2.5 hooks: - id: codespell additional_dependencies: [tomli] args: ["--toml", "pyproject.toml"] - repo: https://github.com/pre-commit/mirrors-mypy rev: v0.982 hooks: - id: mypy additional_dependencies: - pydantic - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.0.277 hooks: - id: ruff args: ["--fix", "--exit-non-zero-on-fix", "--config", "pyproject.toml"] 
zigpy-0.62.3/CODE_OF_CONDUCT.md000066400000000000000000000132631456054056700155640ustar00rootroot00000000000000# Contributor Covenant Code of Conduct for zigpy ## Our Pledge We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. ## Our Standards Examples of behavior that contributes to a positive environment for our community include: * Demonstrating empathy and kindness toward other people * Being respectful of differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback * Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience * Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: * The use of sexualized language or imagery, and sexual attention or advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or email address, without their explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. 
Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. ## Scope This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at [safety@home-assistant.io][email] or by using the report/flag feature of the medium used. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. ## Enforcement Guidelines Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. Warning **Community Impact**: A violation through a single incident or series of actions. **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. 
This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. Permanent Ban **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available [here][version]. Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder][mozilla]. ## Adoption This Code of Conduct was first adopted on January 21st, 2017, and announced in [this][coc-blog] blog post and has been updated on May 25th, 2020 to version 2.0 of the [Contributor Covenant][homepage] as announced in [this][coc2-blog] blog post. For answers to common questions about this code of conduct, see the FAQ at . Translations are available at . 
[coc-blog]: https://www.home-assistant.io/blog/2017/01/21/home-assistant-governance/ [coc2-blog]: https://www.home-assistant.io/blog/2020/05/25/code-of-conduct-updated/ [email]: mailto:safety@home-assistant.io [homepage]: http://contributor-covenant.org [mozilla]: https://github.com/mozilla/diversity [version]: https://www.contributor-covenant.org/version/2/0/code_of_conduct.html zigpy-0.62.3/CONTRIBUTING.md000066400000000000000000000434541456054056700152230ustar00rootroot00000000000000# Contribute to the zigpy project This file contains information for end-users, testers and developers on how-to contribute to the zigpy project. It will include guides on how to how to install, use, troubleshoot, debug, code and more. You can contribute to this project either as an normal end-user, a tester (advanced user contributing constructive issue/bug-reports) or as a developer contributing enhancing code. ## How to contribute as an end-user If you think that you are having problems due to a bug then please see the section below on reporting issues as a tester, but be aware that reporting issues put higher responsibility on your active involvement on your part as a tester. Some developers might be also interested in receiving donations in the form of money or hardware such as Zigbee modules and devices, and even if such donations are most often donated with no strings attached it could in many cases help the developers motivation and indirectly improve the development of this project. Sometimes it might just be simpler to just donate money earmarked to specifically let a willing developer buy the exact same type Zigbee device that you are having issues with to be able to replicate the issue themselves in order to troubleshoot and hopefully also solve the problem. Consider submitting a post on GitHub projects issues tracker about willingness to making a donation (please see section below on posing issues). 
### How to report issues or bugs as a tester Issues or bugs are normally first to be submitted upstream to the software/project that is utilizing zigpy and its radio libraries, (like for example Home Assistant), however if and when the issue is determined to be in the zigpy or underlying radio library then you should continue by submitting a detailed issue/bug report via the GitHub projects issues tracker. Always be sure to first check if there is not already an existing issue posted with the same description before posting a new issue. - https://help.github.com/en/github/managing-your-work-on-github/creating-an-issue - https://guides.github.com/features/issues/ ### Testing new releases Testing a new release of the zigpy library before it is released in Home Assistant. If you are using Supervised Home Assistant (formerly known as the Hassio/Hass.io distro): - Add https://github.com/home-assistant/hassio-addons-development as "add-on" repository - Install "Custom deps deployment" addon - Update config like: ``` pypi: - zigpy==0.20.0 apk: [] ``` where 0.20.0 is the new version - Start the addon If you are instead using some custom python installation of Home Assistant then do this: - Activate your python virtual env - Update package with ``pip`` ``` pip install zigpy==0.20.0 ### Troubleshooting For troubleshooting with Home Assistant, the general recommendation is to first only enable DEBUG logging for homeassistant.core and homeassistant.components.zha in Home Assistant, then look in the home-assistant.log file and try to get the Home Assistant community to exhausted their combined troubleshooting knowledge of the ZHA component before posting issue directly to a radio library, like example zigpy-deconz or zigpy-xbee. 
That is, begin with checking debug logs for Home Assistant core and the ZHA component first, (troubleshooting/debugging from the top down instead of from the bottom up), trying to getting help via Home Assistant community forum before moving on to posting debug logs to zigpy and radio libraries. This is a general suggestion to help filter away common problems and not flood the zigpy-cc developer(s) with too many logs. Please also try the very latest versions of zigpy and the radio library, (see the section above about "Testing new releases"), and only if you still have the same issues with the latest versions then enable debug logging for zigpy and the radio libraries in Home Assistant in addition to core and zha. Once enabled debug logging for all those libraries in Home Assistant you should try to reproduce the problem and then raise an issue to the zigpy repo (or to a specific radio library) repo with a copy of those logs. To enable debugging in Home Assistant to get debug logs, either update logger configuration section in configuration.yaml or call logger.set_default_level service with {"level": "debug"} data. 
Check logger component configuration where you want something this in your configuration.yaml logger: default: info logs: asyncio: debug homeassistant.core: debug homeassistant.components.zha: debug zigpy: debug bellows: debug zigpy_znp: debug zigpy_xbee: debug zigpy_deconz: debug zigpy_zigate: debug ## How to contribute as a developer If you are looking to make a contribution as a developer to this project we suggest that you follow the steps in these guides: - https://github.com/firstcontributions/first-contributions/blob/master/README.md - https://github.com/firstcontributions/first-contributions/blob/master/github-desktop-tutorial.md Code changes or additions can then be submitted to this project on GitHub via pull requests: - https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-requests - https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request In general when contributing code to this project it is encouraged that you try to follow the coding standards: - First [raise issues on GitHub](https://github.com/zigpy/zigpy/issues) before working on an enhancement to provide coordination with other contributors. - Try to keep each pull request short and only a single PR per enhancement as this makes tracking and reviewing easier. - All code is formatted with black. The check format script that runs in CI will ensure that code meets this requirement and that it is correctly formatted with black. Instructions for installing black in many editors can be found here: https://github.com/psf/black#editor-integration - Ideally, you should aim to achieve full coverage of any code changes with tests. - Recommend read and follow [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html). - Recommend read and follow [Clifford Programming Style](http://www.clifford.at/style.html). 
- Recommend code style use [standard naming conventions for Python](https://medium.com/@dasagrivamanu/python-naming-conventions-the-10-points-you-should-know-149a9aa9f8c7). - Recommend use [Semantic Versioning](http://semver.org/) for libraries and dependencies if possible. - Contributions must be your own and you must agree with the license. - All code for this project should aim to be licensed under [GNU GENERAL PUBLIC LICENSE Version 3](https://raw.githubusercontent.com/zigpy/zigpy/dev/LICENSE). ### Installation for use in a new project #### Prerequicites It is recommended that code is formatted with `black` and sorted with `isort`. The check format script that runs in CI will ensure that code meets this requirement and that it is correctly formatted with black. Instructions for installing black in many editors can be found here: https://github.com/psf/black#editor-integration - https://github.com/psf/black - https://github.com/PyCQA/isort #### Setup To setup a development environment, fork the repository and create a virtual environment: ```shell $ git clone git@github.com:youruser/zigpy.git $ cd zigpy $ virtualenv -p python3.8 venv $ source venv/bin/activate (venv) $ pip install --upgrade pip pre-commit tox (venv) $ pre-commit install # install pre-commit as a Git hook (venv) $ pip install -e '.[testing]' # installs zigpy+testing deps into the venv in dev mode ``` At this point `black` and `isort` will be run by the pre-commit hook, reformatting your code automatically to match the rest of the project. ### Unit testing Run `pytest -lv`, which will show you a stack trace and all the local variables when something breaks. It is recommended that you install Python 3.8, 3.9, 3.10 and 3.11 so that you can run `tox` from the root project folder and see exactly what the CI system will tell you without having to wait for Github Actions or Coveralls. Code coverage information will be written by tox to `htmlcov/index.html`. 
### The zigpy API This section is meant to describe the zigpy API (Application Programming Interface) and how-to to use it. #### Application * raw_device_initialized * device_initialized * device_removed * device_joined * device_left #### Device * node_descriptor_updated * device_init_failure * device_relays_updated #### Endpoint * unknown_cluster_message * member_added * member_removed #### Group * group_added * group_member_added * group_removed * group_removed #### ZCL Commands * cluster_command * general_command * attribute_updated * device_announce * permit_duration ### Developer references Reference collections for different hardware specific Zigbee Stack and related manufacturer documentation. - https://github.com/zigpy/zigpy/discussions/595 Silicon Labs video playlist of ZigBee Concepts: Architecture basics, MAC/PHY, node types, and application profiles - https://www.youtube.com/playlist?list=PL-awFRrdECXvAs1mN2t2xaI0_bQRh2AqD ### zigpy wiki and communication channels - https://github.com/zigpy/zigpy/wiki - https://github.com/zigpy/zigpy/discussions - https://github.com/zigpy/zigpy/issues ### Zigbee specifications - [Zigbee PRO 2017 (R22) Protocol Specification](https://zigbeealliance.org/wp-content/uploads/2019/11/docs-05-3474-21-0csg-zigbee-specification.pdf) - [Zigbee Cluster Library (R8)](https://zigbeealliance.org/wp-content/uploads/2021/10/07-5123-08-Zigbee-Cluster-Library.pdf) - [Zigbee Base Device Behavior Specification (V1.0)](https://zigbeealliance.org/wp-content/uploads/zip/zigbee-base-device-behavior-bdb-v1-0.zip) - [Zigbee Lighting & Occupancy Device Specification (V1.0)](https://zigbeealliance.org/wp-content/uploads/2019/11/docs-15-0014-05-0plo-Lighting-OccupancyDevice-Specification-V1.0.pdf) - [Zigbee Primer](https://docs.smartthings.com/en/latest/device-type-developers-guide/zigbee-primer.html) ## Official release packages available via PyPI New packages of tagged versions are also released via the "zigpy" project on PyPI - 
https://pypi.org/project/zigpy/ - https://pypi.org/project/zigpy/#history - https://pypi.org/project/zigpy/#files Older packages of tagged versions are still available on the "zigpy-homeassistant" project on PyPI - https://pypi.org/project/zigpy-homeassistant/ Packages of tagged versions of the radio libraries are released via separate projects on PyPI - https://pypi.org/project/zigpy/ - https://pypi.org/project/zha-quirks/ - https://pypi.org/project/bellows/ - https://pypi.org/project/zigpy-znp/ - https://pypi.org/project/zigpy-deconz/ - https://pypi.org/project/zigpy-xbee/ - https://pypi.org/project/zigpy-zigate/ - https://pypi.org/project/zigpy-cc/ (obsolete as replaced by zigpy-znp) ## Related projects ### zigpy-cli (zigpy command line interface) [zigpy-cli](https://github.com/zigpy/zigpy-cli) is a unified command line interface for zigpy radios. The goal of this project is to allow low-level network management from an intuitive command line interface and to group useful Zigbee tools into a single binary. ### ZHA Device Handlers ZHA deviation handling in Home Assistant relies on the third-party [ZHA Device Handlers](https://github.com/zigpy/zha-device-handlers) project (also known unders zha-quirks package name on PyPI). Zigbee devices that deviate from or do not fully conform to the standard specifications set by the [Zigbee Alliance](https://www.zigbee.org) may require the development of custom [ZHA Device Handlers](https://github.com/zigpy/zha-device-handlers) (ZHA custom quirks handler implementation) to for all their functions to work properly with the ZHA component in Home Assistant. These ZHA Device Handlers for Home Assistant can thus be used to parse custom messages to and from non-compliant Zigbee devices. 
The custom quirks implementations for zigpy implemented as ZHA Device Handlers for Home Assistant are a similar concept to that of [Hub-connected Device Handlers for the SmartThings platform](https://docs.smartthings.com/en/latest/device-type-developers-guide/) as well as that of [zigbee-herdsman converters as used by Zigbee2mqtt](https://www.zigbee2mqtt.io/how_tos/how_to_support_new_devices.html), meaning they are each virtual representations of a physical device that expose additional functionality that is not provided out-of-the-box by the existing integration between these platforms. ### ZHA integration component for Home Assistant [ZHA integration component for Home Assistant](https://www.home-assistant.io/integrations/zha/) is a reference implementation of the zigpy library as integrated into the core of **[Home Assistant](https://www.home-assistant.io)** (a Python based open source home automation software). There are also other GUI and non-GUI projects for Home Assistant's ZHA components which builds on or depends on its features and functions to enhance or improve its user-experience, some of those are listed and linked below. #### ZHA Toolkit [ZHA Toolkit](https://github.com/mdeweerd/zha-toolkit) is a custom service for "rare" Zigbee operations using the [ZHA integration component](https://www.home-assistant.io/integrations/zha) in [Home Assistant](https://www.home-assistant.io/). The purpose of ZHA Toolkit and its Home Assistant 'Services' feature, is to provide direct control over low level zigbee commands provided in ZHA or zigpy that are not otherwise available or too limited for some use cases. 
ZHA Toolkit can also; serve as a framework to do local low level coding (the modules are reloaded on each call), provide access to some higher level commands such as ZNP backup (and restore), make it easier to perform one-time operations where (some) Zigbee knowledge is sufficient and avoiding the need to understand the inner workings of ZHA or Zigpy (methods, quirks, etc). #### ZHA Custom [zha_custom](https://github.com/Adminiuga/zha_custom) (unmaintained project) is a custom component package for Home Assistant (with its ZHA component for zigpy integration) that acts as zigpy commands service wrapper, when installed it allows you to enter custom commands via to zigy to example change advanced configuration and settings that are not available in the UI. #### ZHA Map [zha-map](https://github.com/zha-ng/zha-map) for Home Assistant's ZHA component can build a Zigbee network topology map. #### ZHA Network Visualization Card [zha-network-visualization-card](https://github.com/dmulcahey/zha-network-visualization-card) was a custom Lovelace element for Home Assistant which visualize the Zigbee network for the ZHA component. #### ZHA Network Card [zha-network-card](https://github.com/dmulcahey/zha-network-card) was a custom Lovelace card for Home Assistant that displays ZHA component Zigbee network and device information in Home Assistant #### Zigzag [Zigzag](https://github.com/Samantha-uk/zigzag-v1) was a custom card/panel for [Home Assistant](https://www.home-assistant.io/) that displays a graphical layout of Zigbee devices and the connections between them. Zigzag could be installed as a panel or a custom card and relies on the data provided by the [zha-map](https://github.com/zha-ng/zha-map) integration component. #### ZHA Device Exporter [zha-device-exporter](https://github.com/dmulcahey/zha-device-exporter) is a custom component for Home Assistant to allow the ZHA component to export lists of Zigbee devices. 
#### ZHA Custom Radios [zha-custom-radios](https://github.com/zha-ng/zha-custom-radios) A now obsolete custom component package for Home Assistant (with its ZHA component for zigpy integration) that allows users to test out new zigpy radio libraries and hardware modules before they have officially been integrated into ZHA. This enables developers and testers to test new or updated zigpy radio modules without having to modify the Home Assistant source code. #### Zigpy Deconz Parser [zigpy-deconz-parser](https://github.com/zha-ng/zigpy-deconz-parser) allow you to parse Home Assistant's ZHA component debug log using `zigpy-deconz` library if you are using a deCONZ based adapter like ConBee or RaspBee. ### Zigbee for Domoticz Plugin [Zigbee for Domoticz Plugin](https://www.domoticz.com/wiki/ZigbeeForDomoticz) is and addon for [Domoticz home automation software](https://www.domoticz.com/) with hardware independent Zigbee Coordinator support achieved via dependency on [zigpy], with the exception of Zigate (which it still continues to manage and handle in native mode as this plugin was originally the mature "Zigate plugin" for Domoticz). Domoticz-Zigbee project available at https://github.com/zigbeefordomoticz/Domoticz-Zigbee and wiki at https://zigbeefordomoticz.github.io/wiki/ ### Zigbee for Jeedom [Zigbee plugin for Jeedom](https://doc.jeedom.com/en_US/plugins/automation%20protocol/zigbee/) is and official addon for [Jeedom home automation software]([https://www.domoticz.com/](https://jeedom.com/en/)) which depends on [zigpy] for hardware independent Zigbee Coordinator support. 
While free and open source licensed the source code for this Zigbee plugin is currently not available for direct download on a public website, as instead independent developers and users of Jeedom can only download the code by installing Jeedom and [purchasing the plugin from their Jeedom online marketplace for around €6](https://market.jeedom.com/index.php?v=d&p=market_display&id=4050) (at least as it stands in May in 2022). ### ZigCoHTTP [ZigCoHTTP](https://github.com/daniel17903/ZigCoHTTP) (unmaintained and now abandoned) was a stand-alone python application project that creates a ZigBee network using zigpy and bellows. ZigBee devices joining this network can be controlled via a HTTP API. It was developed for a Raspberry Pi using a [Matrix Creator Board](https://www.matrix.one/products/creator) but should also work with other computers with Silicon Labs Zigbee hardware, or with other Zigbee hardware if replace bellows with other radio library for zigpy. zigpy-0.62.3/COPYING000066400000000000000000000012151456054056700140120ustar00rootroot00000000000000zigpy Copyright (C) 2018 Russell Cloran This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . 
zigpy-0.62.3/Contributors.md000066400000000000000000000021071456054056700157770ustar00rootroot00000000000000# Contributors - [Russell Cloran] (https://github.com/rcloran) - [Alexei Chetroi] (https://github.com/Adminiuga) - [damarco] (https://github.com/damarco) - [Andreas Bomholtz] (https://github.com/AndreasBomholtz) - [puddly] (https://github.com/puddly) - [presslab-us] (https://github.com/presslab-us) - [Igor Bernstein] (https://github.com/igorbernstein2) - [David F. Mulcahey] (https://github.com/dmulcahey) - [Yoda-x] (https://github.com/Yoda-x) - [Solomon_M] (https://github.com/zalke) - [Pascal Vizeli] (https://github.com/pvizeli) - [prairiesnpr] (https://github.com/prairiesnpr) - [Jurriaan Pruis] (https://github.com/jurriaan) - [Marcel Hoppe] (https://github.com/hobbypunk90) - [felixstorm] (https://github.com/felixstorm) - [Dinko Bajric] (https://github.com/dbajric) - [Abílio Costa] (https://github.com/abmantis) - [https://github.com/SchaumburgM] (https://github.com/SchaumburgM) - [https://github.com/Nemesis24] (https://github.com/Nemesis24) - [Hedda] (https://github.com/Hedda) - [Andreas Setterlind] (https://github.com/Gamester17) - [lisongjun] (https://github.com/lisongjun12) zigpy-0.62.3/LICENSE000066400000000000000000001045131456054056700137710ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. 
We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. 
The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. 
Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. 
The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. 
Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. 
You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. 
You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. 
Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. 
If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. 
When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. 
If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. 
If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. 
For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. 
You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. 
SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . 
zigpy-0.62.3/README.md000066400000000000000000000166601456054056700142500ustar00rootroot00000000000000# zigpy [![Build](https://github.com/zigpy/zigpy/workflows/CI/badge.svg?branch=dev)](https://github.com/zigpy/zigpy/workflows/CI/badge.svg?branch=dev) [![Coverage Status](https://codecov.io/gh/zigpy/zigpy/branch/dev/graph/badge.svg)](https://codecov.io/gh/zigpy/zigpy) **[zigpy](https://github.com/zigpy/zigpy)** is a hardware independent **[Zigbee protocol stack](https://en.wikipedia.org/wiki/Zigbee)** integration project to implement **[Zigbee](https://www.zigbee.org/)** standard specifications as a Python 3 library. Zigbee integration via zigpy allows you to connect one of many off-the-shelf Zigbee Coordinator adapters using one of the available Zigbee radio library modules compatible with zigpy to control Zigbee based devices. There is currently support for controlling Zigbee device types such as binary sensors (e.g., motion and door sensors), sensors (e.g., temperature sensors), lights, switches, buttons, covers, fans, climate control equipment, locks, and intruder alarm system devices. Note that Zigbee Green Power devices [currently are unsupported](https://github.com/zigpy/zigpy/issues/341). Zigbee stacks and hardware from many different hardware chip manufacturers are supported via radio libraries which translate their proprietary communication protocol into a common API which is shared among all radio libraries for zigpy. If some Zigbee stack or Zigbee Coordinator hardware for other manufacturers is not supported by yet zigpy it is possible for any independent developer to step-up and develop a new radio library for zigpy which translates its proprietary communication protocol into the common API that zigpy can understand. 
zigpy contains common code implementing ZCL (Zigbee Cluster Library) and ZDO (Zigbee Device Object) application state management which is being used by various radio libraries implementing the actual interface with the radio modules from different manufacturers. The separate radio libraries interface with radio hardware adapters/modules over USB and GPIO using different native UART serial protocols. The **[ZHA integration component for Home Assistant](https://www.home-assistant.io/integrations/zha/)**, the [Zigbee Plugin for Domoticz](https://www.domoticz.com/wiki/ZigbeeForDomoticz), and the [Zigbee Plugin for Jeedom](https://doc.jeedom.com/en_US/plugins/automation%20protocol/zigbee/) (competing open-source home automation software) are all using [zigpy libraries](https://github.com/zigpy/) as dependencies, as such they could be used as references of different implementations if looking to integrate a Zigbee solution into your application. ### Zigbee device OTA updates zigpy have ability to download and perform Zigbee OTAU (Over-The-Air Updates) of Zigbee devices firmware. The Zigbee OTA update firmware image files should conform to standard Zigbee OTA format and OTA provider source URLs need to be published for public availability. Updates from a local OTA update directory also is also supported and can be used as an option for offline firmware updates if user provide correct Zigbee OTA formatted firmware files themselves. Support for automatic download from existing online OTA providers in zigpy OTA provider code is currently only available for IKEA, Inovelli, LEDVANCE/OSRAM, SALUS/Computime, and SONOFF/ITEAD devices. Support for additional OTA providers for other manufacturers devices could be added to zigpy in the future, if device manufacturers publish their firmware images publicly and developers contribute the needed download code for them. 
## How to install and test, report bugs, or contribute to this project For specific instructions on how-to install and test zigpy or contribute bug-reports and code to this project please see the guidelines in the CONTRIBUTING.md file: - [Guidelines in CONTRIBUTING.md](./CONTRIBUTING.md) This CONTRIBUTING.md file will contain information about using zigpy, testing new releases, troubleshooting and bug-reporting as, as well as library + code instructions for developers and more. This file also contain short summeries and links to other related projects that directly or indirectly depends in zigpy libraries. You can contribute to this project either as an end-user, a tester (advanced user contributing constructive issue/bug-reports) or as a developer contributing code. ## Compatible Zigbee coordinator hardware Radio libraries for zigpy are separate projects with their own repositories and include **[bellows](https://github.com/zigpy/bellows)** (for communicating with Silicon Labs EmberZNet based radios), **[zigpy-deconz](https://github.com/zigpy/zigpy-deconz)** (for communicating with deCONZ based radios from Dresden Elektronik), and **[zigpy-xbee](https://github.com/zigpy/zigpy-xbee)** (for communicating with XBee based Zigbee radios), **[zigpy-zigate](https://github.com/zigpy/zigpy-zigate)** for communicating with ZiGate based radios, **[zigpy-znp](https://github.com/zha-ng/zigpy-znp)** or **[zigpy-cc](https://github.com/zigpy/zigpy-cc)** for communicating with Texas Instruments based radios that have Z-Stack ZNP coordinator firmware. Note! Zigbee 3.0 support or not in zigpy depends primarily on your Zigbee coordinator hardware and its firmware. Some Zigbee coordinator hardware support Zigbee 3.0 but might be shipped with an older firmware which does not, in which case may want to upgrade the firmware manually yourself. 
Some other Zigbee coordinator hardware may not support a firmware that is capable of Zigbee 3.0 at all but can still be fully functional and feature complete for your needs, (this is very common as many if not most Zigbee devices do not yet Zigbee 3.0 or are backwards-compable with a Zigbee profile that is support by your Zigbee coordinator hardware and its firmware). As a general rule, newer Zigbee coordinator hardware released can normally support Zigbee 3.0 firmware and it is up to its manufacturer to make such firmware available for them. ### Compatible zigpy radio libraries - **Digi XBee** based Zigbee radios via the [zigpy-xbee](https://github.com/zigpy/zigpy-xbee) library for zigpy. - **dresden elektronik** deCONZ based Zigbee radios via the [zigpy-deconz](https://github.com/zigpy/zigpy-deconz) library for zigpy. - **Silicon Labs** (EmberZNet) based Zigbee radios using the EZSP protocol via the [bellows](https://github.com/zigpy/bellows) library for zigpy. - **Texas Instruments** based Zigbee radios with all compatible Z-Stack firmware via the [zigpy-znp](https://github.com/zha-ng/zigpy-znp) library for zigpy. - **ZiGate** based ZigBee radios via the [zigpy-zigate](https://github.com/zigpy/zigpy-zigate) library for zigpy. ### Legacy or obsolete zigpy radio libraries - Texas Instruments with Z-Stack legacy firmware via the [zigpy-cc](https://github.com/zigpy/zigpy-cc) library for zigpy. 
## Release packages available via PyPI New packages of tagged versions are also released via the "zigpy" project on PyPI - https://pypi.org/project/zigpy/ - https://pypi.org/project/zigpy/#history - https://pypi.org/project/zigpy/#files Older packages of tagged versions are still available on the "zigpy-homeassistant" project on PyPI - https://pypi.org/project/zigpy-homeassistant/ Packages of tagged versions of the radio libraries are released via separate projects on PyPI - https://pypi.org/project/zigpy/ - https://pypi.org/project/bellows/ - https://pypi.org/project/zigpy-cc/ - https://pypi.org/project/zigpy-deconz/ - https://pypi.org/project/zigpy-xbee/ - https://pypi.org/project/zigpy-zigate/ - https://pypi.org/project/zigpy-znp/ zigpy-0.62.3/pyproject.toml000066400000000000000000000113651456054056700157020ustar00rootroot00000000000000[build-system] requires = ["setuptools>=61.0.0", "wheel", "setuptools-git-versioning<2"] build-backend = "setuptools.build_meta" [project] name = "zigpy" dynamic = ["version"] description = "Library implementing a Zigbee stack" urls = {repository = "https://github.com/zigpy/zigpy"} authors = [ {name = "Russell Cloran", email = "rcloran@gmail.com"} ] readme = "README.md" license = {text = "GPL-3.0"} requires-python = ">=3.8" dependencies = [ "attrs", "aiohttp", "aiosqlite>=0.16.0", "async_timeout", "crccheck", "cryptography", 'importlib_resources; python_version<"3.9"', 'async-timeout; python_version<"3.11"', "voluptuous", 'pyserial-asyncio; platform_system!="Windows"', 'pyserial-asyncio!=0.5; platform_system=="Windows"', "typing_extensions", ] [tool.setuptools.packages.find] exclude = ["tests", "tests.*"] [tool.setuptools.package-data] "*" = ["appdb_schemas/schema_v*.sql"] [project.optional-dependencies] testing = [ "tomli", "asynctest", "coveralls", "coverage[toml]", "pytest", "pytest-asyncio", "pytest-cov", "pytest-timeout", "freezegun", "ruff==0.0.261", 'pysqlite3-binary; platform_system=="Linux" and python_version<"3.12"', ] 
[tool.setuptools-git-versioning] enabled = true [tool.pytest.ini_options] asyncio_mode = "auto" [tool.ruff] target-version = "py38" select = [ "B007", # Loop control variable {name} not used within loop body "B014", # Exception handler with duplicate exception "C", # complexity "D", # docstrings "E", # pycodestyle "F", # pyflakes/autoflake "ICN001", # import concentions; {name} should be imported as {asname} "PGH004", # Use specific rule codes when using noqa "PLC0414", # Useless import alias. Import alias does not rename original package. "SIM105", # Use contextlib.suppress({exception}) instead of try-except-pass "SIM118", # Use {key} in {dict} instead of {key} in {dict}.keys() "SIM201", # Use {left} != {right} instead of not {left} == {right} "SIM212", # Use {a} if {a} else {b} instead of {b} if not {a} else {a} "SIM300", # Yoda conditions. Use 'age == 42' instead of '42 == age'. "SIM401", # Use get from dict with default instead of an if block "T20", # flake8-print "RUF006", # Store a reference to the return value of asyncio.create_task "UP", # pyupgrade "W", # pycodestyle ] ignore = [ "D202", # No blank lines allowed after function docstring "D203", # 1 blank line required before class docstring "D213", # Multi-line docstring summary should start at the second line "D406", # Section name should end with a newline "D407", # Section name underlining "E501", # line too long "E731", # do not assign a lambda expression, use a def "SIM117", # Use a single `with` statement with multiple contexts "D", # FIXME: docstrings "C901", # is too complex (15 > 10) ] exclude = [ ".venv", ".git", ".tox", "docs", "venv", "bin", "lib", "deps", "build", ] line-length = 88 [tool.ruff.per-file-ignores] "tests/*.py" = ["F811"] "zigpy/ota/provider.py" = ["SIM117"] [tool.pyupgrade] py37plus = true [tool.autoflake8] in-place = true recursive = false expand-star-imports = false exclude = [".venv", ".git", ".tox", "docs", "venv", "bin", "lib", "deps", "build"] [tool.flake8] exclude = 
[".venv", ".git", ".tox", "docs", "venv", "bin", "lib", "deps", "build"] # To work with Black max-line-length = 88 ignore = [ "W503", # W503: Line break occurred before a binary operator "E203", # E203: Whitespace before ':' "E501", # E501: line too long ] per-file-ignores = """ tests/*.py: F811 """ [tool.isort] profile = "black" # will group `import x` and `from x import` of the same module. force_sort_within_sections = true known_first_party = ["zigpy", "tests"] forced_separate = "tests" combine_as_imports = true [tool.codespell] ignore-words-list = "ser,nd,hass" skip = ["./.*", "test/*", "pyproject.toml"] quiet-level = 2 [tool.mypy] plugins = ["pydantic.mypy"] ignore_missing_imports = true install_types = true non_interactive = true show_error_codes = true show_error_context = true error_summary = true disable_error_code = [ # Only a few notifications left: "type-arg", # Only a few notifications left: "return-value", # operator breaks in CI (zigpy.types.basic), but not locally "operator", "valid-type", "misc", "attr-defined", "assignment", "arg-type" ] # Only report on selected files follow_imports = "silent" [tool.coverage.run] source = ["zigpy"] [tool.coverage.report] exclude_lines = [ "pragma: no cover", "if TYPE_CHECKING:", "if typing.TYPE_CHECKING:", "raise NotImplementedError", "raise NotImplementedError()", ]zigpy-0.62.3/requirements_test.txt000066400000000000000000000003141456054056700173010ustar00rootroot00000000000000# Test dependencies asynctest coveralls coverage[toml] pytest pytest-asyncio pytest-cov pytest-timeout freezegun ruff==0.0.261 tomli pysqlite3-binary; platform_system=="Linux" and python_version<"3.12" zigpy-0.62.3/ruff.toml000066400000000000000000000055411456054056700146240ustar00rootroot00000000000000target-version = "py38" select = [ "B007", # Loop control variable {name} not used within loop body "B014", # Exception handler with duplicate exception "C", # complexity "D", # docstrings "E", # pycodestyle "F", # pyflakes/autoflake 
"ICN001", # import concentions; {name} should be imported as {asname} "PGH004", # Use specific rule codes when using noqa "PLC0414", # Useless import alias. Import alias does not rename original package. "SIM105", # Use contextlib.suppress({exception}) instead of try-except-pass "SIM117", # Merge with-statements that use the same scope "SIM118", # Use {key} in {dict} instead of {key} in {dict}.keys() "SIM201", # Use {left} != {right} instead of not {left} == {right} "SIM212", # Use {a} if {a} else {b} instead of {b} if not {a} else {a} "SIM300", # Yoda conditions. Use 'age == 42' instead of '42 == age'. "SIM401", # Use get from dict with default instead of an if block "T20", # flake8-print "TRY004", # Prefer TypeError exception for invalid type "RUF006", # Store a reference to the return value of asyncio.create_task "UP", # pyupgrade "W", # pycodestyle ] ignore = [ "D100", # Missing docstring in public module "D101", # Missing docstring in public class "D102", # Missing docstring in public method "D103", # Missing docstring in public function "D104", # Missing docstring in public package "D105", # Missing docstring in magic method "D106", # Missing docstring in public nested class "D107", # Missing docstring in `__init__` "D202", # No blank lines allowed after function docstring "D203", # 1 blank line required before class docstring "D205", # 1 blank line required between summary line and description "D213", # Multi-line docstring summary should start at the second line "D400", # First line should end with a period "D401", # First line of docstring should be in imperative mood: "D406", # Section name should end with a newline "D407", # Section name underlining "D415", # First line should end with a period, question mark, or exclamation point "E501", # line too long # the rules below this line should be corrected "E731", # do not assign a lambda expression, use a def "B007", # Loop control variable `id_` not used within loop body "PGH004", # Use specific rule codes 
when using `noqa` "TRY004", # Prefer `TypeError` exception for invalid type ] extend-exclude = [ "tests" ] [flake8-pytest-style] fixture-parentheses = false [pyupgrade] keep-runtime-typing = true [isort] # will group `import x` and `from x import` of the same module. force-sort-within-sections = true known-first-party = [ "zigpy", "tests", ] forced-separate = ["tests"] combine-as-imports = true [mccabe] max-complexity = 25 zigpy-0.62.3/setup.py000066400000000000000000000001051456054056700144660ustar00rootroot00000000000000import setuptools if __name__ == "__main__": setuptools.setup() zigpy-0.62.3/tests/000077500000000000000000000000001456054056700141225ustar00rootroot00000000000000zigpy-0.62.3/tests/__init__.py000066400000000000000000000000251456054056700162300ustar00rootroot00000000000000"""Tests modules.""" zigpy-0.62.3/tests/async_mock.py000066400000000000000000000017311456054056700166240ustar00rootroot00000000000000"""Mock utilities that are async aware.""" from unittest.mock import * # noqa: F401, F403 class _IntSentinelObject(int): """ Sentinel-like object that is also an integer subclass. Allows sentinels to be used in loggers that perform int-specific string formatting. 
""" def __new__(cls, name): instance = super().__new__(cls, 0) instance.name = name return instance def __repr__(self): return "int_sentinel.%s" % self.name def __hash__(self): return hash((int(self), self.name)) def __eq__(self, other): return self is other __str__ = __reduce__ = __repr__ class _IntSentinel: def __init__(self): self._sentinels = {} def __getattr__(self, name): if name == "__bases__": raise AttributeError return self._sentinels.setdefault(name, _IntSentinelObject(name)) def __reduce__(self): return "int_sentinel" int_sentinel = _IntSentinel() zigpy-0.62.3/tests/conftest.py000066400000000000000000000201351456054056700163220ustar00rootroot00000000000000"""Common fixtures.""" from __future__ import annotations import asyncio import copy import logging import sys import threading import typing from unittest.mock import Mock import pytest import zigpy.application from zigpy.config import ( CONF_DATABASE, CONF_DEVICE, CONF_DEVICE_PATH, CONF_OTA, CONF_OTA_IKEA, CONF_OTA_INOVELLI, CONF_OTA_LEDVANCE, CONF_OTA_SALUS, CONF_OTA_SONOFF, CONF_OTA_THIRDREALITY, ) import zigpy.state as app_state import zigpy.types as t import zigpy.zdo.types as zdo_t from .async_mock import AsyncMock, MagicMock if typing.TYPE_CHECKING: import zigpy.device _LOGGER = logging.getLogger(__name__) NCP_IEEE = t.EUI64.convert("aa:11:22:bb:33:44:be:ef") class FailOnBadFormattingHandler(logging.Handler): def emit(self, record): try: record.msg % record.args except Exception as e: pytest.fail( f"Failed to format log message {record.msg!r} with {record.args!r}: {e}" ) @pytest.fixture(autouse=True) def raise_on_bad_log_formatting(): handler = FailOnBadFormattingHandler() root = logging.getLogger() root.addHandler(handler) root.setLevel(logging.DEBUG) try: yield finally: root.removeHandler(handler) class App(zigpy.application.ControllerApplication): async def send_packet(self, packet): pass async def connect(self): pass async def disconnect(self): pass async def start_network(self): dev = 
add_initialized_device( app=self, nwk=self.state.node_info.nwk, ieee=self.state.node_info.ieee ) dev.model = "Coordinator Model" dev.manufacturer = "Coordinator Manufacturer" dev.zdo.Mgmt_NWK_Update_req = AsyncMock( return_value=[ zdo_t.Status.SUCCESS, t.Channels.ALL_CHANNELS, 0, 0, [80] * 16, ] ) async def force_remove(self, dev): pass async def add_endpoint(self, descriptor): pass async def permit_ncp(self, time_s=60): pass async def permit_with_link_key(self, node, link_key, time_s=60): pass async def reset_network_info(self): pass async def write_network_info(self, *, network_info, node_info): pass async def load_network_info(self, *, load_devices=False): self.state.network_info.channel = 15 def recursive_dict_merge( obj: dict[str, typing.Any], updates: dict[str, typing.Any] ) -> dict[str, typing.Any]: result = copy.deepcopy(obj) for key, update in updates.items(): if isinstance(update, dict) and key in result: result[key] = recursive_dict_merge(result[key], update) else: result[key] = update return result def make_app( config_updates: dict[str, typing.Any], app_base: zigpy.application.ControllerApplication = App, ) -> zigpy.application.ControllerApplication: config = recursive_dict_merge( { CONF_DATABASE: None, CONF_DEVICE: {CONF_DEVICE_PATH: "/dev/null"}, CONF_OTA: { CONF_OTA_IKEA: False, CONF_OTA_INOVELLI: False, CONF_OTA_LEDVANCE: False, CONF_OTA_SALUS: False, CONF_OTA_SONOFF: False, CONF_OTA_THIRDREALITY: False, }, }, config_updates, ) app = app_base(app_base.SCHEMA(config)) app.state.node_info = app_state.NodeInfo( nwk=t.NWK(0x0000), ieee=NCP_IEEE, logical_type=zdo_t.LogicalType.Coordinator ) app.device_initialized = Mock(wraps=app.device_initialized) app.listener_event = Mock(wraps=app.listener_event) app.get_sequence = MagicMock(wraps=app.get_sequence, return_value=123) app.send_packet = AsyncMock(wraps=app.send_packet) app.write_network_info = AsyncMock(wraps=app.write_network_info) return app @pytest.fixture def app(): """ControllerApplication 
Mock.""" return make_app({}) @pytest.fixture def app_mock(): """ControllerApplication Mock.""" return make_app({}) def make_ieee(start=0): return t.EUI64(map(t.uint8_t, range(start, start + 8))) def make_node_desc( *, logical_type: zdo_t.LogicalType = zdo_t.LogicalType.Router ) -> zdo_t.NodeDescriptor: return zdo_t.NodeDescriptor( logical_type=logical_type, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=zdo_t.NodeDescriptor.FrequencyBand.Freq2400MHz, mac_capability_flags=zdo_t.NodeDescriptor.MACCapabilityFlags.AllocateAddress, manufacturer_code=4174, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=0, maximum_outgoing_transfer_size=82, descriptor_capability_field=zdo_t.NodeDescriptor.DescriptorCapability.NONE, ) def add_initialized_device(app, nwk, ieee): dev = app.add_device(nwk=nwk, ieee=ieee) dev.node_desc = make_node_desc(logical_type=zdo_t.LogicalType.Router) ep = dev.add_endpoint(1) ep.status = zigpy.endpoint.Status.ZDO_INIT ep.profile_id = 260 ep.device_type = zigpy.profiles.zha.DeviceType.PUMP return dev @pytest.fixture def make_initialized_device(): count = 1 def inner(app): nonlocal count dev = add_initialized_device(app, nwk=0x1000 + count, ieee=make_ieee(count)) count += 1 return dev return inner def make_neighbor( *, ieee: t.EUI64, nwk: t.NWK, device_type: zdo_t.Neighbor.DeviceType = zdo_t.Neighbor.DeviceType.Router, rx_on_when_idle=True, relationship: zdo_t.Neighbor.Relationship = zdo_t.Neighbor.Relationship.Child, ) -> zdo_t.Neighbor: return zdo_t.Neighbor( extended_pan_id=make_ieee(start=0), ieee=ieee, nwk=nwk, device_type=device_type, rx_on_when_idle=int(rx_on_when_idle), relationship=relationship, reserved1=0, permit_joining=0, reserved2=0, depth=15, lqi=250, ) def make_neighbor_from_device( device: zigpy.device.Device, *, relationship: zdo_t.Neighbor.Relationship = zdo_t.Neighbor.Relationship.Child, ): assert device.node_desc is not None return make_neighbor( 
ieee=device.ieee, nwk=device.nwk, device_type=zdo_t.Neighbor.DeviceType(int(device.node_desc.logical_type)), rx_on_when_idle=device.node_desc.is_receiver_on_when_idle, relationship=relationship, ) def make_route( *, dest_nwk: t.NWK, next_hop: t.NWK, status: zdo_t.RouteStatus = zdo_t.RouteStatus.Active, ) -> zdo_t.Route: return zdo_t.Route( DstNWK=dest_nwk, RouteStatus=status, MemoryConstrained=0, ManyToOne=0, RouteRecordRequired=0, Reserved=0, NextHop=next_hop, ) # Taken from Home Assistant's `conftest.py` @pytest.fixture(autouse=True) def verify_cleanup( event_loop: asyncio.AbstractEventLoop, ) -> typing.Generator[None, None, None]: """Verify that the test has cleaned up resources correctly.""" # Skip with Python 3.8 and below if sys.version_info < (3, 9): yield return threads_before = frozenset(threading.enumerate()) tasks_before = asyncio.all_tasks(event_loop) yield event_loop.run_until_complete(event_loop.shutdown_default_executor()) # Warn and clean-up lingering tasks and timers # before moving on to the next test. tasks = asyncio.all_tasks(event_loop) - tasks_before for task in tasks: _LOGGER.warning("Linger task after test %r", task) task.cancel() if tasks: event_loop.run_until_complete(asyncio.wait(tasks)) for handle in event_loop._scheduled: # type: ignore[attr-defined] if not handle.cancelled(): _LOGGER.warning("Lingering timer after test %r", handle) handle.cancel() # Verify no threads where left behind. 
threads = frozenset(threading.enumerate()) - threads_before for thread in threads: assert isinstance(thread, threading._DummyThread) zigpy-0.62.3/tests/databases/000077500000000000000000000000001456054056700160515ustar00rootroot00000000000000zigpy-0.62.3/tests/databases/bad_attrs_v3.db000066400000000000000000006500001456054056700207340ustar00rootroot00000000000000SQLite format 3@ 5).C mM 3  3N=J!iindexrelays_idxrelaysCREATE UNIQUE INDEX relays_idx ON relays(ieee)wtablerelaysrelaysCREATE TABLE relays (ieee ieee, relays, FOREIGN KEY(ieee) REFERENCES devices(ieee) ON DELETE CASCADE)~/'3indexgroup_members_idxgroup_membersCREATE UNIQUE INDEX group_members_idx ON group_members(group_id, ieee, endpoint_id)3''%tablegroup_membersgroup_membersCREATE TABLE group_members (group_id, ieee ieee, endpoint_id, FOREIGN KEY(group_id) REFERENCES groups(group_id) ON DELETE CASCADE, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints(ieee, endpoint_id) ON DELETE CASCADE)Loindexgroup_idxgroupsCREATE UNIQUE INDEX group_idx ON groups(group_id)<UtablegroupsgroupsCREATE TABLE groups (group_id, name)w'!3indexattribute_idxattributesCREATE UNIQUE INDEX attribute_idx ON attributes(ieee, endpoint_id, cluster, attrid)' !!tableattributesattributesCREATE TABLE attributes (ieee ieee, endpoint_id, cluster, attrid, value, FOREIGN KEY(ieee) REFERENCES devices(ieee) ON DELETE CASCADE) 1+7indexoutput_cluster_idxoutput_clusters CREATE UNIQUE INDEX output_cluster_idx ON output_clusters(ieee, endpoint_id, cluster)C ++=tableoutput_clustersoutput_clusters CREATE TABLE output_clusters (ieee ieee, endpoint_id, cluster, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints(ieee, endpoint_id) ON DELETE CASCADE)s 5-indexnode_descriptors_idxnode_descriptors CREATE UNIQUE INDEX node_descriptors_idx ON node_descriptors(ieee) --itablenode_descriptorsnode_descriptors CREATE TABLE node_descriptors (ieee ieee, value, FOREIGN KEY(ieee) REFERENCES devices(ieee) ON DELETE CASCADE)V'uindexneighbors_idxneighbors CREATE INDEX 
neighbors_idx ON neighbors(device_ieee)F[tableneighborsneighborsCREATE TABLE neighbors (device_ieee ieee NOT NULL, extended_pan_id ieee NOT NULL,ieee ieee NOT NULL, nwk INTEGER NOT NULL, struct INTEGER NOT NULL, permit_joining INTEGER NOT NULL, depth INTEGER NOT NULL, lqi INTEGER NOT NULL, FOREIGN KEY(device_ieee) REFERENCES devices(ieee) ON DELETE CASCADE)g#indexcluster_idxclustersCREATE UNIQUE INDEX cluster_idx ON clusters(ieee, endpoint_id, cluster)./tableclustersclustersCREATE TABLE clusters (ieee ieee, endpoint_id, cluster, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints(ieee, endpoint_id) ON DELETE CASCADE)b% indexendpoint_idxendpointsCREATE UNIQUE INDEX endpoint_idx ON endpoints(ieee, endpoint_id)9AtableendpointsendpointsCREATE TABLE endpoints (ieee ieee, endpoint_id, profile_id, device_type device_type, status, FOREIGN KEY(ieee) REFERENCES devices(ieee) ON DELETE CASCADE)Hgindexieee_idxdevicesCREATE UNIQUE INDEX ieee_idx ON devices(ieee)GgtabledevicesdevicesCREATE TABLE devices (ieee ieee, nwk, status)   w _ :>  V 5   q P 0{[ !;60:a4:23:ff:fe:02:34:91 ;60:a4:23:ff:fe:02:36:24M;80:4b:50:ff:fe:41:67:d4;68:0a:e2:ff:fe:70:00:69;58:8e:81:ff:fe:15:e3:ffH;ec:1b:bd:ff:fe:37:72:7eG ;ec:1b:bd:ff:fe:33:a0:04S;00:15:8d:00:05:1e:13:46;00:15:8d:00:05:4a:73:c3u6;00:15:8d:00:05:1e:0e:32;60:a4:23:ff:fe:02:32:30;60:a4:23:ff:fe:02:36:93_;60:a4:23:ff:fe:02:30:39;60:a4:23:ff:fe:02:54:1eq;60:a4:23:ff:fe:02:38:60K;60:a4:23:ff:fe:02:2f:4d=;60:a4:23:ff:fe:02:38:59XM;60:a4:23:ff:fe:02:2f:96m ;80:4b:50:ff:fe:41:58:f3;80:4b:50:ff:fe:41:59:63;60:a4:23:ff:fe:02:36:a0G ;60:a4:23:ff:fe:02:3b:b4 ;ec:1b:bd:ff:fe:94:18:a4;60:a4:23:ff:fe:02:36:9c;60:a4:23:ff:fe:02:51:70Ջ;60:a4:23:ff:fe:02:38:af;60:a4:23:ff:fe:02:39:7b;60:a4:23:ff:fe:02:2f:42_ ;cc:cc:cc:ff:fe:a5:f2:83  a } %Ay !u E=Y]  ) 
;68:0a:e2:ff:fe:70:00:69;58:8e:81:ff:fe:15:e3:ff;ec:1b:bd:ff:fe:37:72:7e;ec:1b:bd:ff:fe:33:a0:04;60:a4:23:ff:fe:02:38:59;00:15:8d:00:05:1e:13:46;00:15:8d:00:05:4a:73:c3;00:15:8d:00:05:1e:0e:32;60:a4:23:ff:fe:02:32:30;60:a4:23:ff:fe:02:34:91;60:a4:23:ff:fe:02:30:39;60:a4:23:ff:fe:02:54:1e;60:a4:23:ff:fe:02:2f:42;60:a4:23:ff:fe:02:2f:4d;60:a4:23:ff:fe:02:51:70;60:a4:23:ff:fe:02:2f:96;80:4b:50:ff:fe:41:67:d4;80:4b:50:ff:fe:41:59:63;80:4b:50:ff:fe:41:58:f3 ;60:a4:23:ff:fe:02:3b:b4 ;ec:1b:bd:ff:fe:94:18:a4 ;60:a4:23:ff:fe:02:36:9c;60:a4:23:ff:fe:02:38:60;60:a4:23:ff:fe:02:38:af;60:a4:23:ff:fe:02:36:a0;60:a4:23:ff:fe:02:36:24;60:a4:23:ff:fe:02:36:93;60:a4:23:ff:fe:02:39:7b; cc:cc:cc:ff:fe:a5:f2:83 6 p K )  t Q . | Z 8   oJ &K& r M o r N ) \ &6"; 60:a4:23:ff:fe:02:36:a0  #; 80:4b:50:ff:fe:41:58:f3a#; 80:4b:50:ff:fe:41:59:63a"; 80:4b:50:ff:fe:41:59:63  #; 80:4b:50:ff:fe:41:67:d4a#@; 68:0a:e2:ff:fe:70:00:69a"?; 68:0a:e2:ff:fe:70:00:69  >; 58:8e:81:ff:fe:15:e3:ff =; 58:8e:81:ff:fe:15:e3:ff <; 58:8e:81:ff:fe:15:e3:ff;;  58:8e:81:ff:fe:15:e3:ff"s;  ec:1b:bd:ff:fe:37:72:7e^"x;  ec:1b:bd:ff:fe:33:a0:04^#|; 60:a4:23:ff:fe:02:38:59a"{; 60:a4:23:ff:fe:02:38:59  !3;  00:15:8d:00:05:1e:13:46!2;  00:15:8d:00:05:4a:73:c3!1;  00:15:8d:00:05:1e:0e:32#0; 60:a4:23:ff:fe:02:32:30a"/; 60:a4:23:ff:fe:02:32:30  #J; 60:a4:23:ff:fe:02:34:91a"I; 60:a4:23:ff:fe:02:34:91  #z; 60:a4:23:ff:fe:02:30:39a"y; 60:a4:23:ff:fe:02:30:39  #u; 60:a4:23:ff:fe:02:54:1ea"t; 60:a4:23:ff:fe:02:54:1e  #\; 60:a4:23:ff:fe:02:2f:42a"[; 60:a4:23:ff:fe:02:2f:42  #; 60:a4:23:ff:fe:02:2f:4da"; 60:a4:23:ff:fe:02:2f:4d  #j; 60:a4:23:ff:fe:02:51:70a"i; 60:a4:23:ff:fe:02:51:70  #n; 60:a4:23:ff:fe:02:2f:96a"m; 60:a4:23:ff:fe:02:2f:96  "; 80:4b:50:ff:fe:41:67:d4  #"; 80:4b:50:ff:fe:41:58:f3  #; 60:a4:23:ff:fe:02:3b:b4a"; 60:a4:23:ff:fe:02:3b:b4  #r; ec:1b:bd:ff:fe:94:18:a4f"q;  ec:1b:bd:ff:fe:94:18:a4^ #; 60:a4:23:ff:fe:02:36:9ca"; 60:a4:23:ff:fe:02:36:9c  #f; 60:a4:23:ff:fe:02:38:60a"e; 60:a4:23:ff:fe:02:38:60  #p; 
60:a4:23:ff:fe:02:38:afa"o; 60:a4:23:ff:fe:02:38:af  ##; 60:a4:23:ff:fe:02:36:a0a#^; 60:a4:23:ff:fe:02:36:24a"]; 60:a4:23:ff:fe:02:36:24  #V; 60:a4:23:ff:fe:02:36:93a"U; 60:a4:23:ff:fe:02:36:93  #`; 60:a4:23:ff:fe:02:39:7ba"_; 60:a4:23:ff:fe:02:39:7b  ";  cc:cc:cc:ff:fe:a5:f2:83 i6 C d * G x Z < u K ,  L-jX9 C v  W 8  b   ;60:a4:23:ff:fe:02:36:a0 ;80:4b:50:ff:fe:41:58:f3;80:4b:50:ff:fe:41:58:f3 ;80:4b:50:ff:fe:41:59:63;80:4b:50:ff:fe:41:67:d4;80:4b:50:ff:fe:41:67:d4 ;68:0a:e2:ff:fe:70:00:69@;68:0a:e2:ff:fe:70:00:69 ?;58:8e:81:ff:fe:15:e3:ff>;58:8e:81:ff:fe:15:e3:ff=;58:8e:81:ff:fe:15:e3:ff<; 58:8e:81:ff:fe:15:e3:ff;; ec:1b:bd:ff:fe:37:72:7es; ec:1b:bd:ff:fe:33:a0:04x;60:a4:23:ff:fe:02:38:59|;60:a4:23:ff:fe:02:38:59 {; 00:15:8d:00:05:1e:13:463; 00:15:8d:00:05:4a:73:c32; 00:15:8d:00:05:1e:0e:321;60:a4:23:ff:fe:02:32:300;60:a4:23:ff:fe:02:32:30 /;60:a4:23:ff:fe:02:34:91J;60:a4:23:ff:fe:02:34:91 I;60:a4:23:ff:fe:02:30:39z;60:a4:23:ff:fe:02:30:39 y;60:a4:23:ff:fe:02:54:1eu;60:a4:23:ff:fe:02:54:1e t;60:a4:23:ff:fe:02:2f:42\;60:a4:23:ff:fe:02:2f:42 [;60:a4:23:ff:fe:02:2f:4d;60:a4:23:ff:fe:02:2f:4d ;60:a4:23:ff:fe:02:51:70j;60:a4:23:ff:fe:02:51:70 i;60:a4:23:ff:fe:02:2f:96n;60:a4:23:ff:fe:02:2f:96 m ;80:4b:50:ff:fe:41:59:63 ;60:a4:23:ff:fe:02:3b:b4;60:a4:23:ff:fe:02:3b:b4 ;ec:1b:bd:ff:fe:94:18:a4r; ec:1b:bd:ff:fe:94:18:a4q;60:a4:23:ff:fe:02:36:9c;60:a4:23:ff:fe:02:36:9c ;60:a4:23:ff:fe:02:38:60f;60:a4:23:ff:fe:02:38:60 e;60:a4:23:ff:fe:02:38:afp;60:a4:23:ff:fe:02:38:af o;60:a4:23:ff:fe:02:36:a0;60:a4:23:ff:fe:02:36:24^;60:a4:23:ff:fe:02:36:24 ];60:a4:23:ff:fe:02:36:93V;60:a4:23:ff:fe:02:36:93 U;60:a4:23:ff:fe:02:39:7b`;60:a4:23:ff:fe:02:39:7b _; cc:cc:cc:ff:fe:a5:f2:83*.\!;60:a4:23:ff:fe:02:36:a0 ( +/@14h05!8%m.<#d,A(;68:0a:e2:ff:fe:70:00:69';60:a4:23:ff:fe:02:38:af&;60:a4:23:ff:fe:02:36:935  [f  G \ =  ( Q p z [3'E;&80:4b:50:ff:fe:41:58:f3@ORR,R'D;&80:4b:50:ff:fe:41:59:63@ORR,R'C;&80:4b:50:ff:fe:41:67:d4@ORR,R'!;&68:0a:e2:ff:fe:70:00:69@RR,R' 
;&58:8e:81:ff:fe:15:e3:ff@$RR,R'<;&ec:1b:bd:ff:fe:37:72:7e@|RRR'?;&ec:1b:bd:ff:fe:33:a0:04@|RRR'A;&60:a4:23:ff:fe:02:38:59@RR,R';&00:15:8d:00:05:1e:13:46@7dd';&00:15:8d:00:05:4a:73:c3@7dd'6;&00:15:8d:00:05:1e:0e:32@7dd';&60:a4:23:ff:fe:02:32:30@ORR,R'&;&60:a4:23:ff:fe:02:34:91@ORR,R'@;&60:a4:23:ff:fe:02:30:39@RR,R'=;&60:a4:23:ff:fe:02:54:1e@RR,R'/;&60:a4:23:ff:fe:02:2f:42@RR,R';&60:a4:23:ff:fe:02:2f:4d@RR,R'7;&60:a4:23:ff:fe:02:51:70@RR,R'9;&60:a4:23:ff:fe:02:2f:96@RR,R' ;&60:a4:23:ff:fe:02:3b:b4@RR,R';;&ec:1b:bd:ff:fe:94:18:a4@$RR,R' ;&60:a4:23:ff:fe:02:36:9c@RR,R'4;&60:a4:23:ff:fe:02:38:60@RR,R':;&60:a4:23:ff:fe:02:38:af@RR,R'F;&60:a4:23:ff:fe:02:36:a0@RR,R'0;&60:a4:23:ff:fe:02:36:24@RR,R',;&60:a4:23:ff:fe:02:36:93@RR,R'1;&60:a4:23:ff:fe:02:39:7b@RR,R';&cc:cc:cc:ff:fe:a5:f2:83@ͫRA,  EyA %!u =Y] )  } a;80:4b:50:ff:fe:41:58:f3E;80:4b:50:ff:fe:41:59:63D;80:4b:50:ff:fe:41:67:d4C;68:0a:e2:ff:fe:70:00:69!;58:8e:81:ff:fe:15:e3:ff ;ec:1b:bd:ff:fe:37:72:7e<;ec:1b:bd:ff:fe:33:a0:04?;60:a4:23:ff:fe:02:38:59A;00:15:8d:00:05:1e:13:46;00:15:8d:00:05:4a:73:c3;00:15:8d:00:05:1e:0e:326;60:a4:23:ff:fe:02:32:30;60:a4:23:ff:fe:02:34:91&;60:a4:23:ff:fe:02:30:39@;60:a4:23:ff:fe:02:54:1e=;60:a4:23:ff:fe:02:2f:42/;60:a4:23:ff:fe:02:2f:4d;60:a4:23:ff:fe:02:51:707;60:a4:23:ff:fe:02:2f:969;60:a4:23:ff:fe:02:3b:b4 ;ec:1b:bd:ff:fe:94:18:a4;;60:a4:23:ff:fe:02:36:9c ;60:a4:23:ff:fe:02:38:604;60:a4:23:ff:fe:02:38:af:;60:a4:23:ff:fe:02:36:a0F;60:a4:23:ff:fe:02:36:240;60:a4:23:ff:fe:02:36:93,;60:a4:23:ff:fe:02:39:7b1; cc:cc:cc:ff:fe:a5:f2:83 9  d G )  a C $ jK+ pQ1vW7 Y 9 l L 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9\;68:0a:e2:ff:fe:70:00:69![;68:0a:e2:ff:fe:70:00:69 Z;68:0a:e2:ff:fe:70:00:69 
Y;58:8e:81:ff:fe:15:e3:ffX;58:8e:81:ff:fe:15:e3:ffW;58:8e:81:ff:fe:15:e3:ffV;58:8e:81:ff:fe:15:e3:ffU;58:8e:81:ff:fe:15:e3:ffT;58:8e:81:ff:fe:15:e3:ffS;58:8e:81:ff:fe:15:e3:ffR;58:8e:81:ff:fe:15:e3:ffQ;58:8e:81:ff:fe:15:e3:ffP;58:8e:81:ff:fe:15:e3:ffO;58:8e:81:ff:fe:15:e3:ffN;58:8e:81:ff:fe:15:e3:ffM;58:8e:81:ff:fe:15:e3:ffL;58:8e:81:ff:fe:15:e3:ffK;58:8e:81:ff:fe:15:e3:ffJ;58:8e:81:ff:fe:15:e3:ffI;58:8e:81:ff:fe:15:e3:ffH;58:8e:81:ff:fe:15:e3:ffG;58:8e:81:ff:fe:15:e3:ffF;58:8e:81:ff:fe:15:e3:ffE;58:8e:81:ff:fe:15:e3:ffD;58:8e:81:ff:fe:15:e3:ffC;58:8e:81:ff:fe:15:e3:ffB;58:8e:81:ff:fe:15:e3:ffA; 58:8e:81:ff:fe:15:e3:ff@; 58:8e:81:ff:fe:15:e3:ff?; 58:8e:81:ff:fe:15:e3:ff>; 58:8e:81:ff:fe:15:e3:ff=; 58:8e:81:ff:fe:15:e3:ff<; 58:8e:81:ff:fe:15:e3:ff;; 58:8e:81:ff:fe:15:e3:ff:; 58:8e:81:ff:fe:15:e3:ff xUi;80:4b:50:ff:fe:41:58:f3!h;80:4b:50:ff:fe:41:58:f3 g;80:4b:50:ff:fe:41:59:63!f;80:4b:50:ff:fe:41:59:63 e;80:4b:50:ff:fe:41:67:d4!d;80:4b:50:ff:fe:41:67:d4 +; 00:15:8d:00:05:1e:13:46*; 00:15:8d:00:05:1e:13:46); 00:15:8d:00:05:1e:13:46(; 00:15:8d:00:05:4a:73:c3'; 00:15:8d:00:05:4a:73:c3&; 00:15:8d:00:05:1e:0e:32%; 00:15:8d:00:05:1e:0e:32$; 00:15:8d:00:05:1e:0e:32#;60:a4:23:ff:fe:02:32:30!";60:a4:23:ff:fe:02:32:30 c;60:a4:23:ff:fe:02:34:91!b;60:a4:23:ff:fe:02:34:91  `;60:a4:23:ff:fe:02:2f:4d! ;60:a4:23:ff:fe:02:3b:b4!!>;60:a4:23:ff:fe:02:36:9c!; cc:cc:cc:ff:fe:a5:f2:83 9e | !  ^ ? i J + lL, jJ* hH( < e ?  
_  C C C C C C  ;68:0a:e2:ff:fe:70:00:69!\;68:0a:e2:ff:fe:70:00:69 [;68:0a:e2:ff:fe:70:00:69 Z ;58:8e:81:ff:fe:15:e3:ffY ;58:8e:81:ff:fe:15:e3:ffX;58:8e:81:ff:fe:15:e3:ffW;58:8e:81:ff:fe:15:e3:ffV;58:8e:81:ff:fe:15:e3:ffU;58:8e:81:ff:fe:15:e3:ffT;58:8e:81:ff:fe:15:e3:ffS;58:8e:81:ff:fe:15:e3:ffR ;58:8e:81:ff:fe:15:e3:ffQ ;58:8e:81:ff:fe:15:e3:ffP;58:8e:81:ff:fe:15:e3:ffO;58:8e:81:ff:fe:15:e3:ffN;58:8e:81:ff:fe:15:e3:ffM;58:8e:81:ff:fe:15:e3:ffL;58:8e:81:ff:fe:15:e3:ffK;58:8e:81:ff:fe:15:e3:ffJ ;58:8e:81:ff:fe:15:e3:ffI ;58:8e:81:ff:fe:15:e3:ffH;58:8e:81:ff:fe:15:e3:ffG;58:8e:81:ff:fe:15:e3:ffF;58:8e:81:ff:fe:15:e3:ffE;58:8e:81:ff:fe:15:e3:ffD;58:8e:81:ff:fe:15:e3:ffC;58:8e:81:ff:fe:15:e3:ffB; 58:8e:81:ff:fe:15:e3:ffA; 58:8e:81:ff:fe:15:e3:ff@; 58:8e:81:ff:fe:15:e3:ff?; 58:8e:81:ff:fe:15:e3:ff>; 58:8e:81:ff:fe:15:e3:ff=; 58:8e:81:ff:fe:15:e3:ff<; 58:8e:81:ff:fe:15:e3:ff;; 58:8e:81:ff:fe:15:e3:ff: \X ;80:4b:50:ff:fe:41:58:f3!i;80:4b:50:ff:fe:41:58:f3 h ;80:4b:50:ff:fe:41:59:63!g;80:4b:50:ff:fe:41:59:63 f ;80:4b:50:ff:fe:41:67:d4!e;80:4b:50:ff:fe:41:67:d4 d ; 00:15:8d:00:05:1e:13:46+; 00:15:8d:00:05:1e:13:46*; 00:15:8d:00:05:1e:13:46); 00:15:8d:00:05:4a:73:c3(; 00:15:8d:00:05:4a:73:c3' ; 00:15:8d:00:05:1e:0e:32&; 00:15:8d:00:05:1e:0e:32%; 00:15:8d:00:05:1e:0e:32$ ;60:a4:23:ff:fe:02:32:30!#;60:a4:23:ff:fe:02:32:30 " ;60:a4:23:ff:fe:02:34:91!c;60:a4:23:ff:fe:02:34:91 b c ;60:a4:23:ff:fe:02:2f:4d! ;60:a4:23:ff:fe:02:3b:b4! 
@ ;60:a4:23:ff:fe:02:36:9c!;  cc:cc:cc:ff:fe:a5:f2:83$Y $ c"c#; 60:a4:23:ff:fe:02:38:59 $;60:a4:23:ff:fe:02:54:1e @ q#$;60:a4:23:ff:fe:02:30:39 $ TeT#Living Room;Default Lightlink Group)Bedroom Lights'Office Lights#Hood Lights#Corner Lamp5Guest Bedroom Lights9Kitchen Ceiling Lights     S4 'reF  z [ <  :;60:a4:23:ff:fe:02:38:59 9;60:a4:23:ff:fe:02:3b:b4 8;80:4b:50:ff:fe:41:58:f3 7;60:a4:23:ff:fe:02:36:9c 6;80:4b:50:ff:fe:41:59:63 5;80:4b:50:ff:fe:41:67:d4 4;80:4b:50:ff:fe:41:59:63 /;60:a4:23:ff:fe:02:51:70 ;60:a4:23:ff:fe:02:32:30 &;60:a4:23:ff:fe:02:34:91 );60:a4:23:ff:fe:02:38:af $;60:a4:23:ff:fe:02:39:7b #;60:a4:23:ff:fe:02:36:93 *; ec:1b:bd:ff:fe:37:72:7e0; ec:1b:bd:ff:fe:33:a0:043;80:4b:50:ff:fe:41:58:f3 2;80:4b:50:ff:fe:41:67:d4 +;60:a4:23:ff:fe:02:2f:42 (;60:a4:23:ff:fe:02:38:60 -;60:a4:23:ff:fe:02:54:1e ,;60:a4:23:ff:fe:02:2f:96 "; cc:cc:cc:ff:fe:a5:f2:83;60:a4:23:ff:fe:02:2f:4d ';60:a4:23:ff:fe:02:36:24  a A! ffF&  & F F;60:a4:23:ff:fe:02:38:59 :;60:a4:23:ff:fe:02:3b:b4 9;80:4b:50:ff:fe:41:58:f3 8;60:a4:23:ff:fe:02:36:9c 7;80:4b:50:ff:fe:41:59:63 6;80:4b:50:ff:fe:41:67:d4 5;80:4b:50:ff:fe:41:59:63 4;60:a4:23:ff:fe:02:51:70 /;60:a4:23:ff:fe:02:36:24 ';60:a4:23:ff:fe:02:32:30 ;60:a4:23:ff:fe:02:34:91 &;60:a4:23:ff:fe:02:38:af );60:a4:23:ff:fe:02:39:7b $;60:a4:23:ff:fe:02:36:93 #; ec:1b:bd:ff:fe:37:72:7e*; ec:1b:bd:ff:fe:33:a0:040;80:4b:50:ff:fe:41:58:f3 3;80:4b:50:ff:fe:41:67:d4 2;60:a4:23:ff:fe:02:2f:42 +;60:a4:23:ff:fe:02:38:60 (;60:a4:23:ff:fe:02:54:1e -;60:a4:23:ff:fe:02:2f:96 ,; cc:cc:cc:ff:fe:a5:f2:83" ;60:a4:23:ff:fe:02:2f:4d    ~_@ pO . 
Q#.# ;80:4b:50:ff:fe:41:67:d4w;60:a4:23:ff:fe:02:36:a0;ec:1b:bd:ff:fe:94:18:a4KE;60:a4:23:ff:fe:02:36:24!;80:4b:50:ff:fe:41:59:63G;60:a4:23:ff:fe:02:51:70K;80:4b:50:ff:fe:41:58:f3GD;ec:1b:bd:ff:fe:33:a0:04q;00:15:8d:00:05:1e:13:46K;68:0a:e2:ff:fe:70:00:69s;58:8e:81:ff:fe:15:e3:ff%m;60:a4:23:ff:fe:02:36:93;60:a4:23:ff:fe:02:38:60;60:a4:23:ff:fe:02:38:afL;00:15:8d:00:05:4a:73:c3TK.;60:a4:23:ff:fe:02:39:7ba;00:15:8d:00:05:1e:0e:32  \z>L.jj \ jj    Z e9 e e G - Q79< jj;80:4b:50:ff:fe:41:58:f3&;00:15:8d:00:05:1e:0e:32 ";60:a4:23:ff:fe:02:38:af ݏ;00:15:8d:00:05:4a:73:c3 ;58:8e:81:ff:fe:15:e3:ff s;00:15:8d:00:05:1e:13:46&;60:a4:23:ff:fe:02:36:a0&w;80:4b:50:ff:fe:41:59:63&;60:a4:23:ff:fe:02:36:93 m;60:a4:23:ff:fe:02:38:60 ݓ;68:0a:e2:ff:fe:70:00:69 ;60:a4:23:ff:fe:02:51:70&;ec:1b:bd:ff:fe:94:18:a4&;60:a4:23:ff:fe:02:36:24&E;ec:1b:bd:ff:fe:33:a0:04%D;80:4b:50:ff:fe:41:67:d4&;60:a4:23:ff:fe:02:39:7b #. ;09u|V1sY2   Nc r L X -  ` 9  s M (8sHL%X000000000000000000%%%%%%tR"_>{T<$Y;60:a4:23:ff:fe:02:36:9c @ $X;60:a4:23:ff:fe:02:36:9c @ < ; Iec:1b:bd:ff:fe:33:a0:04TRADFRI bulb E14 W op/ch 400lmTu ;;;ec:1b:bd:ff:fe:37:72:7e7f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:67:d4%HTt ;;;ec:1b:bd:ff:fe:37:72:7e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:30:39%STr ;;;60:a4:23:ff:fe:02:36:937f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:96m%>TU ;;;60:a4:23:ff:fe:02:36:937f:4f:6a:3c:05:70:f6:6558:8e:81:ff:fe:15:e3:ffHV ;;;60:a4:23:ff:fe:02:36:937f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:32:30%U~ ;;;60:a4:23:ff:fe:02:36:937f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:af%xU} ;;;60:a4:23:ff:fe:02:36:937f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:42_%=U| ;;;60:a4:23:ff:fe:02:36:937f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:34:91%UU{ ;;;60:a4:23:ff:fe:02:36:937f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:54:1eݢ%MUz ;;;60:a4:23:ff:fe:02:36:937f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:51:70Ջ%CVy ;;;60:a4:23:ff:fe:02:36:937f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:39:7b%Uw 
;;;60:a4:23:ff:fe:02:36:937f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:94:18:a4%+Uv ;;;60:a4:23:ff:fe:02:36:937f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:3b:b4%4Uu ;;;60:a4:23:ff:fe:02:36:937f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:37:72:7e%6Ut ;;;60:a4:23:ff:fe:02:36:937f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:9c%iTs ;;;60:a4:23:ff:fe:02:36:937f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:59}%5Tr ;;;60:a4:23:ff:fe:02:36:937f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:96m%>Tq ;;;60:a4:23:ff:fe:02:36:937f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:33:a0:04d{%6Tp ;;;60:a4:23:ff:fe:02:36:937f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:30:39Q%-Uo ;;;60:a4:23:ff:fe:02:36:937f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:24MUn ;;;60:a4:23:ff:fe:02:36:937f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:a0Mx%Tm ;;;60:a4:23:ff:fe:02:36:937f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:4d=%*Ul ;;;60:a4:23:ff:fe:02:36:937f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:60K%Qk ;;;60:a4:23:ff:fe:02:36:937f:4f:6a:3c:05:70:f6:65cc:cc:cc:ff:fe:a5:f2:83$M q)^<xY: v W 7  s S 2  m L + K g G '  d C " )o|[:{Z9rQ0iH'`?wX8vV5m!;60:a4:23:ff:fe:02:2f:96 !;60:a4:23:ff:fe:02:2f:42 w;60:a4:23:ff:fe:02:30:39 " ;60:a4:23:ff:fe:02:36:a0 ' ;60:a4:23:ff:fe:02:36:a0 & ;60:a4:23:ff:fe:02:36:a0 % ;60:a4:23:ff:fe:02:36:a0 $ ;60:a4:23:ff:fe:02:36:a0 #;60:a4:23:ff:fe:02:36:a0 " ;60:a4:23:ff:fe:02:36:9c 8 ;60:a4:23:ff:fe:02:36:9c 7;60:a4:23:ff:fe:02:36:9c 6;60:a4:23:ff:fe:02:36:9c 5;60:a4:23:ff:fe:02:36:9c 4;60:a4:23:ff:fe:02:36:9c 3;60:a4:23:ff:fe:02:36:9c 2;60:a4:23:ff:fe:02:36:9c 1!;60:a4:23:ff:fe:02:36:93 _!;60:a4:23:ff:fe:02:36:93 ^ ;60:a4:23:ff:fe:02:36:93 ] ;60:a4:23:ff:fe:02:36:93 \ ;60:a4:23:ff:fe:02:36:93 [ ;60:a4:23:ff:fe:02:36:93 Z ;60:a4:23:ff:fe:02:36:93 Y;60:a4:23:ff:fe:02:36:93 X!;60:a4:23:ff:fe:02:36:24 !;60:a4:23:ff:fe:02:36:24 ~ ;60:a4:23:ff:fe:02:36:24 } ;60:a4:23:ff:fe:02:36:24 | ;60:a4:23:ff:fe:02:36:24 { ;60:a4:23:ff:fe:02:36:24 z ;60:a4:23:ff:fe:02:36:24 y;60:a4:23:ff:fe:02:36:24 x!;60:a4:23:ff:fe:02:34:91 
/!;60:a4:23:ff:fe:02:34:91 . ;60:a4:23:ff:fe:02:34:91 - ;60:a4:23:ff:fe:02:34:91 , ;60:a4:23:ff:fe:02:34:91 + ;60:a4:23:ff:fe:02:34:91 * ;60:a4:23:ff:fe:02:34:91 );60:a4:23:ff:fe:02:34:91 (!;60:a4:23:ff:fe:02:32:30 !;60:a4:23:ff:fe:02:32:30  ;60:a4:23:ff:fe:02:32:30  ;60:a4:23:ff:fe:02:32:30  ;60:a4:23:ff:fe:02:32:30  ;60:a4:23:ff:fe:02:32:30  ;60:a4:23:ff:fe:02:32:30 ;60:a4:23:ff:fe:02:32:30 :!;60:a4:23:ff:fe:02:30:39 !;60:a4:23:ff:fe:02:30:39  ;60:a4:23:ff:fe:02:30:39  ;60:a4:23:ff:fe:02:30:39  ;60:a4:23:ff:fe:02:30:39  ;60:a4:23:ff:fe:02:30:39  ;60:a4:23:ff:fe:02:30:39 !;60:a4:23:ff:fe:02:2f:96  ;60:a4:23:ff:fe:02:2f:96  ;60:a4:23:ff:fe:02:2f:96  ;60:a4:23:ff:fe:02:2f:96  ;60:a4:23:ff:fe:02:2f:96  ;60:a4:23:ff:fe:02:2f:96 ;60:a4:23:ff:fe:02:2f:96  ;60:a4:23:ff:fe:02:2f:4d } ;60:a4:23:ff:fe:02:2f:4d |;60:a4:23:ff:fe:02:2f:4d {;60:a4:23:ff:fe:02:2f:4d z;60:a4:23:ff:fe:02:2f:4d y;60:a4:23:ff:fe:02:2f:4d x;60:a4:23:ff:fe:02:2f:4d w;60:a4:23:ff:fe:02:2f:4d v!;60:a4:23:ff:fe:02:2f:42 v ;60:a4:23:ff:fe:02:2f:42 u ;60:a4:23:ff:fe:02:2f:42 t ;60:a4:23:ff:fe:02:2f:42 s ;60:a4:23:ff:fe:02:2f:42 r ;60:a4:23:ff:fe:02:2f:42 q;60:a4:23:ff:fe:02:2f:42 p!;58:8e:81:ff:fe:15:e3:ff  ;58:8e:81:ff:fe:15:e3:ff; 58:8e:81:ff:fe:15:e3:ff;58:8e:81:ff:fe:15:e3:ff!;58:8e:81:ff:fe:15:e3:ff  ;58:8e:81:ff:fe:15:e3:ff; 58:8e:81:ff:fe:15:e3:ff;58:8e:81:ff:fe:15:e3:ff!;58:8e:81:ff:fe:15:e3:ff  ;58:8e:81:ff:fe:15:e3:ff; 58:8e:81:ff:fe:15:e3:ff;58:8e:81:ff:fe:15:e3:ff ; 58:8e:81:ff:fe:15:e3:ff ; 58:8e:81:ff:fe:15:e3:ff; 58:8e:81:ff:fe:15:e3:ff; 58:8e:81:ff:fe:15:e3:ff!; 00:15:8d:00:05:4a:73:c3 ; 00:15:8d:00:05:4a:73:c3 ; 00:15:8d:00:05:4a:73:c3 ; 00:15:8d:00:05:4a:73:c3; 00:15:8d:00:05:4a:73:c3; 00:15:8d:00:05:4a:73:c3; 00:15:8d:00:05:4a:73:c3!; 00:15:8d:00:05:1e:13:46 ; 00:15:8d:00:05:1e:13:46 ; 00:15:8d:00:05:1e:13:46 ; 00:15:8d:00:05:1e:13:46; 00:15:8d:00:05:1e:13:46; 00:15:8d:00:05:1e:13:46!; 00:15:8d:00:05:1e:0e:32 ; 00:15:8d:00:05:1e:0e:32 ; 00:15:8d:00:05:1e:0e:32 ; 00:15:8d:00:05:1e:0e:32; 
00:15:8d:00:05:1e:0e:32; 00:15:8d:00:05:1e:0e:32 o}\;tS2 k J )  b A { [ ;  y X 7  p O . gF%xW6pO. `?iI) hH(gG' ; ec:1b:bd:ff:fe:94:18:a4 ; ec:1b:bd:ff:fe:94:18:a4  ; ec:1b:bd:ff:fe:94:18:a4; ec:1b:bd:ff:fe:94:18:a4; ec:1b:bd:ff:fe:94:18:a4; ec:1b:bd:ff:fe:94:18:a4; ec:1b:bd:ff:fe:94:18:a4; ec:1b:bd:ff:fe:94:18:a4; ec:1b:bd:ff:fe:94:18:a4 ; ec:1b:bd:ff:fe:37:72:7e ; ec:1b:bd:ff:fe:37:72:7e ; ec:1b:bd:ff:fe:37:72:7e; ec:1b:bd:ff:fe:37:72:7e; ec:1b:bd:ff:fe:37:72:7e; ec:1b:bd:ff:fe:37:72:7e; ec:1b:bd:ff:fe:37:72:7e; ec:1b:bd:ff:fe:37:72:7e ; ec:1b:bd:ff:fe:33:a0:04 ; ec:1b:bd:ff:fe:33:a0:04 ; ec:1b:bd:ff:fe:33:a0:04; ec:1b:bd:ff:fe:33:a0:04; ec:1b:bd:ff:fe:33:a0:04; ec:1b:bd:ff:fe:33:a0:04; ec:1b:bd:ff:fe:33:a0:04; ec:1b:bd:ff:fe:33:a0:04!;80:4b:50:ff:fe:41:59:63 !;80:4b:50:ff:fe:41:59:63  ;80:4b:50:ff:fe:41:59:63  ;80:4b:50:ff:fe:41:59:63  ;80:4b:50:ff:fe:41:59:63  ;80:4b:50:ff:fe:41:59:63  ;80:4b:50:ff:fe:41:59:63 ;80:4b:50:ff:fe:41:59:63  !;80:4b:50:ff:fe:41:58:f3 !!;80:4b:50:ff:fe:41:58:f3  ;80:4b:50:ff:fe:41:58:f3  ;80:4b:50:ff:fe:41:58:f3  ;80:4b:50:ff:fe:41:58:f3  ;80:4b:50:ff:fe:41:58:f3  ;80:4b:50:ff:fe:41:58:f3 ;80:4b:50:ff:fe:41:58:f3 !;80:4b:50:ff:fe:41:67:d4 !;80:4b:50:ff:fe:41:67:d4  ;80:4b:50:ff:fe:41:67:d4  ;80:4b:50:ff:fe:41:67:d4  ;80:4b:50:ff:fe:41:67:d4  ;80:4b:50:ff:fe:41:67:d4  ;80:4b:50:ff:fe:41:67:d4  ;80:4b:50:ff:fe:41:67:d4  ;68:0a:e2:ff:fe:70:00:69  ;68:0a:e2:ff:fe:70:00:69  ;68:0a:e2:ff:fe:70:00:69  ;68:0a:e2:ff:fe:70:00:69 ;68:0a:e2:ff:fe:70:00:69 !;60:a4:23:ff:fe:02:54:1e !;60:a4:23:ff:fe:02:54:1e  ;60:a4:23:ff:fe:02:54:1e  ;60:a4:23:ff:fe:02:54:1e  ;60:a4:23:ff:fe:02:54:1e  ;60:a4:23:ff:fe:02:54:1e  ;60:a4:23:ff:fe:02:54:1e ;60:a4:23:ff:fe:02:54:1e !;60:a4:23:ff:fe:02:51:70 !;60:a4:23:ff:fe:02:51:70  ;60:a4:23:ff:fe:02:51:70  ;60:a4:23:ff:fe:02:51:70  ;60:a4:23:ff:fe:02:51:70  ;60:a4:23:ff:fe:02:51:70  ;60:a4:23:ff:fe:02:51:70 ;60:a4:23:ff:fe:02:51:70  ;60:a4:23:ff:fe:02:3b:b4 J ;60:a4:23:ff:fe:02:3b:b4 I;60:a4:23:ff:fe:02:3b:b4 
H;60:a4:23:ff:fe:02:3b:b4 G;60:a4:23:ff:fe:02:3b:b4 F;60:a4:23:ff:fe:02:3b:b4 E;60:a4:23:ff:fe:02:3b:b4 D;60:a4:23:ff:fe:02:3b:b4 C!;60:a4:23:ff:fe:02:39:7b !;60:a4:23:ff:fe:02:39:7b  ;60:a4:23:ff:fe:02:39:7b  ;60:a4:23:ff:fe:02:39:7b  ;60:a4:23:ff:fe:02:39:7b  ;60:a4:23:ff:fe:02:39:7b  ;60:a4:23:ff:fe:02:39:7b ;60:a4:23:ff:fe:02:39:7b !;60:a4:23:ff:fe:02:38:af !;60:a4:23:ff:fe:02:38:af  ;60:a4:23:ff:fe:02:38:af  ;60:a4:23:ff:fe:02:38:af  ;60:a4:23:ff:fe:02:38:af  ;60:a4:23:ff:fe:02:38:af  ;60:a4:23:ff:fe:02:38:af ;60:a4:23:ff:fe:02:38:af !;60:a4:23:ff:fe:02:38:60 !;60:a4:23:ff:fe:02:38:60  ;60:a4:23:ff:fe:02:38:60  ;60:a4:23:ff:fe:02:38:60  ;60:a4:23:ff:fe:02:38:60  ;60:a4:23:ff:fe:02:38:60  ;60:a4:23:ff:fe:02:38:60 ;60:a4:23:ff:fe:02:38:60 !;60:a4:23:ff:fe:02:38:59 !;60:a4:23:ff:fe:02:38:59  ;60:a4:23:ff:fe:02:38:59  ;60:a4:23:ff:fe:02:38:59  ;60:a4:23:ff:fe:02:38:59  ;60:a4:23:ff:fe:02:38:59  ;60:a4:23:ff:fe:02:38:59 ;60:a4:23:ff:fe:02:38:59  !;60:a4:23:ff:fe:02:36:a0 ) 3 q Q 1gH(W8 x_>  _ A {[; | | | | | | | | | | | | | | | | | | | | | | | | | | | | { { { { { { { { { { { { { { { {         C; 00:15:8d:00:05:1e:13:46B; 00:15:8d:00:05:4a:73:c3A; 00:15:8d:00:05:4a:73:c3@; 00:15:8d:00:05:4a:73:c3?; 00:15:8d:00:05:4a:73:c3>; 00:15:8d:00:05:4a:73:c3mJ;60:a4:23:ff:fe:02:3b:b4 I;60:a4:23:ff:fe:02:3b:b4 H;60:a4:23:ff:fe:02:3b:b4 G;60:a4:23:ff:fe:02:3b:b4 F;60:a4:23:ff:fe:02:3b:b4 E;60:a4:23:ff:fe:02:3b:b4 D;60:a4:23:ff:fe:02:3b:b4 C;60:a4:23:ff:fe:02:3b:b4 ux;60:a4:23:ff:fe:02:2f:4d w;60:a4:23:ff:fe:02:2f:4d v;60:a4:23:ff:fe:02:2f:4d *8;60:a4:23:ff:fe:02:36:9c 7;60:a4:23:ff:fe:02:36:9c 6;60:a4:23:ff:fe:02:36:9c 5;60:a4:23:ff:fe:02:36:9c 4;60:a4:23:ff:fe:02:36:9c 3;60:a4:23:ff:fe:02:36:9c 2;60:a4:23:ff:fe:02:36:9c 1;60:a4:23:ff:fe:02:36:9c RH; 00:15:8d:00:05:1e:13:46G; 00:15:8d:00:05:1e:13:46F; 00:15:8d:00:05:1e:13:46E; 00:15:8d:00:05:1e:13:46D; 00:15:8d:00:05:1e:13:46 \};60:a4:23:ff:fe:02:2f:4d |;60:a4:23:ff:fe:02:2f:4d {;60:a4:23:ff:fe:02:2f:4d z;60:a4:23:ff:fe:02:2f:4d 
y;60:a4:23:ff:fe:02:2f:4d  =; 00:15:8d:00:05:4a:73:c3<; 00:15:8d:00:05:4a:73:c3;; 00:15:8d:00:05:1e:0e:32:; 00:15:8d:00:05:1e:0e:329; 00:15:8d:00:05:1e:0e:328; 00:15:8d:00:05:1e:0e:327; 00:15:8d:00:05:1e:0e:326; 00:15:8d:00:05:1e:0e:325;60:a4:23:ff:fe:02:32:30 4;60:a4:23:ff:fe:02:32:30 3;60:a4:23:ff:fe:02:32:30 2;60:a4:23:ff:fe:02:32:30 1;60:a4:23:ff:fe:02:32:30 0;60:a4:23:ff:fe:02:32:30 /;60:a4:23:ff:fe:02:32:30 .;60:a4:23:ff:fe:02:32:30 K u V 6  y Y 8 zZ:`@ _?|\<}]=|\<?~] =``````````````````````````````````````````````v;60:a4:23:ff:fe:02:2f:42 u;60:a4:23:ff:fe:02:2f:42 t;60:a4:23:ff:fe:02:2f:42 s;60:a4:23:ff:fe:02:2f:42 r;60:a4:23:ff:fe:02:2f:42 q;60:a4:23:ff:fe:02:2f:42 p;60:a4:23:ff:fe:02:2f:42 ;60:a4:23:ff:fe:02:39:7b ;60:a4:23:ff:fe:02:39:7b ;60:a4:23:ff:fe:02:39:7b ;60:a4:23:ff:fe:02:39:7b ;60:a4:23:ff:fe:02:39:7b ;60:a4:23:ff:fe:02:39:7b ;60:a4:23:ff:fe:02:39:7b ;60:a4:23:ff:fe:02:36:24 ~;60:a4:23:ff:fe:02:36:24 };60:a4:23:ff:fe:02:36:24 |;60:a4:23:ff:fe:02:36:24 {;60:a4:23:ff:fe:02:36:24 z;60:a4:23:ff:fe:02:36:24 y;60:a4:23:ff:fe:02:36:24 x;60:a4:23:ff:fe:02:36:24 w;60:a4:23:ff:fe:02:2f:42 _;60:a4:23:ff:fe:02:36:93 ^;60:a4:23:ff:fe:02:36:93 ];60:a4:23:ff:fe:02:36:93 \;60:a4:23:ff:fe:02:36:93 [;60:a4:23:ff:fe:02:36:93 Z;60:a4:23:ff:fe:02:36:93 Y;60:a4:23:ff:fe:02:36:93 X;60:a4:23:ff:fe:02:36:93 /;60:a4:23:ff:fe:02:34:91 .;60:a4:23:ff:fe:02:34:91 -;60:a4:23:ff:fe:02:34:91 ,;60:a4:23:ff:fe:02:34:91 +;60:a4:23:ff:fe:02:34:91 *;60:a4:23:ff:fe:02:34:91 );60:a4:23:ff:fe:02:34:91 (;60:a4:23:ff:fe:02:34:91 ;60:a4:23:ff:fe:02:39:7b ;68:0a:e2:ff:fe:70:00:69 ;68:0a:e2:ff:fe:70:00:69 ;68:0a:e2:ff:fe:70:00:69 ;68:0a:e2:ff:fe:70:00:69 ;68:0a:e2:ff:fe:70:00:69 ;58:8e:81:ff:fe:15:e3:ff ;58:8e:81:ff:fe:15:e3:ff~; 58:8e:81:ff:fe:15:e3:ff};58:8e:81:ff:fe:15:e3:ff|;58:8e:81:ff:fe:15:e3:ff {;58:8e:81:ff:fe:15:e3:ffz; 58:8e:81:ff:fe:15:e3:ffy;58:8e:81:ff:fe:15:e3:ffx;58:8e:81:ff:fe:15:e3:ff w;58:8e:81:ff:fe:15:e3:ffv; 58:8e:81:ff:fe:15:e3:ffu;58:8e:81:ff:fe:15:e3:fft; 
58:8e:81:ff:fe:15:e3:ff s; 58:8e:81:ff:fe:15:e3:ffr; 58:8e:81:ff:fe:15:e3:ffq; 58:8e:81:ff:fe:15:e3:ff \);60:a4:23:ff:fe:02:51:70  ;60:a4:23:ff:fe:02:38:60 ;60:a4:23:ff:fe:02:38:60 ;60:a4:23:ff:fe:02:38:60 ;60:a4:23:ff:fe:02:38:60 ;60:a4:23:ff:fe:02:38:60 ;60:a4:23:ff:fe:02:38:60 ;60:a4:23:ff:fe:02:38:60 ;60:a4:23:ff:fe:02:38:60 .;60:a4:23:ff:fe:02:51:70 -;60:a4:23:ff:fe:02:51:70 ,;60:a4:23:ff:fe:02:51:70 +;60:a4:23:ff:fe:02:51:70 *;60:a4:23:ff:fe:02:51:70  _ wkF! xT0U <  $e iE! 2 ` :M(G  ^ O  O+l@ os 5 \ 7  zL)r Z* ^: !#; 60:a4:23:ff:fe:02:38:60 l#;60:a4:23:ff:fe:02:38:60 k";60:a4:23:ff:fe:02:38:60 m";60:a4:23:ff:fe:02:38:60 l";60:a4:23:ff:fe:02:38:60  n ;60:a4:23:ff:fe:02:38:60 a ";60:a4:23:ff:fe:02:39:7b ";60:a4:23:ff:fe:02:39:7b  m$;60:a4:23:ff:fe:02:38:60 f";60:a4:23:ff:fe:02:54:1e  n";60:a4:23:ff:fe:02:51:70  n";60:a4:23:ff:fe:02:3b:b4  n#; 60:a4:23:ff:fe:02:54:1e %w#;60:a4:23:ff:fe:02:54:1e %v#; 60:a4:23:ff:fe:02:3b:b4  #;60:a4:23:ff:fe:02:3b:b4  #; 60:a4:23:ff:fe:02:51:70 %5#;60:a4:23:ff:fe:02:51:70 %4$;60:a4:23:ff:fe:02:38:af %g$;60:a4:23:ff:fe:02:38:af V";60:a4:23:ff:fe:02:39:7b $;60:a4:23:ff:fe:02:39:7b $;60:a4:23:ff:fe:02:39:7b $;60:a4:23:ff:fe:02:54:1e ^$;60:a4:23:ff:fe:02:54:1e %z";60:a4:23:ff:fe:02:38:af %h$;60:a4:23:ff:fe:02:38:60 g$;60:a4:23:ff:fe:02:38:af ";60:a4:23:ff:fe:02:54:1e ";60:a4:23:ff:fe:02:54:1e ";60:a4:23:ff:fe:02:51:70 ";60:a4:23:ff:fe:02:51:70 $;60:a4:23:ff:fe:02:51:70 _$;60:a4:23:ff:fe:02:51:70 `$;60:a4:23:ff:fe:02:38:60 %9";60:a4:23:ff:fe:02:38:af %i$;60:a4:23:ff:fe:02:39:7b $;60:a4:23:ff:fe:02:3b:b4 $$;60:a4:23:ff:fe:02:3b:b4 $";60:a4:23:ff:fe:02:3b:b4 $";60:a4:23:ff:fe:02:3b:b4 $$;60:a4:23:ff:fe:02:51:70 a#;60:a4:23:ff:fe:02:38:af @ s#;60:a4:23:ff:fe:02:38:af @ r#;60:a4:23:ff:fe:02:38:af @ t "%;60:a4:23:ff:fe:02:38:af @$$;60:a4:23:ff:fe:02:54:1e %y";60:a4:23:ff:fe:02:38:af  n";60:a4:23:ff:fe:02:39:7b  m$;60:a4:23:ff:fe:02:3b:b4 $#; 60:a4:23:ff:fe:02:38:af #;60:a4:23:ff:fe:02:38:af %;60:a4:23:ff:fe:02:54:1e @%{ 
;60:a4:23:ff:fe:02:38:af [#;60:a4:23:ff:fe:02:38:60 @ c#;60:a4:23:ff:fe:02:38:60 @ b#;60:a4:23:ff:fe:02:38:60 @ d#;60:a4:23:ff:fe:02:39:7b %;60:a4:23:ff:fe:02:51:70 @b%;60:a4:23:ff:fe:02:3b:b4 @$";60:a4:23:ff:fe:02:3b:b4  4!";60:a4:23:ff:fe:02:38:60  4 ";60:a4:23:ff:fe:02:39:7b  m#; 60:a4:23:ff:fe:02:39:7b %;60:a4:23:ff:fe:02:39:7b @%;60:a4:23:ff:fe:02:38:60 @$a ";60:a4:23:ff:fe:02:38:af  4&!;60:a4:23:ff:fe:02:54:1e a!;60:a4:23:ff:fe:02:54:1e `$;60:a4:23:ff:fe:02:51:70 @ [$;60:a4:23:ff:fe:02:51:70 @ Z$;60:a4:23:ff:fe:02:51:70 @ \ 2 $;60:a4:23:ff:fe:02:38:60 $_$;60:a4:23:ff:fe:02:3b:b4 $$;60:a4:23:ff:fe:02:38:af $$;60:a4:23:ff:fe:02:39:7b $;60:a4:23:ff:fe:02:51:70 %^$;60:a4:23:ff:fe:02:54:1e %x!;60:a4:23:ff:fe:02:51:70 R!;60:a4:23:ff:fe:02:51:70 Q$;60:a4:23:ff:fe:02:3b:b4 @ $;60:a4:23:ff:fe:02:3b:b4 @ $;60:a4:23:ff:fe:02:3b:b4 @ /%;60:a4:23:ff:fe:02:39:7b @ m/$;60:a4:23:ff:fe:02:38:59 %N$;60:a4:23:ff:fe:02:38:59 %M%;60:a4:23:ff:fe:02:39:7b @ m%;60:a4:23:ff:fe:02:39:7b @ m!;60:a4:23:ff:fe:02:3b:b4 $;60:a4:23:ff:fe:02:38:59 @ 1$;60:a4:23:ff:fe:02:38:59 @ 0$;60:a4:23:ff:fe:02:38:59 @ 2%;60:a4:23:ff:fe:02:38:59 @$$;60:a4:23:ff:fe:02:38:59 $$;60:a4:23:ff:fe:02:38:59  B _ 3nP, ?  ! 
n ` y \ 4 UBb: J  lHp(b[33333nh(&&& $; 60:a4:23:ff:fe:02:39:7b  #;60:a4:23:ff:fe:02:39:7b &F4h; 900:15:8d:00:05:4a:73:c3lumi.sensor_motion.aq2#;60:a4:23:ff:fe:02:36:93 @ $;60:a4:23:ff:fe:02:36:93 @  ; ec:1b:bd:ff:fe:94:18:a4 ; 58:8e:81:ff:fe:15:e3:ff!W(e;60:a4:23:ff:fe:02:36:a0 GL-B-008P#^;60:a4:23:ff:fe:02:36:24 @ $];60:a4:23:ff:fe:02:36:24 @ $\;60:a4:23:ff:fe:02:36:24 @ H;60:a4:23:ff:fe:02:36:24 'G;60:a4:23:ff:fe:02:36:24 GLEDOPTO(F;60:a4:23:ff:fe:02:36:24 GL-B-008P(n;60:a4:23:ff:fe:02:36:93 GL-B-008P 'f;60:a4:23:ff:fe:02:36:a0 GLEDOPTO ;  cc:cc:cc:ff:fe:29:2d:ab ; cc:cc:cc:ff:fe:29:2d:ab $i;60:a4:23:ff:fe:02:36:a0 @ $h;60:a4:23:ff:fe:02:36:a0 @ g;60:a4:23:ff:fe:02:36:a0  a; 80:4b:50:ff:fe:41:59:63  `;80:4b:50:ff:fe:41:59:63 'o;60:a4:23:ff:fe:02:36:93 GLEDOPTO&#j;60:a4:23:ff:fe:02:36:a0 @ p;60:a4:23:ff:fe:02:36:93 t ;; 60:a4:23:ff:fe:02:36:93  :;60:a4:23:ff:fe:02:36:93  <[ r;80:4b:50:ff:fe:41:67:d4  f; 80:4b:50:ff:fe:41:58:f3  e;80:4b:50:ff:fe:41:58:f3  $$";80:4b:50:ff:fe:41:59:63 @ (!;80:4b:50:ff:fe:41:59:63 GL-B-001P $H$;60:a4:23:ff:fe:02:36:93 @ a; ec:1b:bd:ff:fe:37:72:7e ;'D;80:4b:50:ff:fe:41:58:f3 GLEDOPTO$#;80:4b:50:ff:fe:41:59:63 @ !B;60:a4:23:ff:fe:02:38:59 A;60:a4:23:ff:fe:02:38:59 #?;60:a4:23:ff:fe:02:2f:96  9#H;80:4b:50:ff:fe:41:58:f3 @ $G;80:4b:50:ff:fe:41:58:f3 @ $F;80:4b:50:ff:fe:41:58:f3 @ (E;80:4b:50:ff:fe:41:58:f3 GL-B-001P0; 58:8e:81:ff:fe:15:e3:ff3 /; 58:8e:81:ff:fe:15:e3:ff1 9 [;60:a4:23:ff:fe:02:38:59 A ; N00:15:8d:00:05:4a:73:c3! (!!G$ !=d ! 
L \; 60:a4:23:ff:fe:02:38:59 ;  cc:cc:cc:ff:fe:29:2d:abi s; 80:4b:50:ff:fe:41:67:d4  y; cc:cc:cc:ff:fe:29:27:01 x; cc:cc:cc:ff:fe:29:4d:f4v;  cc:cc:cc:ff:fe:29:27:01u;  cc:cc:cc:ff:fe:29:4d:f4 4; 60:a4:23:ff:fe:02:2f:4d  3;60:a4:23:ff:fe:02:2f:4d !!;  ec:1b:bd:ff:fe:94:18:a4 ; 60:a4:23:ff:fe:02:34:91  ~;60:a4:23:ff:fe:02:34:91 p ; 60:a4:23:ff:fe:02:36:9c  ;60:a4:23:ff:fe:02:36:9c $ l; 60:a4:23:ff:fe:02:38:60  k;60:a4:23:ff:fe:02:38:60  A; 60:a4:23:ff:fe:02:38:af  @;60:a4:23:ff:fe:02:38:af  x , { " q  e YMG>4*xxxxxxxxxx"ffffK@ U8 ;;;60:a4:23:ff:fe:02:39:7b7f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:94:18:a4%.U7 ;;;60:a4:23:ff:fe:02:39:7b7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:3b:b4%MU6 ;;;60:a4:23:ff:fe:02:39:7b7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:9c%`U5 ;;;60:a4:23:ff:fe:02:39:7b7f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:33:a0:04S%-T4 ;;;60:a4:23:ff:fe:02:39:7b7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:96m%0T3 ;;;60:a4:23:ff:fe:02:39:7b7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:59XM%?U2 ;;;60:a4:23:ff:fe:02:39:7b7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:24M%U1 ;;;60:a4:23:ff:fe:02:39:7b7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:a0G%T0 ;;;60:a4:23:ff:fe:02:39:7b7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:4d=%CT/ ;;;60:a4:23:ff:fe:02:39:7b7f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:58:f3%FU. 
;;;60:a4:23:ff:fe:02:39:7b7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:60K%T- ;;;60:a4:23:ff:fe:02:39:7b7f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:67:d4%,T, ;;;60:a4:23:ff:fe:02:39:7b7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:30:39%CR+ ;;;60:a4:23:ff:fe:02:39:7b7f:4f:6a:3c:05:70:f6:65cc:cc:cc:ff:fe:a5:f2:83$V* ;;;cc:cc:cc:ff:fe:a5:f2:837f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:32:30%U) ;;;cc:cc:cc:ff:fe:a5:f2:837f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:af%`U( ;;;cc:cc:cc:ff:fe:a5:f2:837f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:34:91%_U' ;;;cc:cc:cc:ff:fe:a5:f2:837f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:51:70Ջ%GU& ;;;cc:cc:cc:ff:fe:a5:f2:837f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:54:1eq%GV% ;;;cc:cc:cc:ff:fe:a5:f2:837f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:39:7b%V$ ;;;cc:cc:cc:ff:fe:a5:f2:837f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:93%%U# ;;;cc:cc:cc:ff:fe:a5:f2:837f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:3b:b4%PU" ;;;cc:cc:cc:ff:fe:a5:f2:837f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:9c%XU! ;;;cc:cc:cc:ff:fe:a5:f2:837f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:33:a0:04S% E "usN(3 qK' qK L&jY/  h& u S / } [ 9   o M +  [ 7 n#; 80:4b:50:ff:fe:41:58:f3 #;80:4b:50:ff:fe:41:58:f3 #; 80:4b:50:ff:fe:41:67:d4 #;80:4b:50:ff:fe:41:67:d4 ";80:4b:50:ff:fe:41:67:d4 %~%;80:4b:50:ff:fe:41:58:f3 @U$;80:4b:50:ff:fe:41:58:f3 $;80:4b:50:ff:fe:41:58:f3 %L$;80:4b:50:ff:fe:41:58:f3 % ";80:4b:50:ff:fe:41:58:f3 P";80:4b:50:ff:fe:41:58:f3 %#; 80:4b:50:ff:fe:41:59:63 #;80:4b:50:ff:fe:41:59:63 %;80:4b:50:ff:fe:41:59:63 @1$;80:4b:50:ff:fe:41:59:63 $;80:4b:50:ff:fe:41:59:63 %K$;80:4b:50:ff:fe:41:59:63 %J";80:4b:50:ff:fe:41:59:63 2";80:4b:50:ff:fe:41:59:63 %;80:4b:50:ff:fe:41:67:d4 @$;80:4b:50:ff:fe:41:67:d4 %b";80:4b:50:ff:fe:41:59:63  ";80:4b:50:ff:fe:41:67:d4 %$;80:4b:50:ff:fe:41:67:d4 %d$;80:4b:50:ff:fe:41:67:d4 %c$;80:4b:50:ff:fe:41:67:d4 #; cc:cc:cc:ff:fe:29:2d:ab";80:4b:50:ff:fe:41:59:63 !#; ec:1b:bd:ff:fe:94:18:a4%+!; ec:1b:bd:ff:fe:37:72:7e 4A!; ec:1b:bd:ff:fe:94:18:a4 4!; 68:0a:e2:ff:fe:70:00:69 
%|";68:0a:e2:ff:fe:70:00:69 %q";68:0a:e2:ff:fe:70:00:69  o";68:0a:e2:ff:fe:70:00:69 ";68:0a:e2:ff:fe:70:00:69  4$; ec:1b:bd:ff:fe:94:18:a4@ u$; ec:1b:bd:ff:fe:94:18:a4@ t $; ec:1b:bd:ff:fe:94:18:a4@ v#; ec:1b:bd:ff:fe:94:18:a4%}#; ec:1b:bd:ff:fe:94:18:a4%]#; ec:1b:bd:ff:fe:94:18:a4%\";  ec:1b:bd:ff:fe:94:18:a4!"; ec:1b:bd:ff:fe:94:18:a4 !; ec:1b:bd:ff:fe:94:18:a4%B!; ec:1b:bd:ff:fe:94:18:a4!; ec:1b:bd:ff:fe:94:18:a4 n!; ec:1b:bd:ff:fe:94:18:a4c!; ec:1b:bd:ff:fe:37:72:7e$!; ec:1b:bd:ff:fe:37:72:7e$!; ec:1b:bd:ff:fe:37:72:7e ^a ; ec:1b:bd:ff:fe:37:72:7e !!; ec:1b:bd:ff:fe:33:a0:04!; ec:1b:bd:ff:fe:33:a0:04!; ec:1b:bd:ff:fe:33:a0:04 ^]!; ec:1b:bd:ff:fe:33:a0:04!; ec:1b:bd:ff:fe:33:a0:04 ki %;80:4b:50:ff:fe:41:59:63 @ #%;80:4b:50:ff:fe:41:59:63 @ "#; cc:cc:cc:ff:fe:29:4d:f4!#; cc:cc:cc:ff:fe:29:4d:f4 !; cc:cc:cc:ff:fe:29:4d:f4!; cc:cc:cc:ff:fe:29:4d:f4 $;80:4b:50:ff:fe:41:59:63 %;80:4b:50:ff:fe:41:58:f3 @ G%;80:4b:50:ff:fe:41:58:f3 @ F";80:4b:50:ff:fe:41:58:f3 E";80:4b:50:ff:fe:41:58:f3 D#; cc:cc:cc:ff:fe:29:2d:ab%#; cc:cc:cc:ff:fe:29:2d:ab$!; cc:cc:cc:ff:fe:29:2d:ab!; cc:cc:cc:ff:fe:29:2d:abQ$;80:4b:50:ff:fe:41:58:f3 %f%;80:4b:50:ff:fe:41:58:f3 @ H%;80:4b:50:ff:fe:41:67:d4 @ ";80:4b:50:ff:fe:41:67:d4 ";80:4b:50:ff:fe:41:67:d4 #; cc:cc:cc:ff:fe:29:27:01##; cc:cc:cc:ff:fe:29:27:01"!; cc:cc:cc:ff:fe:29:27:01!; cc:cc:cc:ff:fe:29:27:01%;80:4b:50:ff:fe:41:67:d4 @ %;80:4b:50:ff:fe:41:67:d4 @ $;60:a4:23:ff:fe:02:54:1e @ p$;60:a4:23:ff:fe:02:54:1e @ o ,MB 6 / ~ % r  f ZRI>2&shQd ;;;60:a4:23:ff:fe:02:38:af7f:4f:6a:3c:05:70:f6:65cc:cc:cc:ff:fe:a5:f2:83$cVc ;;;60:a4:23:ff:fe:02:36:a07f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:32:30%Ub ;;;60:a4:23:ff:fe:02:36:a07f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:af%kUa ;;;60:a4:23:ff:fe:02:36:a07f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:42_%EU` ;;;60:a4:23:ff:fe:02:36:a07f:4f:6a:3c:05:70:f6:6568:0a:e2:ff:fe:70:00:69%NV_ ;;;60:a4:23:ff:fe:02:36:a07f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:34:91U^ 
;;;60:a4:23:ff:fe:02:36:a07f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:51:70Ջ%BU] ;;;60:a4:23:ff:fe:02:36:a07f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:54:1eq%9V\ ;;;60:a4:23:ff:fe:02:36:a07f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:39:7b%U[ ;;;60:a4:23:ff:fe:02:36:a07f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:59:63%IUZ ;;;60:a4:23:ff:fe:02:36:a07f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:37:72:7eG%;VY ;;;60:a4:23:ff:fe:02:36:a07f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:93%%UX ;;;60:a4:23:ff:fe:02:36:a07f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:94:18:a4%,UW ;;;60:a4:23:ff:fe:02:36:a07f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:3b:b4%aUV ;;;60:a4:23:ff:fe:02:36:a07f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:9c%?UU ;;;60:a4:23:ff:fe:02:36:a07f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:33:a0:04S%;TT ;;;60:a4:23:ff:fe:02:36:a07f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:59XM%5US ;;;60:a4:23:ff:fe:02:36:a07f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:24M%TR ;;;60:a4:23:ff:fe:02:36:a07f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:60K%LTQ ;;;60:a4:23:ff:fe:02:36:a07f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:30:39%5RP ;;;60:a4:23:ff:fe:02:36:a07f:4f:6a:3c:05:70:f6:65cc:cc:cc:ff:fe:a5:f2:83$VO ;;;60:a4:23:ff:fe:02:36:247f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:32:30%UN ;;;60:a4:23:ff:fe:02:36:247f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:af%@UM ;;;60:a4:23:ff:fe:02:36:247f:4f:6a:3c:05:70:f6:6568:0a:e2:ff:fe:70:00:69%?VL ;;;60:a4:23:ff:fe:02:36:247f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:34:91%UK ;;;60:a4:23:ff:fe:02:36:247f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:54:1eq%?VJ ;;;60:a4:23:ff:fe:02:36:247f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:39:7b%UI ;;;60:a4:23:ff:fe:02:36:247f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:59:63%=VH ;;;60:a4:23:ff:fe:02:36:247f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:93%%UG ;;;60:a4:23:ff:fe:02:36:247f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:3b:b4%IUF ;;;60:a4:23:ff:fe:02:36:247f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:9c%ITE 
;;;60:a4:23:ff:fe:02:36:247f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:59XM%BUD ;;;60:a4:23:ff:fe:02:36:247f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:a0G%TC ;;;60:a4:23:ff:fe:02:36:247f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:60K%PRB ;;;60:a4:23:ff:fe:02:36:247f:4f:6a:3c:05:70:f6:65cc:cc:cc:ff:fe:a5:f2:83$UA ;;;60:a4:23:ff:fe:02:39:7b7f:4f:6a:3c:05:70:f6:6500:15:8d:00:05:1e:0e:32U@ ;;;60:a4:23:ff:fe:02:39:7b7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:32:30}V? ;;;60:a4:23:ff:fe:02:39:7b7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:af%U> ;;;60:a4:23:ff:fe:02:39:7b7f:4f:6a:3c:05:70:f6:6568:0a:e2:ff:fe:70:00:69%KU= ;;;60:a4:23:ff:fe:02:39:7b7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:34:91%cU< ;;;60:a4:23:ff:fe:02:39:7b7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:51:70Ջ%OU; ;;;60:a4:23:ff:fe:02:39:7b7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:54:1eq%8U: ;;;60:a4:23:ff:fe:02:39:7b7f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:59:63%5V9 ;;;60:a4:23:ff:fe:02:39:7b7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:93%% bmI> > ^ s`:  [ 5 a j L  `    %uNiB7G$ p`9sLwP+*   ;% D &+# w; 60:a4:23:ff:fe:02:54:1e  v;60:a4:23:ff:fe:02:54:1e  c; 60:a4:23:ff:fe:02:32:30  b;60:a4:23:ff:fe:02:32:30  h;60:a4:23:ff:fe:02:38:af !X; 00:15:8d:00:05:1e:0e:32\!W; 00:15:8d:00:05:1e:0e:32k V; 00:15:8d:00:05:1e:0e:32!U; 00:15:8d:00:05:1e:0e:32 ET; V00:15:8d:00:05:1e:0e:32! 
!!@$d)ke!\f+= !#o;60:a4:23:ff:fe:02:2f:4d j$n;60:a4:23:ff:fe:02:2f:4d  B; ec:1b:bd:ff:fe:94:18:a4 |; 68:0a:e2:ff:fe:70:00:69 B"}; ec:1b:bd:ff:fe:94:18:a4#d;80:4b:50:ff:fe:41:67:d4 j$c;80:4b:50:ff:fe:41:67:d4 !b; 80:4b:50:ff:fe:41:67:d4 ;60:a4:23:ff:fe:02:36:9c !;80:4b:50:ff:fe:41:67:d4 ~;80:4b:50:ff:fe:41:67:d4 !i;60:a4:23:ff:fe:02:38:af #g;60:a4:23:ff:fe:02:38:af c#f;80:4b:50:ff:fe:41:58:f3 "a;60:a4:23:ff:fe:02:38:60 @#;;60:a4:23:ff:fe:02:36:24 "_;60:a4:23:ff:fe:02:38:60 $z;60:a4:23:ff:fe:02:54:1e #]; ec:1b:bd:ff:fe:94:18:a4#\; ec:1b:bd:ff:fe:94:18:a4!G;60:a4:23:ff:fe:02:34:91 !;60:a4:23:ff:fe:02:36:9c  C;60:a4:23:ff:fe:02:2f:42 Lq;68:0a:e2:ff:fe:70:00:69 #y;60:a4:23:ff:fe:02:54:1e #N;60:a4:23:ff:fe:02:38:59 j$M;60:a4:23:ff:fe:02:38:59 #F;60:a4:23:ff:fe:02:2f:96 ;80:4b:50:ff:fe:41:58:f3 #L;80:4b:50:ff:fe:41:58:f3 j#E;60:a4:23:ff:fe:02:30:39  k;60:a4:23:ff:fe:02:2f:4d L#K;80:4b:50:ff:fe:41:59:63 j$J;80:4b:50:ff:fe:41:59:63  A;60:a4:23:ff:fe:02:2f:96 L#m;60:a4:23:ff:fe:02:2f:4d !+; ec:1b:bd:ff:fe:94:18:a4 E"2;60:a4:23:ff:fe:02:34:91 @#Q;60:a4:23:ff:fe:02:34:91 "0;60:a4:23:ff:fe:02:34:91  {; ec:1b:bd:ff:fe:37:72:7ez;  ec:1b:bd:ff:fe:37:72:7e ""^;60:a4:23:ff:fe:02:51:70 &; 00:15:8d:00:05:4a:73:c3 "Z;60:a4:23:ff:fe:02:3b:b4 @#Y;60:a4:23:ff:fe:02:3b:b4 j$X;60:a4:23:ff:fe:02:3b:b4 #W;60:a4:23:ff:fe:02:3b:b4 !V; 60:a4:23:ff:fe:02:3b:b4 !S;60:a4:23:ff:fe:02:3b:b4 Q;60:a4:23:ff:fe:02:3b:b4 V; 58:8e:81:ff:fe:15:e3:ff!W"s;60:a4:23:ff:fe:02:38:af @"q;60:a4:23:ff:fe:02:38:af  7!$?;60:a4:23:ff:fe:02:2f:96 $ ;80:4b:50:ff:fe:41:58:f3 "{;60:a4:23:ff:fe:02:54:1e @"x;60:a4:23:ff:fe:02:54:1e !v; 60:a4:23:ff:fe:02:38:59 ";60:a4:23:ff:fe:02:32:30 @"p;60:a4:23:ff:fe:02:2f:4d @"l;60:a4:23:ff:fe:02:2f:4d  '; 00:15:8d:00:05:4a:73:c3!#@;60:a4:23:ff:fe:02:2f:96 j"y;60:a4:23:ff:fe:02:38:59 @ "j;60:a4:23:ff:fe:02:2f:96 @"f;60:a4:23:ff:fe:02:2f:96  N; 58:8e:81:ff:fe:15:e3:ff!W'Y; 00:15:8d:00:05:1e:0e:32@##9;60:a4:23:ff:fe:02:38:60  !t; 60:a4:23:ff:fe:02:36:9c A%; N00:15:8d:00:05:4a:73:c3! 
(!!G$ !=d !W; 58:8e:81:ff:fe:15:e3:ff  5; 60:a4:23:ff:fe:02:51:70  4;60:a4:23:ff:fe:02:51:70 "u;60:a4:23:ff:fe:02:36:9c @"#;60:a4:23:ff:fe:02:30:39 @"!;60:a4:23:ff:fe:02:30:39 b =; 60:a4:23:ff:fe:02:30:39  <;60:a4:23:ff:fe:02:30:39  [; 60:a4:23:ff:fe:02:36:24  Z;60:a4:23:ff:fe:02:36:24  Y; 60:a4:23:ff:fe:02:36:a0  X;60:a4:23:ff:fe:02:36:a0  [; 60:a4:23:ff:fe:02:2f:42  Z;60:a4:23:ff:fe:02:2f:42  P; 60:a4:23:ff:fe:02:2f:96  O;60:a4:23:ff:fe:02:2f:96  -dQF < 2 * w  k _RKC:.z ldTm ;;;ec:1b:bd:ff:fe:37:72:7e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:60K%Tl ;;;ec:1b:bd:ff:fe:37:72:7e7f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:67:d4% Tk ;;;ec:1b:bd:ff:fe:37:72:7e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:30:39%dVj ;;;ec:1b:bd:ff:fe:33:a0:047f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:42_%Vi ;;;ec:1b:bd:ff:fe:33:a0:047f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:51:70Ջ%Vh ;;;ec:1b:bd:ff:fe:33:a0:047f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:54:1eq%Vg ;;;ec:1b:bd:ff:fe:33:a0:047f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:37:72:7eG%Vf ;;;ec:1b:bd:ff:fe:33:a0:047f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:37:72:7eG%Ue ;;;ec:1b:bd:ff:fe:33:a0:047f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:93%%(Vd ;;;ec:1b:bd:ff:fe:33:a0:047f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:3b:b4%Uc ;;;ec:1b:bd:ff:fe:33:a0:047f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:9c%kUb ;;;ec:1b:bd:ff:fe:33:a0:047f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:96m%Ta ;;;ec:1b:bd:ff:fe:33:a0:047f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:59XM%GT` ;;;ec:1b:bd:ff:fe:33:a0:047f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:4d=%kT_ ;;;ec:1b:bd:ff:fe:33:a0:047f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:58:f3%\T^ ;;;ec:1b:bd:ff:fe:33:a0:047f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:60K%8T] ;;;ec:1b:bd:ff:fe:33:a0:047f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:67:d4%@T\ ;;;ec:1b:bd:ff:fe:33:a0:047f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:30:39%tQ[ ;;;ec:1b:bd:ff:fe:33:a0:047f:4f:6a:3c:05:70:f6:65cc:cc:cc:ff:fe:a5:f2:83$VZ 
;;;60:a4:23:ff:fe:02:38:597f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:af%UY ;;;60:a4:23:ff:fe:02:38:597f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:42_%MVX ;;;60:a4:23:ff:fe:02:38:597f:4f:6a:3c:05:70:f6:6568:0a:e2:ff:fe:70:00:69%VW ;;;60:a4:23:ff:fe:02:38:597f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:51:70Ջ%UV ;;;60:a4:23:ff:fe:02:38:597f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:54:1eq%UU ;;;60:a4:23:ff:fe:02:38:597f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:39:7b%FVT ;;;60:a4:23:ff:fe:02:38:597f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:59:63%US ;;;60:a4:23:ff:fe:02:38:597f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:37:72:7eG%TUR ;;;60:a4:23:ff:fe:02:38:597f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:93%%FVQ ;;;60:a4:23:ff:fe:02:38:597f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:3b:b4%VP ;;;60:a4:23:ff:fe:02:38:597f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:9c%UO ;;;60:a4:23:ff:fe:02:38:597f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:33:a0:04S%sTN ;;;60:a4:23:ff:fe:02:38:597f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:96m%wTM ;;;60:a4:23:ff:fe:02:38:597f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:24M%ATL ;;;60:a4:23:ff:fe:02:38:597f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:a0G%* tV 4    vXf l N 0   2 
2X;60:a4:23:ff:fe:02:51:70T;60:a4:23:ff:fe:02:3b:b41;60:a4:23:ff:fe:02:3b:b40;60:a4:23:ff:fe:02:3b:b4/;60:a4:23:ff:fe:02:3b:b4.;60:a4:23:ff:fe:02:39:7b;60:a4:23:ff:fe:02:39:7b;60:a4:23:ff:fe:02:39:7b;60:a4:23:ff:fe:02:51:70W;60:a4:23:ff:fe:02:51:70V;60:a4:23:ff:fe:02:51:70U;68:0a:e2:ff:fe:70:00:69;68:0a:e2:ff:fe:70:00:69;68:0a:e2:ff:fe:70:00:69;68:0a:e2:ff:fe:70:00:69;68:0a:e2:ff:fe:70:00:69;68:0a:e2:ff:fe:70:00:69;60:a4:23:ff:fe:02:54:1e;60:a4:23:ff:fe:02:54:1e;60:a4:23:ff:fe:02:51:70X;68:0a:e2:ff:fe:70:00:69;68:0a:e2:ff:fe:70:00:69;60:a4:23:ff:fe:02:51:70[;60:a4:23:ff:fe:02:51:70Z;60:a4:23:ff:fe:02:51:70Y;60:a4:23:ff:fe:02:54:1e;60:a4:23:ff:fe:02:54:1e;60:a4:23:ff:fe:02:39:7b;60:a4:23:ff:fe:02:54:1e;60:a4:23:ff:fe:02:54:1e;60:a4:23:ff:fe:02:54:1e;60:a4:23:ff:fe:02:54:1e;60:a4:23:ff:fe:02:54:1e;60:a4:23:ff:fe:02:39:7b;60:a4:23:ff:fe:02:39:7b;60:a4:23:ff:fe:02:39:7b;60:a4:23:ff:fe:02:39:7b;60:a4:23:ff:fe:02:39:7b;60:a4:23:ff:fe:02:54:1e;60:a4:23:ff:fe:02:39:7b;60:a4:23:ff:fe:02:54:1e;60:a4:23:ff:fe:02:39:7b;60:a4:23:ff:fe:02:54:1e;60:a4:23:ff:fe:02:54:1e;60:a4:23:ff:fe:02:3b:b45;60:a4:23:ff:fe:02:3b:b44;60:a4:23:ff:fe:02:3b:b43;60:a4:23:ff:fe:02:3b:b42;60:a4:23:ff:fe:02:39:7b;60:a4:23:ff:fe:02:39:7b;60:a4:23:ff:fe:02:39:7b;60:a4:23:ff:fe:02:39:7b;60:a4:23:ff:fe:02:39:7b;60:a4:23:ff:fe:02:39:7b;60:a4:23:ff:fe:02:51:70\;60:a4:23:ff:fe:02:3b:b46;60:a4:23:ff:fe:02:54:1e;60:a4:23:ff:fe:02:54:1e;60:a4:23:ff:fe:02:51:70^;60:a4:23:ff:fe:02:51:70];60:a4:23:ff:fe:02:54:1e;60:a4:23:ff:fe:02:54:1e;60:a4:23:ff:fe:02:3b:b48;60:a4:23:ff:fe:02:3b:b47;60:a4:23:ff:fe:02:51:70_;60:a4:23:ff:fe:02:3b:b49;60:a4:23:ff:fe:02:54:1e;60:a4:23:ff:fe:02:3b:b4;;60:a4:23:ff:fe:02:3b:b4:;60:a4:23:ff:fe:02:51:70c;60:a4:23:ff:fe:02:51:70b;60:a4:23:ff:fe:02:51:70a;60:a4:23:ff:fe:02:51:70`;60:a4:23:ff:fe:02:3b:b4@;60:a4:23:ff:fe:02:3b:b4?;60:a4:23:ff:fe:02:3b:b4>;60:a4:23:ff:fe:02:3b:b4=;60:a4:23:ff:fe:02:3b:b4<;68:0a:e2:ff:fe:70:00:69;60:a4:23:ff:fe:02:39:7b;60:a4:23:ff:fe:02:39:7b;60:a4:23:ff:fe:02:
39:7b;60:a4:23:ff:fe:02:39:7b;60:a4:23:ff:fe:02:39:7b;60:a4:23:ff:fe:02:39:7b;68:0a:e2:ff:fe:70:00:69 ;68:0a:e2:ff:fe:70:00:69 ;68:0a:e2:ff:fe:70:00:69 ;68:0a:e2:ff:fe:70:00:69 ;68:0a:e2:ff:fe:70:00:69 ;68:0a:e2:ff:fe:70:00:69;68:0a:e2:ff:fe:70:00:69;68:0a:e2:ff:fe:70:00:69;68:0a:e2:ff:fe:70:00:69;68:0a:e2:ff:fe:70:00:69;60:a4:23:ff:fe:02:54:1e;60:a4:23:ff:fe:02:54:1e;60:a4:23:ff:fe:02:54:1e;60:a4:23:ff:fe:02:54:1e;60:a4:23:ff:fe:02:51:70h;60:a4:23:ff:fe:02:51:70g;60:a4:23:ff:fe:02:51:70f;60:a4:23:ff:fe:02:51:70e;60:a4:23:ff:fe:02:51:70d;68:0a:e2:ff:fe:70:00:6932%+, c~^> } ] =  ~ _ @ !  e F '  j J * jK, oO/ i I ) nN.mM- lL, _?!;80:4b:50:ff:fe:41:58:f3  ;80:4b:50:ff:fe:41:58:f3 ;80:4b:50:ff:fe:41:58:f3 ;80:4b:50:ff:fe:41:58:f3 ;80:4b:50:ff:fe:41:58:f3 ;80:4b:50:ff:fe:41:58:f3 ;80:4b:50:ff:fe:41:58:f3 ;80:4b:50:ff:fe:41:58:f3 ;80:4b:50:ff:fe:41:59:63 ;80:4b:50:ff:fe:41:59:63 ;80:4b:50:ff:fe:41:59:63 ;80:4b:50:ff:fe:41:59:63 ;80:4b:50:ff:fe:41:59:63 ;80:4b:50:ff:fe:41:59:63 ;80:4b:50:ff:fe:41:59:63 ;80:4b:50:ff:fe:41:59:63 ;80:4b:50:ff:fe:41:67:d4 ;80:4b:50:ff:fe:41:67:d4 ;80:4b:50:ff:fe:41:67:d4 ;80:4b:50:ff:fe:41:67:d4  ;80:4b:50:ff:fe:41:67:d4  ;80:4b:50:ff:fe:41:67:d4  ;80:4b:50:ff:fe:41:67:d4  ;80:4b:50:ff:fe:41:67:d4 y;60:a4:23:ff:fe:02:30:39 x;60:a4:23:ff:fe:02:30:39 w;60:a4:23:ff:fe:02:30:39 v;60:a4:23:ff:fe:02:30:39 u;60:a4:23:ff:fe:02:30:39 t;60:a4:23:ff:fe:02:30:39 s;60:a4:23:ff:fe:02:30:39 r;60:a4:23:ff:fe:02:30:39 q; ec:1b:bd:ff:fe:33:a0:04p; ec:1b:bd:ff:fe:33:a0:04 o; ec:1b:bd:ff:fe:33:a0:04n; ec:1b:bd:ff:fe:33:a0:04m; ec:1b:bd:ff:fe:33:a0:04l; ec:1b:bd:ff:fe:33:a0:04k; ec:1b:bd:ff:fe:33:a0:04j; ec:1b:bd:ff:fe:33:a0:04;60:a4:23:ff:fe:02:38:59 ;60:a4:23:ff:fe:02:38:59 ;60:a4:23:ff:fe:02:38:59 ~;60:a4:23:ff:fe:02:38:59 };60:a4:23:ff:fe:02:38:59 |;60:a4:23:ff:fe:02:38:59 {;60:a4:23:ff:fe:02:38:59 z;60:a4:23:ff:fe:02:38:59 a;60:a4:23:ff:fe:02:54:1e `;60:a4:23:ff:fe:02:54:1e _;60:a4:23:ff:fe:02:54:1e ^;60:a4:23:ff:fe:02:54:1e ];60:a4:23:ff:fe:02:54:1e 
\;60:a4:23:ff:fe:02:54:1e [;60:a4:23:ff:fe:02:54:1e Z;60:a4:23:ff:fe:02:54:1e Y; ec:1b:bd:ff:fe:37:72:7eX; ec:1b:bd:ff:fe:37:72:7e W; ec:1b:bd:ff:fe:37:72:7eV; ec:1b:bd:ff:fe:37:72:7eU; ec:1b:bd:ff:fe:37:72:7eT; ec:1b:bd:ff:fe:37:72:7eS; ec:1b:bd:ff:fe:37:72:7eR; ec:1b:bd:ff:fe:37:72:7eQ; ec:1b:bd:ff:fe:94:18:a4P; ec:1b:bd:ff:fe:94:18:a4 O; ec:1b:bd:ff:fe:94:18:a4N; ec:1b:bd:ff:fe:94:18:a4M; ec:1b:bd:ff:fe:94:18:a4L; ec:1b:bd:ff:fe:94:18:a4K; ec:1b:bd:ff:fe:94:18:a4J; ec:1b:bd:ff:fe:94:18:a4I; ec:1b:bd:ff:fe:94:18:a4H;60:a4:23:ff:fe:02:38:af G;60:a4:23:ff:fe:02:38:af F;60:a4:23:ff:fe:02:38:af E;60:a4:23:ff:fe:02:38:af D;60:a4:23:ff:fe:02:38:af C;60:a4:23:ff:fe:02:38:af B;60:a4:23:ff:fe:02:38:af A;60:a4:23:ff:fe:02:38:af @;60:a4:23:ff:fe:02:2f:96 ?;60:a4:23:ff:fe:02:2f:96 >;60:a4:23:ff:fe:02:2f:96 =;60:a4:23:ff:fe:02:2f:96 <;60:a4:23:ff:fe:02:2f:96 ;;60:a4:23:ff:fe:02:2f:96 :;60:a4:23:ff:fe:02:2f:96 9;60:a4:23:ff:fe:02:2f:96 );60:a4:23:ff:fe:02:36:a0 (;60:a4:23:ff:fe:02:36:a0 ';60:a4:23:ff:fe:02:36:a0 &;60:a4:23:ff:fe:02:36:a0 %;60:a4:23:ff:fe:02:36:a0 $;60:a4:23:ff:fe:02:36:a0 #;60:a4:23:ff:fe:02:36:a0 ";60:a4:23:ff:fe:02:36:a0 0;60:a4:23:ff:fe:02:51:70 /;60:a4:23:ff:fe:02:51:70  )MB 7 , | $ s  f [OF=/}#pUC ;;;80:4b:50:ff:fe:41:58:f37f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:af%|UB ;;;80:4b:50:ff:fe:41:58:f37f:4f:6a:3c:05:70:f6:6568:0a:e2:ff:fe:70:00:69%kVA ;;;80:4b:50:ff:fe:41:58:f37f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:54:1eq%U@ ;;;80:4b:50:ff:fe:41:58:f37f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:39:7b%IV? 
;;;80:4b:50:ff:fe:41:58:f37f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:59:63U> ;;;80:4b:50:ff:fe:41:58:f37f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:37:72:7eG%MU= ;;;80:4b:50:ff:fe:41:58:f37f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:94:18:a4%dV< ;;;80:4b:50:ff:fe:41:58:f37f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:3b:b4%V; ;;;80:4b:50:ff:fe:41:58:f37f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:9c%V: ;;;80:4b:50:ff:fe:41:58:f37f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:33:a0:04S%T9 ;;;80:4b:50:ff:fe:41:58:f37f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:96m%vU8 ;;;80:4b:50:ff:fe:41:58:f37f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:59XM%T7 ;;;80:4b:50:ff:fe:41:58:f37f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:4d=%\T6 ;;;80:4b:50:ff:fe:41:58:f37f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:60K%sU5 ;;;80:4b:50:ff:fe:41:58:f37f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:67:d4%T4 ;;;80:4b:50:ff:fe:41:58:f37f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:30:39%\V3 ;;;80:4b:50:ff:fe:41:59:637f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:af%U2 ;;;80:4b:50:ff:fe:41:59:637f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:51:70Ջ%jU1 ;;;80:4b:50:ff:fe:41:59:637f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:54:1eq%pU0 ;;;80:4b:50:ff:fe:41:59:637f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:37:72:7eG%HU/ ;;;80:4b:50:ff:fe:41:59:637f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:93%%  l N 0 dF( tVD&r6jL.X:8  ~ ` B $  p R 4 bfH* v P 2 n n;80:4b:50:ff:fe:41:58:f3C;80:4b:50:ff:fe:41:58:f3B;cc:cc:cc:ff:fe:a5:f2:83;ec:1b:bd:ff:fe:33:a0:04;ec:1b:bd:ff:fe:33:a0:04;ec:1b:bd:ff:fe:33:a0:04;ec:1b:bd:ff:fe:33:a0:04;ec:1b:bd:ff:fe:33:a0:04;ec:1b:bd:ff:fe:33:a0:04;ec:1b:bd:ff:fe:94:18:a4+;ec:1b:bd:ff:fe:94:18:a4*;ec:1b:bd:ff:fe:94:18:a4);ec:1b:bd:ff:fe:94:18:a4(;ec:1b:bd:ff:fe:94:18:a4';ec:1b:bd:ff:fe:94:18:a4&;ec:1b:bd:ff:fe:94:18:a4%;ec:1b:bd:ff:fe:94:18:a4$;ec:1b:bd:ff:fe:94:18:a4#;ec:1b:bd:ff:fe:94:18:a4";ec:1b:bd:ff:fe:94:18:a4!;ec:1b:bd:ff:fe:94:18:a4 
;ec:1b:bd:ff:fe:94:18:a4;ec:1b:bd:ff:fe:94:18:a4;ec:1b:bd:ff:fe:33:a0:04;ec:1b:bd:ff:fe:33:a0:04;80:4b:50:ff:fe:41:67:d4;80:4b:50:ff:fe:41:67:d4;80:4b:50:ff:fe:41:67:d4;80:4b:50:ff:fe:41:67:d4;80:4b:50:ff:fe:41:67:d4;80:4b:50:ff:fe:41:67:d4;80:4b:50:ff:fe:41:67:d4;80:4b:50:ff:fe:41:67:d4;80:4b:50:ff:fe:41:67:d4;80:4b:50:ff:fe:41:67:d4;cc:cc:cc:ff:fe:a5:f2:83;cc:cc:cc:ff:fe:a5:f2:83;cc:cc:cc:ff:fe:a5:f2:83;cc:cc:cc:ff:fe:a5:f2:83;cc:cc:cc:ff:fe:a5:f2:83;cc:cc:cc:ff:fe:a5:f2:83;cc:cc:cc:ff:fe:a5:f2:83;80:4b:50:ff:fe:41:59:63";80:4b:50:ff:fe:41:67:d4!;80:4b:50:ff:fe:41:67:d4 ;80:4b:50:ff:fe:41:67:d4;80:4b:50:ff:fe:41:67:d4;80:4b:50:ff:fe:41:67:d4;80:4b:50:ff:fe:41:67:d4;80:4b:50:ff:fe:41:67:d4;80:4b:50:ff:fe:41:67:d4;ec:1b:bd:ff:fe:37:72:7e;ec:1b:bd:ff:fe:37:72:7e;ec:1b:bd:ff:fe:37:72:7e;ec:1b:bd:ff:fe:37:72:7e;ec:1b:bd:ff:fe:37:72:7e;ec:1b:bd:ff:fe:37:72:7e;ec:1b:bd:ff:fe:37:72:7e;ec:1b:bd:ff:fe:37:72:7e;ec:1b:bd:ff:fe:37:72:7e;ec:1b:bd:ff:fe:37:72:7e;ec:1b:bd:ff:fe:37:72:7e;ec:1b:bd:ff:fe:37:72:7e;ec:1b:bd:ff:fe:37:72:7e;ec:1b:bd:ff:fe:37:72:7e;ec:1b:bd:ff:fe:37:72:7e;ec:1b:bd:ff:fe:37:72:7e;ec:1b:bd:ff:fe:33:a0:04;ec:1b:bd:ff:fe:33:a0:04;ec:1b:bd:ff:fe:33:a0:04;ec:1b:bd:ff:fe:33:a0:04;ec:1b:bd:ff:fe:33:a0:04;ec:1b:bd:ff:fe:33:a0:04;ec:1b:bd:ff:fe:33:a0:04;ec:1b:bd:ff:fe:33:a0:04;ec:1b:bd:ff:fe:94:18:a4-;ec:1b:bd:ff:fe:94:18:a4,;80:4b:50:ff:fe:41:58:f3A;80:4b:50:ff:fe:41:58:f3@;80:4b:50:ff:fe:41:58:f3?;80:4b:50:ff:fe:41:58:f3>;80:4b:50:ff:fe:41:58:f3=;80:4b:50:ff:fe:41:58:f3<;80:4b:50:ff:fe:41:58:f3;;80:4b:50:ff:fe:41:58:f3:;80:4b:50:ff:fe:41:58:f39;80:4b:50:ff:fe:41:58:f38;80:4b:50:ff:fe:41:58:f37;80:4b:50:ff:fe:41:58:f36;80:4b:50:ff:fe:41:58:f35;80:4b:50:ff:fe:41:58:f34;80:4b:50:ff:fe:41:59:633;80:4b:50:ff:fe:41:59:632;80:4b:50:ff:fe:41:59:631;80:4b:50:ff:fe:41:59:630;80:4b:50:ff:fe:41:59:63/;80:4b:50:ff:fe:41:59:63.;80:4b:50:ff:fe:41:59:63-;80:4b:50:ff:fe:41:59:63,;80:4b:50:ff:fe:41:59:63+;80:4b:50:ff:fe:41:59:63*;80:4b:50:ff:fe:41:59:63);80:4b:50:ff:fe:41:59:63(;
80:4b:50:ff:fe:41:59:63';80:4b:50:ff:fe:41:59:63&;80:4b:50:ff:fe:41:59:63%;80:4b:50:ff:fe:41:59:63$;80:4b:50:ff:fe:41:59:63#;cc:cc:cc:ff:fe:a5:f2:83;cc:cc:cc:ff:fe:a5:f2:83;cc:cc:cc:ff:fe:a5:f2:83;cc:cc:cc:ff:fe:a5:f2:83;cc:cc:cc:ff:fe:a5:f2:83;cc:cc:cc:ff:fe:a5:f2:83;cc:cc:cc:ff:fe:a5:f2:83;cc:cc:cc:ff:fe:a5:f2:83)))3 ,PC 7 + w  m  c VI<1&uhZV< ;;;60:a4:23:ff:fe:02:3b:b47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:51:70Ջ%V; ;;;60:a4:23:ff:fe:02:3b:b47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:54:1eq%V: ;;;60:a4:23:ff:fe:02:3b:b47f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:59:63%V9 ;;;60:a4:23:ff:fe:02:3b:b47f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:37:72:7eG%V8 ;;;60:a4:23:ff:fe:02:3b:b47f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:94:18:a4%V7 ;;;60:a4:23:ff:fe:02:3b:b47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:9c%V6 ;;;60:a4:23:ff:fe:02:3b:b47f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:33:a0:04S%U5 ;;;60:a4:23:ff:fe:02:3b:b47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:96m%U4 ;;;60:a4:23:ff:fe:02:3b:b47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:59XM%T3 ;;;60:a4:23:ff:fe:02:3b:b47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:24M%OU2 ;;;60:a4:23:ff:fe:02:3b:b47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:4d=%U1 ;;;60:a4:23:ff:fe:02:3b:b47f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:58:f3%U0 ;;;60:a4:23:ff:fe:02:3b:b47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:60K%U/ ;;;60:a4:23:ff:fe:02:3b:b47f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:67:d4%U. 
;;;60:a4:23:ff:fe:02:3b:b47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:30:39%U- ;;;ec:1b:bd:ff:fe:94:18:a47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:af%nV, ;;;ec:1b:bd:ff:fe:94:18:a47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:42_%U+ ;;;ec:1b:bd:ff:fe:94:18:a47f:4f:6a:3c:05:70:f6:6568:0a:e2:ff:fe:70:00:69%`V* ;;;ec:1b:bd:ff:fe:94:18:a47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:51:70Ջ%V) ;;;ec:1b:bd:ff:fe:94:18:a47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:54:1eq%V( ;;;ec:1b:bd:ff:fe:94:18:a47f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:59:63%U' ;;;ec:1b:bd:ff:fe:94:18:a47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:93%%oV& ;;;ec:1b:bd:ff:fe:94:18:a47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:3b:b4%V% ;;;ec:1b:bd:ff:fe:94:18:a47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:9c%U$ ;;;ec:1b:bd:ff:fe:94:18:a47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:96m%T# ;;;ec:1b:bd:ff:fe:94:18:a47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:59XM%aU" ;;;ec:1b:bd:ff:fe:94:18:a47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:4d=%U! ;;;ec:1b:bd:ff:fe:94:18:a47f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:58:f3%U ;;;ec:1b:bd:ff:fe:94:18:a47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:60K%T ;;;ec:1b:bd:ff:fe:94:18:a47f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:67:d4%}U ;;;ec:1b:bd:ff:fe:94:18:a47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:30:39%V ;;;60:a4:23:ff:fe:02:36:9c7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:af%V ;;;60:a4:23:ff:fe:02:36:9c7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:42_%V ;;;60:a4:23:ff:fe:02:36:9c7f:4f:6a:3c:05:70:f6:6568:0a:e2:ff:fe:70:00:69%U ;;;60:a4:23:ff:fe:02:36:9c7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:34:91%GU ;;;60:a4:23:ff:fe:02:36:9c7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:51:70Ջ%uU ;;;60:a4:23:ff:fe:02:36:9c7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:54:1eq%jV ;;;60:a4:23:ff:fe:02:36:9c7f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:59:63%U ;;;60:a4:23:ff:fe:02:36:9c7f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:94:18:a4%`V ;;;60:a4:23:ff:fe:02:36:9c7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:3b:b4%V 
;;;60:a4:23:ff:fe:02:36:9c7f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:33:a0:04S%U ;;;60:a4:23:ff:fe:02:36:9c7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:59XM%T ;;;60:a4:23:ff:fe:02:36:9c7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:24M%NT ;;;60:a4:23:ff:fe:02:36:9c7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:4d=%r ,PE 8 - z ! n  f \OB6(z"pe V@ ;;;60:a4:23:ff:fe:02:34:917f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:32:30%U? ;;;60:a4:23:ff:fe:02:34:917f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:39:7b%^U> ;;;60:a4:23:ff:fe:02:34:917f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:93%%mU= ;;;60:a4:23:ff:fe:02:34:917f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:3b:b4%IU< ;;;60:a4:23:ff:fe:02:34:917f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:9c%JU; ;;;60:a4:23:ff:fe:02:34:917f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:24M%U: ;;;60:a4:23:ff:fe:02:34:917f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:a0G%T9 ;;;60:a4:23:ff:fe:02:34:917f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:60K%8Q8 ;;;60:a4:23:ff:fe:02:34:917f:4f:6a:3c:05:70:f6:65cc:cc:cc:ff:fe:a5:f2:83[U7 ;;;60:a4:23:ff:fe:02:30:397f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:af%\V6 ;;;60:a4:23:ff:fe:02:30:397f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:42_%V5 ;;;60:a4:23:ff:fe:02:30:397f:4f:6a:3c:05:70:f6:6568:0a:e2:ff:fe:70:00:69%V4 ;;;60:a4:23:ff:fe:02:30:397f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:51:70Ջ%V3 ;;;60:a4:23:ff:fe:02:30:397f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:54:1eq%U2 ;;;60:a4:23:ff:fe:02:30:397f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:39:7b%BU1 ;;;60:a4:23:ff:fe:02:30:397f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:59:63%tV0 ;;;60:a4:23:ff:fe:02:30:397f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:37:72:7eG%U/ ;;;60:a4:23:ff:fe:02:30:397f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:93%%KV. 
;;;60:a4:23:ff:fe:02:30:397f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:94:18:a4%V- ;;;60:a4:23:ff:fe:02:30:397f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:3b:b4%U, ;;;60:a4:23:ff:fe:02:30:397f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:9c%PV+ ;;;60:a4:23:ff:fe:02:30:397f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:33:a0:04S%U* ;;;60:a4:23:ff:fe:02:30:397f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:96m%T) ;;;60:a4:23:ff:fe:02:30:397f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:59XM%SU( ;;;60:a4:23:ff:fe:02:30:397f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:4d=%T' ;;;60:a4:23:ff:fe:02:30:397f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:58:f3%XT& ;;;60:a4:23:ff:fe:02:30:397f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:60K%tT% ;;;60:a4:23:ff:fe:02:30:397f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:67:d4%vU$ ;;;60:a4:23:ff:fe:02:54:1e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:af%xV# ;;;60:a4:23:ff:fe:02:54:1e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:42_%U" ;;;60:a4:23:ff:fe:02:54:1e7f:4f:6a:3c:05:70:f6:6568:0a:e2:ff:fe:70:00:69%hV! 
;;;60:a4:23:ff:fe:02:54:1e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:51:70Ջ%U ;;;60:a4:23:ff:fe:02:54:1e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:39:7b%7U ;;;60:a4:23:ff:fe:02:54:1e7f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:59:63%]U ;;;60:a4:23:ff:fe:02:54:1e7f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:37:72:7eG%oU ;;;60:a4:23:ff:fe:02:54:1e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:93%%[V ;;;60:a4:23:ff:fe:02:54:1e7f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:94:18:a4%V ;;;60:a4:23:ff:fe:02:54:1e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:3b:b4%U ;;;60:a4:23:ff:fe:02:54:1e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:9c%]V ;;;60:a4:23:ff:fe:02:54:1e7f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:33:a0:04S%U ;;;60:a4:23:ff:fe:02:54:1e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:96m%T ;;;60:a4:23:ff:fe:02:54:1e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:59XM%uT ;;;60:a4:23:ff:fe:02:54:1e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:24M%3T ;;;60:a4:23:ff:fe:02:54:1e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:a0G%5 ,OG ; . { " n  b YNE7*vof U ;;;60:a4:23:ff:fe:02:54:1e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:4d=%U ;;;60:a4:23:ff:fe:02:54:1e7f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:58:f3%T ;;;60:a4:23:ff:fe:02:54:1e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:60K%~T ;;;60:a4:23:ff:fe:02:54:1e7f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:67:d4%^U ;;;60:a4:23:ff:fe:02:54:1e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:30:39%Q ;;;60:a4:23:ff:fe:02:54:1e7f:4f:6a:3c:05:70:f6:65cc:cc:cc:ff:fe:a5:f2:83$BU ;;;60:a4:23:ff:fe:02:2f:427f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:af%^U ;;;60:a4:23:ff:fe:02:2f:427f:4f:6a:3c:05:70:f6:6568:0a:e2:ff:fe:70:00:69%zV ;;;60:a4:23:ff:fe:02:2f:427f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:51:70Ջ%V ;;;60:a4:23:ff:fe:02:2f:427f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:54:1eq%U ;;;60:a4:23:ff:fe:02:2f:427f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:59:63%RV ;;;60:a4:23:ff:fe:02:2f:427f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:37:72:7eG%V 
;;;60:a4:23:ff:fe:02:2f:427f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:94:18:a4%V ;;;60:a4:23:ff:fe:02:2f:427f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:3b:b4%V ;;;60:a4:23:ff:fe:02:2f:427f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:9c%V ;;;60:a4:23:ff:fe:02:2f:427f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:33:a0:04S%U ;;;60:a4:23:ff:fe:02:2f:427f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:96m%T ;;;60:a4:23:ff:fe:02:2f:427f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:59XM%HT ;;;60:a4:23:ff:fe:02:2f:427f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:a0G%DU ;;;60:a4:23:ff:fe:02:2f:427f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:4d=%U ;;;60:a4:23:ff:fe:02:2f:427f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:58:f3%U ;;;60:a4:23:ff:fe:02:2f:427f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:60K%T~ ;;;60:a4:23:ff:fe:02:2f:427f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:67:d4%hU} ;;;60:a4:23:ff:fe:02:2f:427f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:30:39%T| ;;;60:a4:23:ff:fe:02:2f:4d7f:4f:6a:3c:05:70:f6:6500:15:8d:00:05:4a:73:c3u6U{ ;;;60:a4:23:ff:fe:02:2f:4d7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:af%sVz ;;;60:a4:23:ff:fe:02:2f:4d7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:42_%Uy ;;;60:a4:23:ff:fe:02:2f:4d7f:4f:6a:3c:05:70:f6:6568:0a:e2:ff:fe:70:00:69%kVx ;;;60:a4:23:ff:fe:02:2f:4d7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:51:70Ջ%Vw ;;;60:a4:23:ff:fe:02:2f:4d7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:54:1eq%Uv ;;;60:a4:23:ff:fe:02:2f:4d7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:39:7b%DUu ;;;60:a4:23:ff:fe:02:2f:4d7f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:59:63%pVt ;;;60:a4:23:ff:fe:02:2f:4d7f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:37:72:7eG%Us ;;;60:a4:23:ff:fe:02:2f:4d7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:93%%AVr ;;;60:a4:23:ff:fe:02:2f:4d7f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:94:18:a4%Vq ;;;60:a4:23:ff:fe:02:2f:4d7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:3b:b4%Up ;;;60:a4:23:ff:fe:02:2f:4d7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:9c%kVo ;;;60:a4:23:ff:fe:02:2f:4d7f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:33:a0:04S%Un 
;;;60:a4:23:ff:fe:02:2f:4d7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:96m%Tm ;;;60:a4:23:ff:fe:02:2f:4d7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:59XM%vTl ;;;60:a4:23:ff:fe:02:2f:4d7f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:58:f3%RTk ;;;60:a4:23:ff:fe:02:2f:4d7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:60K%mTj ;;;60:a4:23:ff:fe:02:2f:4d7f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:67:d4%oUi ;;;60:a4:23:ff:fe:02:2f:4d7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:30:39% ,PG < / { ! n  e ]TG:,ylf U ;;;60:a4:23:ff:fe:02:36:9c7f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:58:f3%U ;;;60:a4:23:ff:fe:02:36:9c7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:60K%U ;;;60:a4:23:ff:fe:02:36:9c7f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:67:d4%T ;;;60:a4:23:ff:fe:02:36:9c7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:30:39%fQ ;;;60:a4:23:ff:fe:02:36:9c7f:4f:6a:3c:05:70:f6:65cc:cc:cc:ff:fe:a5:f2:83aU ;;;60:a4:23:ff:fe:02:38:607f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:32:30% ;;;60:a4:23:ff:fe:02:3b:b47f:4f:6a:3c:05:70:f6:6568:0a:e2:ff:fe:70:00:69U= ;;;60:a4:23:ff:fe:02:3b:b47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:34:91%G -ePG : / | $ s  k c WLA5,|$qeU ;;;80:4b:50:ff:fe:41:67:d47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:93%%8U ;;;80:4b:50:ff:fe:41:67:d47f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:94:18:a4%ZV ;;;80:4b:50:ff:fe:41:67:d47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:3b:b4%V ;;;80:4b:50:ff:fe:41:67:d47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:9c%U ;;;80:4b:50:ff:fe:41:67:d47f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:33:a0:04S%jT ;;;80:4b:50:ff:fe:41:67:d47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:96m%UT ;;;80:4b:50:ff:fe:41:67:d47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:59XM%jT ;;;80:4b:50:ff:fe:41:67:d47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:4d=%zU ;;;80:4b:50:ff:fe:41:67:d47f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:58:f3%T ;;;80:4b:50:ff:fe:41:67:d47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:60K%FT ;;;80:4b:50:ff:fe:41:67:d47f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:30:39%~V 
;;;68:0a:e2:ff:fe:70:00:697f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:af%U ;;;68:0a:e2:ff:fe:70:00:697f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:42_%fU ;;;68:0a:e2:ff:fe:70:00:697f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:51:70Ջ%6U ;;;68:0a:e2:ff:fe:70:00:697f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:54:1eq%]U ;;;68:0a:e2:ff:fe:70:00:697f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:39:7b%6U ;;;68:0a:e2:ff:fe:70:00:697f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:59:63%/U ;;;68:0a:e2:ff:fe:70:00:697f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:37:72:7eG%1U ;;;68:0a:e2:ff:fe:70:00:697f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:93%%-U ;;;68:0a:e2:ff:fe:70:00:697f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:94:18:a4%/U ;;;68:0a:e2:ff:fe:70:00:697f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:3b:b4|V ;;;68:0a:e2:ff:fe:70:00:697f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:9c%U ;;;68:0a:e2:ff:fe:70:00:697f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:33:a0:04S%KT ;;;68:0a:e2:ff:fe:70:00:697f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:96m%XT ;;;68:0a:e2:ff:fe:70:00:697f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:59XM%kT ;;;68:0a:e2:ff:fe:70:00:697f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:24M%'T ;;;68:0a:e2:ff:fe:70:00:697f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:a0G%8T ;;;68:0a:e2:ff:fe:70:00:697f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:4d=%\T~ ;;;68:0a:e2:ff:fe:70:00:697f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:58:f3%ZU} ;;;68:0a:e2:ff:fe:70:00:697f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:60K%T| ;;;68:0a:e2:ff:fe:70:00:697f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:67:d4%aT{ ;;;68:0a:e2:ff:fe:70:00:697f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:30:39%wUz ;;;ec:1b:bd:ff:fe:37:72:7e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:af%Vy ;;;ec:1b:bd:ff:fe:37:72:7e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:42_%Ux ;;;ec:1b:bd:ff:fe:37:72:7e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:51:70Ջ%{Uw ;;;ec:1b:bd:ff:fe:37:72:7e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:54:1eq%GUv ;;;ec:1b:bd:ff:fe:37:72:7e7f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:59:63%Vu 
;;;ec:1b:bd:ff:fe:37:72:7e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:3b:b4%Ut ;;;ec:1b:bd:ff:fe:37:72:7e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:9c%TVs ;;;ec:1b:bd:ff:fe:37:72:7e7f:4f:6a:3c:05:70:f6:65ec:1b:bd:ff:fe:33:a0:04S%Ur ;;;ec:1b:bd:ff:fe:37:72:7e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:96m%Tq ;;;ec:1b:bd:ff:fe:37:72:7e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:38:59XM%&Tp ;;;ec:1b:bd:ff:fe:37:72:7e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:36:a0G%To ;;;ec:1b:bd:ff:fe:37:72:7e7f:4f:6a:3c:05:70:f6:6560:a4:23:ff:fe:02:2f:4d=%kTn ;;;ec:1b:bd:ff:fe:37:72:7e7f:4f:6a:3c:05:70:f6:6580:4b:50:ff:fe:41:58:f3%"zigpy-0.62.3/tests/databases/simple_v3.sql000066400000000000000000000121501456054056700204720ustar00rootroot00000000000000PRAGMA foreign_keys=OFF; PRAGMA user_version=3; BEGIN TRANSACTION; CREATE TABLE devices (ieee ieee, nwk, status); CREATE TABLE endpoints (ieee ieee, endpoint_id, profile_id, device_type device_type, status, FOREIGN KEY(ieee) REFERENCES devices(ieee) ON DELETE CASCADE); CREATE TABLE clusters (ieee ieee, endpoint_id, cluster, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints(ieee, endpoint_id) ON DELETE CASCADE); CREATE TABLE neighbors (device_ieee ieee NOT NULL, extended_pan_id ieee NOT NULL,ieee ieee NOT NULL, nwk INTEGER NOT NULL, struct INTEGER NOT NULL, permit_joining INTEGER NOT NULL, depth INTEGER NOT NULL, lqi INTEGER NOT NULL, FOREIGN KEY(device_ieee) REFERENCES devices(ieee) ON DELETE CASCADE); CREATE TABLE node_descriptors (ieee ieee, value, FOREIGN KEY(ieee) REFERENCES devices(ieee) ON DELETE CASCADE); CREATE TABLE output_clusters (ieee ieee, endpoint_id, cluster, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints(ieee, endpoint_id) ON DELETE CASCADE); CREATE TABLE attributes (ieee ieee, endpoint_id, cluster, attrid, value, FOREIGN KEY(ieee) REFERENCES devices(ieee) ON DELETE CASCADE); CREATE TABLE groups (group_id, name); CREATE TABLE group_members (group_id, ieee ieee, endpoint_id, FOREIGN KEY(group_id) REFERENCES 
groups(group_id) ON DELETE CASCADE, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints(ieee, endpoint_id) ON DELETE CASCADE); CREATE TABLE relays (ieee ieee, relays, FOREIGN KEY(ieee) REFERENCES devices(ieee) ON DELETE CASCADE); CREATE UNIQUE INDEX ieee_idx ON devices(ieee); CREATE UNIQUE INDEX endpoint_idx ON endpoints(ieee, endpoint_id); CREATE UNIQUE INDEX cluster_idx ON clusters(ieee, endpoint_id, cluster); CREATE INDEX neighbors_idx ON neighbors(device_ieee); CREATE UNIQUE INDEX node_descriptors_idx ON node_descriptors(ieee); CREATE UNIQUE INDEX output_cluster_idx ON output_clusters(ieee, endpoint_id, cluster); CREATE UNIQUE INDEX attribute_idx ON attributes(ieee, endpoint_id, cluster, attrid); CREATE UNIQUE INDEX group_idx ON groups(group_id); CREATE UNIQUE INDEX group_members_idx ON group_members(group_id, ieee, endpoint_id); CREATE UNIQUE INDEX relays_idx ON relays(ieee); INSERT INTO attributes VALUES('00:0d:6f:ff:fe:a6:11:7a',1,0,4,'IKEA of Sweden'); INSERT INTO attributes VALUES('00:0d:6f:ff:fe:a6:11:7a',1,0,5,'TRADFRI control outlet'); INSERT INTO attributes VALUES('ec:1b:bd:ff:fe:54:4f:40',1,0,4,'con'); INSERT INTO attributes VALUES('ec:1b:bd:ff:fe:54:4f:40',1,0,5,'ZBT-CCTLight-GLS0109'); INSERT INTO clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,0); INSERT INTO clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,3); INSERT INTO clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,4); INSERT INTO clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,4096); INSERT INTO clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,5); INSERT INTO clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,6); INSERT INTO clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,64636); INSERT INTO clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,8); INSERT INTO clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',242,33); INSERT INTO clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',1,0); INSERT INTO clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',1,2821); INSERT INTO clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',1,3); INSERT INTO clusters 
VALUES('ec:1b:bd:ff:fe:54:4f:40',1,4); INSERT INTO clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',1,4096); INSERT INTO clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',1,5); INSERT INTO clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',1,6); INSERT INTO clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',1,64642); INSERT INTO clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',1,768); INSERT INTO clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',1,8); INSERT INTO devices VALUES('00:0d:6f:ff:fe:a6:11:7a',48461,2); INSERT INTO devices VALUES('ec:1b:bd:ff:fe:54:4f:40',27932,2); INSERT INTO endpoints VALUES('00:0d:6f:ff:fe:a6:11:7a',1,260,266,1); INSERT INTO endpoints VALUES('00:0d:6f:ff:fe:a6:11:7a',242,41440,97,1); INSERT INTO endpoints VALUES('ec:1b:bd:ff:fe:54:4f:40',1,260,268,1); INSERT INTO endpoints VALUES('ec:1b:bd:ff:fe:54:4f:40',242,41440,97,1); INSERT INTO neighbors VALUES('00:0d:6f:ff:fe:a6:11:7a','81:b1:12:dc:9f:bd:f4:b6','ec:1b:bd:ff:fe:54:4f:40',27932,37,2,15,130); INSERT INTO neighbors VALUES('ec:1b:bd:ff:fe:54:4f:40','81:b1:12:dc:9f:bd:f4:b6','00:0d:6f:ff:fe:a6:11:7a',48461,37,2,15,132); INSERT INTO node_descriptors VALUES('00:0d:6f:ff:fe:a6:11:7a',X'01408e7c11525200002c520000'); INSERT INTO node_descriptors VALUES('ec:1b:bd:ff:fe:54:4f:40',X'01408e6811525200002c520000'); INSERT INTO output_clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,25); INSERT INTO output_clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,32); INSERT INTO output_clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,4096); INSERT INTO output_clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,5); INSERT INTO output_clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',242,33); INSERT INTO output_clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',1,10); INSERT INTO output_clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',1,25); INSERT INTO output_clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',242,33); INSERT INTO relays VALUES('00:0d:6f:ff:fe:a6:11:7a',X'00'); INSERT INTO relays VALUES('ec:1b:bd:ff:fe:54:4f:40',X'00'); 
COMMIT;zigpy-0.62.3/tests/databases/simple_v3_to_v4.sql000066400000000000000000000161421456054056700216120ustar00rootroot00000000000000PRAGMA foreign_keys=OFF; PRAGMA user_version=4; BEGIN TRANSACTION; CREATE TABLE devices (ieee ieee, nwk, status); INSERT INTO devices VALUES('00:0d:6f:ff:fe:a6:11:7a',48461,2); INSERT INTO devices VALUES('ec:1b:bd:ff:fe:54:4f:40',27932,2); CREATE TABLE endpoints (ieee ieee, endpoint_id, profile_id, device_type device_type, status, FOREIGN KEY(ieee) REFERENCES devices(ieee) ON DELETE CASCADE); INSERT INTO endpoints VALUES('00:0d:6f:ff:fe:a6:11:7a',1,260,266,1); INSERT INTO endpoints VALUES('00:0d:6f:ff:fe:a6:11:7a',242,41440,97,1); INSERT INTO endpoints VALUES('ec:1b:bd:ff:fe:54:4f:40',1,260,268,1); INSERT INTO endpoints VALUES('ec:1b:bd:ff:fe:54:4f:40',242,41440,97,1); CREATE TABLE clusters (ieee ieee, endpoint_id, cluster, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints(ieee, endpoint_id) ON DELETE CASCADE); INSERT INTO clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,0); INSERT INTO clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,3); INSERT INTO clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,4); INSERT INTO clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,4096); INSERT INTO clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,5); INSERT INTO clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,6); INSERT INTO clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,64636); INSERT INTO clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,8); INSERT INTO clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',242,33); INSERT INTO clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',1,0); INSERT INTO clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',1,2821); INSERT INTO clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',1,3); INSERT INTO clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',1,4); INSERT INTO clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',1,4096); INSERT INTO clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',1,5); INSERT INTO clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',1,6); INSERT INTO clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',1,64642); 
INSERT INTO clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',1,768); INSERT INTO clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',1,8); CREATE TABLE neighbors (device_ieee ieee NOT NULL, extended_pan_id ieee NOT NULL,ieee ieee NOT NULL, nwk INTEGER NOT NULL, struct INTEGER NOT NULL, permit_joining INTEGER NOT NULL, depth INTEGER NOT NULL, lqi INTEGER NOT NULL, FOREIGN KEY(device_ieee) REFERENCES devices(ieee) ON DELETE CASCADE); INSERT INTO neighbors VALUES('00:0d:6f:ff:fe:a6:11:7a','81:b1:12:dc:9f:bd:f4:b6','ec:1b:bd:ff:fe:54:4f:40',27932,37,2,15,130); INSERT INTO neighbors VALUES('ec:1b:bd:ff:fe:54:4f:40','81:b1:12:dc:9f:bd:f4:b6','00:0d:6f:ff:fe:a6:11:7a',48461,37,2,15,132); CREATE TABLE node_descriptors (ieee ieee, value, FOREIGN KEY(ieee) REFERENCES devices(ieee) ON DELETE CASCADE); INSERT INTO node_descriptors VALUES('00:0d:6f:ff:fe:a6:11:7a',X'01408e7c11525200002c520000'); INSERT INTO node_descriptors VALUES('ec:1b:bd:ff:fe:54:4f:40',X'01408e6811525200002c520000'); CREATE TABLE output_clusters (ieee ieee, endpoint_id, cluster, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints(ieee, endpoint_id) ON DELETE CASCADE); INSERT INTO output_clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,25); INSERT INTO output_clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,32); INSERT INTO output_clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,4096); INSERT INTO output_clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',1,5); INSERT INTO output_clusters VALUES('00:0d:6f:ff:fe:a6:11:7a',242,33); INSERT INTO output_clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',1,10); INSERT INTO output_clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',1,25); INSERT INTO output_clusters VALUES('ec:1b:bd:ff:fe:54:4f:40',242,33); CREATE TABLE attributes (ieee ieee, endpoint_id, cluster, attrid, value, FOREIGN KEY(ieee) REFERENCES devices(ieee) ON DELETE CASCADE); INSERT INTO attributes VALUES('00:0d:6f:ff:fe:a6:11:7a',1,0,4,'IKEA of Sweden'); INSERT INTO attributes VALUES('00:0d:6f:ff:fe:a6:11:7a',1,0,5,'TRADFRI control outlet'); INSERT INTO 
attributes VALUES('ec:1b:bd:ff:fe:54:4f:40',1,0,4,'con'); INSERT INTO attributes VALUES('ec:1b:bd:ff:fe:54:4f:40',1,0,5,'ZBT-CCTLight-GLS0109'); CREATE TABLE groups (group_id, name); CREATE TABLE group_members (group_id, ieee ieee, endpoint_id, FOREIGN KEY(group_id) REFERENCES groups(group_id) ON DELETE CASCADE, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints(ieee, endpoint_id) ON DELETE CASCADE); CREATE TABLE relays (ieee ieee, relays, FOREIGN KEY(ieee) REFERENCES devices(ieee) ON DELETE CASCADE); INSERT INTO relays VALUES('00:0d:6f:ff:fe:a6:11:7a',X'00'); INSERT INTO relays VALUES('ec:1b:bd:ff:fe:54:4f:40',X'00'); CREATE TABLE node_descriptors_v4 ( ieee ieee, logical_type INTEGER NOT NULL, complex_descriptor_available INTEGER NOT NULL, user_descriptor_available INTEGER NOT NULL, reserved INTEGER NOT NULL, aps_flags INTEGER NOT NULL, frequency_band INTEGER NOT NULL, mac_capability_flags INTEGER NOT NULL, manufacturer_code INTEGER NOT NULL, maximum_buffer_size INTEGER NOT NULL, maximum_incoming_transfer_size INTEGER NOT NULL, server_mask INTEGER NOT NULL, maximum_outgoing_transfer_size INTEGER NOT NULL, descriptor_capability_field INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices(ieee) ON DELETE CASCADE ); INSERT INTO node_descriptors_v4 VALUES('00:0d:6f:ff:fe:a6:11:7a',1,0,0,0,0,8,142,4476,82,82,11264,82,0); INSERT INTO node_descriptors_v4 VALUES('ec:1b:bd:ff:fe:54:4f:40',1,0,0,0,0,8,142,4456,82,82,11264,82,0); CREATE TABLE neighbors_v4 ( device_ieee ieee NOT NULL, extended_pan_id ieee NOT NULL, ieee ieee NOT NULL, nwk INTEGER NOT NULL, device_type INTEGER NOT NULL, rx_on_when_idle INTEGER NOT NULL, relationship INTEGER NOT NULL, reserved1 INTEGER NOT NULL, permit_joining INTEGER NOT NULL, reserved2 INTEGER NOT NULL, depth INTEGER NOT NULL, lqi INTEGER NOT NULL ); INSERT INTO neighbors_v4 VALUES('00:0d:6f:ff:fe:a6:11:7a','81:b1:12:dc:9f:bd:f4:b6','ec:1b:bd:ff:fe:54:4f:40',27932,1,1,2,0,2,0,15,130); INSERT INTO neighbors_v4 
VALUES('ec:1b:bd:ff:fe:54:4f:40','81:b1:12:dc:9f:bd:f4:b6','00:0d:6f:ff:fe:a6:11:7a',48461,1,1,2,0,2,0,15,132); CREATE UNIQUE INDEX ieee_idx ON devices(ieee); CREATE UNIQUE INDEX endpoint_idx ON endpoints(ieee, endpoint_id); CREATE UNIQUE INDEX cluster_idx ON clusters(ieee, endpoint_id, cluster); CREATE INDEX neighbors_idx ON neighbors(device_ieee); CREATE UNIQUE INDEX node_descriptors_idx ON node_descriptors(ieee); CREATE UNIQUE INDEX output_cluster_idx ON output_clusters(ieee, endpoint_id, cluster); CREATE UNIQUE INDEX attribute_idx ON attributes(ieee, endpoint_id, cluster, attrid); CREATE UNIQUE INDEX group_idx ON groups(group_id); CREATE UNIQUE INDEX group_members_idx ON group_members(group_id, ieee, endpoint_id); CREATE UNIQUE INDEX relays_idx ON relays(ieee); CREATE UNIQUE INDEX node_descriptors_idx_v4 ON node_descriptors_v4(ieee); CREATE INDEX neighbors_idx_v4 ON neighbors_v4(device_ieee); COMMIT;zigpy-0.62.3/tests/databases/simple_v5.sql000066400000000000000000000157101456054056700205010ustar00rootroot00000000000000PRAGMA foreign_keys=OFF; PRAGMA user_version=5; BEGIN TRANSACTION; CREATE TABLE devices_v5 ( ieee ieee NOT NULL, nwk INTEGER NOT NULL, status INTEGER NOT NULL ); INSERT INTO devices_v5 VALUES('00:0d:6f:ff:fe:a6:11:7a',48461,2); INSERT INTO devices_v5 VALUES('ec:1b:bd:ff:fe:54:4f:40',27932,2); CREATE TABLE endpoints_v5 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, profile_id INTEGER NOT NULL, device_type INTEGER NOT NULL, status INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v5(ieee) ON DELETE CASCADE ); INSERT INTO endpoints_v5 VALUES('00:0d:6f:ff:fe:a6:11:7a',1,260,266,1); INSERT INTO endpoints_v5 VALUES('00:0d:6f:ff:fe:a6:11:7a',242,41440,97,1); INSERT INTO endpoints_v5 VALUES('ec:1b:bd:ff:fe:54:4f:40',1,260,268,1); INSERT INTO endpoints_v5 VALUES('ec:1b:bd:ff:fe:54:4f:40',242,41440,97,1); CREATE TABLE in_clusters_v5 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, FOREIGN KEY(ieee, endpoint_id) 
REFERENCES endpoints_v5(ieee, endpoint_id) ON DELETE CASCADE ); INSERT INTO in_clusters_v5 VALUES('00:0d:6f:ff:fe:a6:11:7a',1,0); INSERT INTO in_clusters_v5 VALUES('00:0d:6f:ff:fe:a6:11:7a',1,3); INSERT INTO in_clusters_v5 VALUES('00:0d:6f:ff:fe:a6:11:7a',1,4); INSERT INTO in_clusters_v5 VALUES('00:0d:6f:ff:fe:a6:11:7a',1,4096); INSERT INTO in_clusters_v5 VALUES('00:0d:6f:ff:fe:a6:11:7a',1,5); INSERT INTO in_clusters_v5 VALUES('00:0d:6f:ff:fe:a6:11:7a',1,6); INSERT INTO in_clusters_v5 VALUES('00:0d:6f:ff:fe:a6:11:7a',1,64636); INSERT INTO in_clusters_v5 VALUES('00:0d:6f:ff:fe:a6:11:7a',1,8); INSERT INTO in_clusters_v5 VALUES('00:0d:6f:ff:fe:a6:11:7a',242,33); INSERT INTO in_clusters_v5 VALUES('ec:1b:bd:ff:fe:54:4f:40',1,0); INSERT INTO in_clusters_v5 VALUES('ec:1b:bd:ff:fe:54:4f:40',1,2821); INSERT INTO in_clusters_v5 VALUES('ec:1b:bd:ff:fe:54:4f:40',1,3); INSERT INTO in_clusters_v5 VALUES('ec:1b:bd:ff:fe:54:4f:40',1,4); INSERT INTO in_clusters_v5 VALUES('ec:1b:bd:ff:fe:54:4f:40',1,4096); INSERT INTO in_clusters_v5 VALUES('ec:1b:bd:ff:fe:54:4f:40',1,5); INSERT INTO in_clusters_v5 VALUES('ec:1b:bd:ff:fe:54:4f:40',1,6); INSERT INTO in_clusters_v5 VALUES('ec:1b:bd:ff:fe:54:4f:40',1,64642); INSERT INTO in_clusters_v5 VALUES('ec:1b:bd:ff:fe:54:4f:40',1,768); INSERT INTO in_clusters_v5 VALUES('ec:1b:bd:ff:fe:54:4f:40',1,8); CREATE TABLE neighbors_v5 ( device_ieee ieee NOT NULL, extended_pan_id ieee NOT NULL, ieee ieee NOT NULL, nwk INTEGER NOT NULL, device_type INTEGER NOT NULL, rx_on_when_idle INTEGER NOT NULL, relationship INTEGER NOT NULL, reserved1 INTEGER NOT NULL, permit_joining INTEGER NOT NULL, reserved2 INTEGER NOT NULL, depth INTEGER NOT NULL, lqi INTEGER NOT NULL, FOREIGN KEY(device_ieee) REFERENCES devices_v5(ieee) ON DELETE CASCADE ); INSERT INTO neighbors_v5 VALUES('00:0d:6f:ff:fe:a6:11:7a','81:b1:12:dc:9f:bd:f4:b6','ec:1b:bd:ff:fe:54:4f:40',27932,1,1,2,0,2,0,15,130); INSERT INTO neighbors_v5 
VALUES('ec:1b:bd:ff:fe:54:4f:40','81:b1:12:dc:9f:bd:f4:b6','00:0d:6f:ff:fe:a6:11:7a',48461,1,1,2,0,2,0,15,132); CREATE TABLE node_descriptors_v5 ( ieee ieee NOT NULL, logical_type INTEGER NOT NULL, complex_descriptor_available INTEGER NOT NULL, user_descriptor_available INTEGER NOT NULL, reserved INTEGER NOT NULL, aps_flags INTEGER NOT NULL, frequency_band INTEGER NOT NULL, mac_capability_flags INTEGER NOT NULL, manufacturer_code INTEGER NOT NULL, maximum_buffer_size INTEGER NOT NULL, maximum_incoming_transfer_size INTEGER NOT NULL, server_mask INTEGER NOT NULL, maximum_outgoing_transfer_size INTEGER NOT NULL, descriptor_capability_field INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v5(ieee) ON DELETE CASCADE ); INSERT INTO node_descriptors_v5 VALUES('00:0d:6f:ff:fe:a6:11:7a',1,0,0,0,0,8,142,4476,82,82,11264,82,0); INSERT INTO node_descriptors_v5 VALUES('ec:1b:bd:ff:fe:54:4f:40',1,0,0,0,0,8,142,4456,82,82,11264,82,0); CREATE TABLE out_clusters_v5 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v5(ieee, endpoint_id) ON DELETE CASCADE ); INSERT INTO out_clusters_v5 VALUES('00:0d:6f:ff:fe:a6:11:7a',1,25); INSERT INTO out_clusters_v5 VALUES('00:0d:6f:ff:fe:a6:11:7a',1,32); INSERT INTO out_clusters_v5 VALUES('00:0d:6f:ff:fe:a6:11:7a',1,4096); INSERT INTO out_clusters_v5 VALUES('00:0d:6f:ff:fe:a6:11:7a',1,5); INSERT INTO out_clusters_v5 VALUES('00:0d:6f:ff:fe:a6:11:7a',242,33); INSERT INTO out_clusters_v5 VALUES('ec:1b:bd:ff:fe:54:4f:40',1,10); INSERT INTO out_clusters_v5 VALUES('ec:1b:bd:ff:fe:54:4f:40',1,25); INSERT INTO out_clusters_v5 VALUES('ec:1b:bd:ff:fe:54:4f:40',242,33); CREATE TABLE attributes_cache_v5 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, attrid INTEGER NOT NULL, value BLOB NOT NULL, -- Quirks can create "virtual" clusters that won't be present in the DB but whose -- values still need to be cached FOREIGN KEY(ieee, endpoint_id) 
REFERENCES endpoints_v5(ieee, endpoint_id) ON DELETE CASCADE ); INSERT INTO attributes_cache_v5 VALUES('00:0d:6f:ff:fe:a6:11:7a',1,0,4,'IKEA of Sweden'); INSERT INTO attributes_cache_v5 VALUES('00:0d:6f:ff:fe:a6:11:7a',1,0,5,'TRADFRI control outlet'); INSERT INTO attributes_cache_v5 VALUES('ec:1b:bd:ff:fe:54:4f:40',1,0,4,'con'); INSERT INTO attributes_cache_v5 VALUES('ec:1b:bd:ff:fe:54:4f:40',1,0,5,'ZBT-CCTLight-GLS0109'); CREATE TABLE groups_v5 ( group_id INTEGER NOT NULL, name TEXT NOT NULL ); CREATE TABLE group_members_v5 ( group_id INTEGER NOT NULL, ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, FOREIGN KEY(group_id) REFERENCES groups_v5(group_id) ON DELETE CASCADE, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v5(ieee, endpoint_id) ON DELETE CASCADE ); CREATE TABLE relays_v5 ( ieee ieee NOT NULL, relays BLOB NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v5(ieee) ON DELETE CASCADE ); INSERT INTO relays_v5 VALUES('00:0d:6f:ff:fe:a6:11:7a',X'00'); INSERT INTO relays_v5 VALUES('ec:1b:bd:ff:fe:54:4f:40',X'00'); CREATE UNIQUE INDEX devices_idx_v5 ON devices_v5(ieee); CREATE UNIQUE INDEX endpoint_idx_v5 ON endpoints_v5(ieee, endpoint_id); CREATE UNIQUE INDEX in_clusters_idx_v5 ON in_clusters_v5(ieee, endpoint_id, cluster); CREATE INDEX neighbors_idx_v5 ON neighbors_v5(device_ieee); CREATE UNIQUE INDEX node_descriptors_idx_v5 ON node_descriptors_v5(ieee); CREATE UNIQUE INDEX out_clusters_idx_v5 ON out_clusters_v5(ieee, endpoint_id, cluster); CREATE UNIQUE INDEX attributes_idx_v5 ON attributes_cache_v5(ieee, endpoint_id, cluster, attrid); CREATE UNIQUE INDEX groups_idx_v5 ON groups_v5(group_id); CREATE UNIQUE INDEX group_members_idx_v5 ON group_members_v5(group_id, ieee, endpoint_id); CREATE UNIQUE INDEX relays_idx_v5 ON relays_v5(ieee); COMMIT; zigpy-0.62.3/tests/databases/simple_v8.sql000066400000000000000000000266541456054056700205150ustar00rootroot00000000000000PRAGMA user_version=8; PRAGMA foreign_keys=OFF; BEGIN TRANSACTION; CREATE TABLE devices_v8 ( 
ieee ieee NOT NULL, nwk INTEGER NOT NULL, status INTEGER NOT NULL, last_seen unix_timestamp NOT NULL ); INSERT INTO devices_v8 VALUES('00:12:4b:00:1c:a1:b8:46',0,2,1651119833288); INSERT INTO devices_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',44170,2,1651119836445); INSERT INTO devices_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',50064,2,1651119839551); INSERT INTO devices_v8 VALUES('00:0b:57:ff:fe:2b:d4:57',57374,2,1651119830048); CREATE TABLE endpoints_v8 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, profile_id INTEGER NOT NULL, device_type INTEGER NOT NULL, status INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v8(ieee) ON DELETE CASCADE ); INSERT INTO endpoints_v8 VALUES('00:12:4b:00:1c:a1:b8:46',1,260,48879,1); INSERT INTO endpoints_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,260,268,1); INSERT INTO endpoints_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',242,41440,97,1); INSERT INTO endpoints_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,260,268,1); INSERT INTO endpoints_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',242,41440,97,1); INSERT INTO endpoints_v8 VALUES('00:0b:57:ff:fe:2b:d4:57',1,260,2080,1); CREATE TABLE in_clusters_v8 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v8(ieee, endpoint_id) ON DELETE CASCADE ); INSERT INTO in_clusters_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,0); INSERT INTO in_clusters_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,3); INSERT INTO in_clusters_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,4); INSERT INTO in_clusters_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,5); INSERT INTO in_clusters_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,6); INSERT INTO in_clusters_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,8); INSERT INTO in_clusters_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,768); INSERT INTO in_clusters_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,2821); INSERT INTO in_clusters_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,4096); INSERT INTO in_clusters_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,64642); INSERT INTO in_clusters_v8 
VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,0); INSERT INTO in_clusters_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,3); INSERT INTO in_clusters_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,4); INSERT INTO in_clusters_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,5); INSERT INTO in_clusters_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,6); INSERT INTO in_clusters_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,8); INSERT INTO in_clusters_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,768); INSERT INTO in_clusters_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,2821); INSERT INTO in_clusters_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,4096); INSERT INTO in_clusters_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,64642); INSERT INTO in_clusters_v8 VALUES('00:0b:57:ff:fe:2b:d4:57',1,0); INSERT INTO in_clusters_v8 VALUES('00:0b:57:ff:fe:2b:d4:57',1,1); INSERT INTO in_clusters_v8 VALUES('00:0b:57:ff:fe:2b:d4:57',1,3); INSERT INTO in_clusters_v8 VALUES('00:0b:57:ff:fe:2b:d4:57',1,32); INSERT INTO in_clusters_v8 VALUES('00:0b:57:ff:fe:2b:d4:57',1,4096); CREATE TABLE neighbors_v8 ( device_ieee ieee NOT NULL, extended_pan_id ieee NOT NULL, ieee ieee NOT NULL, nwk INTEGER NOT NULL, device_type INTEGER NOT NULL, rx_on_when_idle INTEGER NOT NULL, relationship INTEGER NOT NULL, reserved1 INTEGER NOT NULL, permit_joining INTEGER NOT NULL, reserved2 INTEGER NOT NULL, depth INTEGER NOT NULL, lqi INTEGER NOT NULL, FOREIGN KEY(device_ieee) REFERENCES devices_v8(ieee) ON DELETE CASCADE ); INSERT INTO neighbors_v8 VALUES('00:12:4b:00:1c:a1:b8:46','bd:27:0b:38:37:95:dc:87','ec:1b:bd:ff:fe:2f:41:a4',44170,1,1,2,0,2,0,15,255); INSERT INTO neighbors_v8 VALUES('00:12:4b:00:1c:a1:b8:46','bd:27:0b:38:37:95:dc:87','cc:cc:cc:ff:fe:e6:8e:ca',50064,1,1,2,0,2,0,15,255); INSERT INTO neighbors_v8 VALUES('00:12:4b:00:1c:a1:b8:46','bd:27:0b:38:37:95:dc:87','00:0b:57:ff:fe:2b:d4:57',57374,2,0,1,0,0,0,1,255); INSERT INTO neighbors_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4','bd:27:0b:38:37:95:dc:87','00:12:4b:00:1c:a1:b8:46',0,0,1,2,0,2,0,0,253); INSERT INTO neighbors_v8 
VALUES('ec:1b:bd:ff:fe:2f:41:a4','bd:27:0b:38:37:95:dc:87','cc:cc:cc:ff:fe:e6:8e:ca',50064,1,1,0,0,2,0,15,255); INSERT INTO neighbors_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca','bd:27:0b:38:37:95:dc:87','00:12:4b:00:1c:a1:b8:46',0,0,1,0,0,2,0,0,255); INSERT INTO neighbors_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca','bd:27:0b:38:37:95:dc:87','ec:1b:bd:ff:fe:2f:41:a4',44170,1,1,2,0,2,0,15,255); CREATE TABLE node_descriptors_v8 ( ieee ieee NOT NULL, logical_type INTEGER NOT NULL, complex_descriptor_available INTEGER NOT NULL, user_descriptor_available INTEGER NOT NULL, reserved INTEGER NOT NULL, aps_flags INTEGER NOT NULL, frequency_band INTEGER NOT NULL, mac_capability_flags INTEGER NOT NULL, manufacturer_code INTEGER NOT NULL, maximum_buffer_size INTEGER NOT NULL, maximum_incoming_transfer_size INTEGER NOT NULL, server_mask INTEGER NOT NULL, maximum_outgoing_transfer_size INTEGER NOT NULL, descriptor_capability_field INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v8(ieee) ON DELETE CASCADE ); INSERT INTO node_descriptors_v8 VALUES('00:12:4b:00:1c:a1:b8:46',0,0,0,0,0,8,143,43981,82,128,11329,128,0); INSERT INTO node_descriptors_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,0,0,0,0,8,142,4688,82,82,11264,82,0); INSERT INTO node_descriptors_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,0,0,0,0,8,142,4688,82,82,11264,82,0); INSERT INTO node_descriptors_v8 VALUES('00:0b:57:ff:fe:2b:d4:57',2,0,0,0,0,8,128,4476,82,82,11264,82,0); CREATE TABLE out_clusters_v8 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v8(ieee, endpoint_id) ON DELETE CASCADE ); INSERT INTO out_clusters_v8 VALUES('00:12:4b:00:1c:a1:b8:46',1,1280); INSERT INTO out_clusters_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,10); INSERT INTO out_clusters_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,25); INSERT INTO out_clusters_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',242,33); INSERT INTO out_clusters_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,10); INSERT INTO out_clusters_v8 
VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,25); INSERT INTO out_clusters_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',242,33); INSERT INTO out_clusters_v8 VALUES('00:0b:57:ff:fe:2b:d4:57',1,3); INSERT INTO out_clusters_v8 VALUES('00:0b:57:ff:fe:2b:d4:57',1,4); INSERT INTO out_clusters_v8 VALUES('00:0b:57:ff:fe:2b:d4:57',1,6); INSERT INTO out_clusters_v8 VALUES('00:0b:57:ff:fe:2b:d4:57',1,8); INSERT INTO out_clusters_v8 VALUES('00:0b:57:ff:fe:2b:d4:57',1,25); INSERT INTO out_clusters_v8 VALUES('00:0b:57:ff:fe:2b:d4:57',1,4096); CREATE TABLE attributes_cache_v8 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, attrid INTEGER NOT NULL, value BLOB NOT NULL, -- Quirks can create "virtual" clusters and endpoints that won't be present in the -- DB but whose values still need to be cached FOREIGN KEY(ieee) REFERENCES devices_v8(ieee) ON DELETE CASCADE ); INSERT INTO attributes_cache_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,0,4,'The Home Depot'); INSERT INTO attributes_cache_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,0,5,'Ecosmart-ZBT-A19-CCT-Bulb'); INSERT INTO attributes_cache_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,6,0,1); INSERT INTO attributes_cache_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,6,16387,1); INSERT INTO attributes_cache_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,8,0,254); INSERT INTO attributes_cache_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,768,16395,153); INSERT INTO attributes_cache_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,768,16396,370); INSERT INTO attributes_cache_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,768,16394,16); INSERT INTO attributes_cache_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,0,4,'The Home Depot'); INSERT INTO attributes_cache_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,0,5,'Ecosmart-ZBT-A19-CCT-Bulb'); INSERT INTO attributes_cache_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,768,3,30002); INSERT INTO attributes_cache_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,768,4,26876); INSERT INTO attributes_cache_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,768,7,370); INSERT INTO 
attributes_cache_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,6,0,1); INSERT INTO attributes_cache_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,8,0,254); INSERT INTO attributes_cache_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,768,8,2); INSERT INTO attributes_cache_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,768,8,2); INSERT INTO attributes_cache_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,768,7,370); INSERT INTO attributes_cache_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,768,3,30002); INSERT INTO attributes_cache_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,768,4,26876); INSERT INTO attributes_cache_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,6,16387,1); INSERT INTO attributes_cache_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,768,16395,153); INSERT INTO attributes_cache_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,768,16396,370); INSERT INTO attributes_cache_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,768,16394,16); INSERT INTO attributes_cache_v8 VALUES('00:0b:57:ff:fe:2b:d4:57',1,0,4,'IKEA of Sweden'); INSERT INTO attributes_cache_v8 VALUES('00:0b:57:ff:fe:2b:d4:57',1,0,5,'TRADFRI wireless dimmer'); CREATE TABLE groups_v8 ( group_id INTEGER NOT NULL, name TEXT NOT NULL ); INSERT INTO groups_v8 VALUES(0,'Default Lightlink Group'); CREATE TABLE group_members_v8 ( group_id INTEGER NOT NULL, ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, FOREIGN KEY(group_id) REFERENCES groups_v8(group_id) ON DELETE CASCADE, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v8(ieee, endpoint_id) ON DELETE CASCADE ); INSERT INTO group_members_v8 VALUES(0,'00:12:4b:00:1c:a1:b8:46',1); CREATE TABLE relays_v8 ( ieee ieee NOT NULL, relays BLOB NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v8(ieee) ON DELETE CASCADE ); INSERT INTO relays_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',X'00'); INSERT INTO relays_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',X'00'); CREATE TABLE unsupported_attributes_v8 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, attrid INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v8(ieee) ON DELETE CASCADE, 
FOREIGN KEY(ieee, endpoint_id, cluster) REFERENCES in_clusters_v8(ieee, endpoint_id, cluster) ON DELETE CASCADE ); INSERT INTO unsupported_attributes_v8 VALUES('ec:1b:bd:ff:fe:2f:41:a4',1,768,16386); INSERT INTO unsupported_attributes_v8 VALUES('cc:cc:cc:ff:fe:e6:8e:ca',1,768,16386); CREATE UNIQUE INDEX devices_idx_v8 ON devices_v8(ieee); CREATE UNIQUE INDEX endpoint_idx_v8 ON endpoints_v8(ieee, endpoint_id); CREATE UNIQUE INDEX in_clusters_idx_v8 ON in_clusters_v8(ieee, endpoint_id, cluster); CREATE INDEX neighbors_idx_v8 ON neighbors_v8(device_ieee); CREATE UNIQUE INDEX node_descriptors_idx_v8 ON node_descriptors_v8(ieee); CREATE UNIQUE INDEX out_clusters_idx_v8 ON out_clusters_v8(ieee, endpoint_id, cluster); CREATE UNIQUE INDEX attributes_idx_v8 ON attributes_cache_v8(ieee, endpoint_id, cluster, attrid); CREATE UNIQUE INDEX groups_idx_v8 ON groups_v8(group_id); CREATE UNIQUE INDEX group_members_idx_v8 ON group_members_v8(group_id, ieee, endpoint_id); CREATE UNIQUE INDEX relays_idx_v8 ON relays_v8(ieee); CREATE UNIQUE INDEX unsupported_attributes_idx_v8 ON unsupported_attributes_v8(ieee, endpoint_id, cluster, attrid); COMMIT;zigpy-0.62.3/tests/databases/zigbee_20190417_v0.db000066400000000000000000002600001456054056700212170ustar00rootroot00000000000000SQLite format 3@ Vl Vl.,P m7 c r w '!3indexattribute_idxattributes CREATE UNIQUE INDEX attribute_idx ON attributes(ieee, endpoint_id, cluster, attrid)i !!tableattributesattributes CREATE TABLE attributes (ieee ieee, endpoint_id, cluster, attrid, value)1+7indexoutput_cluster_idxoutput_clusters CREATE UNIQUE INDEX output_cluster_idx ON output_clusters(ieee, endpoint_id, cluster)i++ tableoutput_clustersoutput_clustersCREATE TABLE output_clusters (ieee ieee, endpoint_id, cluster)g#indexcluster_idxclustersCREATE UNIQUE INDEX cluster_idx ON clusters(ieee, endpoint_id, cluster)S{tableclustersclustersCREATE TABLE clusters (ieee ieee, endpoint_id, cluster)b% indexendpoint_idxendpointsCREATE UNIQUE INDEX endpoint_idx ON 
endpoints(ieee, endpoint_id){EtableendpointsendpointsCREATE TABLE endpoints (ieee ieee, endpoint_id, profile_id, device_type device_type, status)Hgindexieee_idxdevicesCREATE UNIQUE INDEX ieee_idx ON devices(ieee)GgtabledevicesdevicesCREATE TABLE devices (ieee ieee, nwk, status)  m]<zY9  s R~ 1  m-;00:15:8d:00:01:eb:71:ecO,;84:18:26:00:00:d9:86:e7A+;84:18:26:00:00:01:30:50);00:15:8d:00:02:05:a6:41ԧ&;84:18:26:00:00:00:d1:dfz%;84:18:26:00:00:00:d0:fa $;84:18:26:00:00:02:44:33!;84:18:26:00:00:04:a7:c9 ;00:15:8d:00:02:b8:bb:71.;00:15:8d:00:02:c3:af:b1;00:15:8d:00:02:c3:95:8a;00:0d:6f:00:0d:2e:8d:72;00:15:8d:00:02:04:a0:62|;94:10:3e:f6:bf:42:8a:ady;00:15:8d:00:02:36:84:85}5;00:15:8d:00:02:36:91:2f?;00:15:8d:00:02:04:54:405 ;00:0d:6f:00:0c:a7:42:a6YF ;00:15:8d:00:02:b5:f7:cdY&;00:15:8d:00:02:b5:2d:b5k;00:12:4b:00:19:36:95:c1TJ;00:0d:6f:00:0b:12:4b:62;00:0d:6f:00:0b:1c:f1:4a ";00:0d:6f:00:0d:2e:8d:e91;00:0d:6f:ff:fe:7a:d3:7aˣ;00:0d:6f:00:05:76:14:80#;84:18:26:00:00:02:b7:13/  YuA = ) y!% a E } ];84:18:26:00:00:d9:86:e7,;84:18:26:00:00:01:30:50+;00:15:8d:00:01:eb:71:ec-;84:18:26:00:00:00:d1:df&;84:18:26:00:00:00:d0:fa%;84:18:26:00:00:02:44:33$;84:18:26:00:00:02:b7:13#;00:0d:6f:00:0d:2e:8d:e9";84:18:26:00:00:04:a7:c9!;00:15:8d:00:02:05:a6:41);00:15:8d:00:02:c3:af:b1;00:15:8d:00:02:c3:95:8a;00:0d:6f:00:0d:2e:8d:72;94:10:3e:f6:bf:42:8a:ad;00:15:8d:00:02:36:84:85;00:15:8d:00:02:36:91:2f;00:15:8d:00:02:04:54:40;00:0d:6f:00:0c:a7:42:a6 ;00:15:8d:00:02:04:a0:62;00:15:8d:00:02:b5:f7:cd;00:15:8d:00:02:b5:2d:b5;00:12:4b:00:19:36:95:c1;00:0d:6f:00:0b:12:4b:62;00:0d:6f:00:0b:1c:f1:4a;00:15:8d:00:02:b8:bb:71 ;00:0d:6f:ff:fe:7a:d3:7a;00:0d:6f:00:05:76:14:80 u! 
-qO*zW5 c  @ p L (  Q - - -"0; 84:18:26:00:00:d9:86:e7"/; 84:18:26:00:00:01:30:50H!1;  00:15:8d:00:01:eb:71:ec_"(; 84:18:26:00:00:00:d1:df"'; 84:18:26:00:00:00:d0:fa"&; 84:18:26:00:00:02:44:33"%; 84:18:26:00:00:02:b7:13"$; 00:0d:6f:00:0d:2e:8d:e9 +; 00:15:8d:00:02:05:a6:41!;  00:15:8d:00:02:c3:af:b1_!;  00:15:8d:00:02:c3:95:8a_"; 00:0d:6f:00:0d:2e:8d:72 !;  00:0d:6f:00:0d:2e:8d:72!;  94:10:3e:f6:bf:42:8a:ad!;  00:15:8d:00:02:36:84:85!;  00:15:8d:00:02:36:91:2f!;  00:15:8d:00:02:04:54:40_ ;  00:0d:6f:00:0c:a7:42:a6Q!;  00:15:8d:00:02:04:a0:62_"; 00:15:8d:00:02:b5:f7:cd_ ;  00:15:8d:00:02:b5:f7:cd ! ;  00:15:8d:00:02:b5:2d:b5_" ; 00:12:4b:00:19:36:95:c1! ;  00:0d:6f:00:0b:12:4b:62! ;  00:0d:6f:00:0b:1c:f1:4a"!; 00:15:8d:00:02:b8:bb:71_ ; 00:15:8d:00:02:b8:bb:71 #; 00:0d:6f:ff:fe:7a:d3:7aa ;  00:0d:6f:ff:fe:7a:d3:7a"; 00:0d:6f:00:05:76:14:80 !;  00:0d:6f:00:05:76:14:80!#;  00:0d:6f:00:0d:2e:8d:e9""; 84:18:26:00:00:04:a7:c9 2! ' x kL n D ! a/ [ >   ;84:18:26:00:00:d9:86:e70;84:18:26:00:00:01:30:50/<; 00:15:8d:00:01:eb:71:ec1;84:18:26:00:00:00:d1:df(;84:18:26:00:00:00:d0:fa';84:18:26:00:00:02:44:33&;84:18:26:00:00:02:b7:13%;00:0d:6f:00:0d:2e:8d:e9$; 00:15:8d:00:02:05:a6:41+; 00:15:8d:00:02:c3:af:b1; 00:15:8d:00:02:c3:95:8a;00:0d:6f:00:0d:2e:8d:72; 00:0d:6f:00:0d:2e:8d:72; 94:10:3e:f6:bf:42:8a:ad; 00:15:8d:00:02:36:84:85; 00:15:8d:00:02:36:91:2f; 00:15:8d:00:02:04:54:40; 00:0d:6f:00:0c:a7:42:a6; 00:15:8d:00:02:04:a0:62;00:15:8d:00:02:b5:f7:cd; 00:15:8d:00:02:b5:f7:cd; 00:15:8d:00:02:b5:2d:b5 ;00:12:4b:00:19:36:95:c1 ; 00:0d:6f:00:0b:12:4b:62 ; 00:0d:6f:00:0b:1c:f1:4a ;00:15:8d:00:02:b8:bb:71!; 00:15:8d:00:02:b8:bb:71 ;00:0d:6f:ff:fe:7a:d3:7a; 00:0d:6f:ff:fe:7a:d3:7a;00:0d:6f:00:05:76:14:80; 00:0d:6f:00:05:76:14:80; 00:0d:6f:00:0d:2e:8d:e9#;84:18:26:00:00:04:a7:c9"; 00:15:8d:00:02:b5:2d:b52 P=hH d D ' q R 3  ~ ` @ "  n N 1  + uV7 x Y : 1X;84:18:26:00:00:d9:86:e7W;84:18:26:00:00:01:30:50Y; 
00:15:8d:00:01:eb:71:ecG;84:18:26:00:00:00:d1:dfF;84:18:26:00:00:00:d0:faE;84:18:26:00:00:02:44:33D;84:18:26:00:00:02:b7:13C;00:0d:6f:00:0d:2e:8d:e9B; 00:0d:6f:00:0d:2e:8d:e9A;84:18:26:00:00:04:a7:c94; 00:15:8d:00:02:c3:af:b13; 00:15:8d:00:02:c3:af:b12; 00:15:8d:00:02:c3:af:b11; 00:15:8d:00:02:c3:95:8a0; 00:15:8d:00:02:c3:95:8a/; 00:15:8d:00:02:c3:95:8a.;00:0d:6f:00:0d:2e:8d:72-; 00:0d:6f:00:0d:2e:8d:726; 94:10:3e:f6:bf:42:8a:ad+; 00:15:8d:00:02:36:84:85*; 00:15:8d:00:02:36:91:2f); 00:15:8d:00:02:04:54:40(; 00:15:8d:00:02:04:54:40'; 00:15:8d:00:02:04:54:40&; 00:0d:6f:00:0c:a7:42:a6%; 00:15:8d:00:02:04:a0:62$; 00:15:8d:00:02:04:a0:62#; 00:15:8d:00:02:04:a0:62";00:15:8d:00:02:b5:f7:cd!;00:15:8d:00:02:b5:f7:cd ;00:15:8d:00:02:b5:f7:cd;00:15:8d:00:02:b5:f7:cd; 00:15:8d:00:02:b5:f7:cd; 00:15:8d:00:02:b5:f7:cd; 00:15:8d:00:02:b5:f7:cd; 00:15:8d:00:02:b5:f7:cd; 00:15:8d:00:02:b5:f7:cd; 00:15:8d:00:02:b5:f7:cd; 00:15:8d:00:02:b5:2d:b5; 00:15:8d:00:02:b5:2d:b5; 00:15:8d:00:02:b5:2d:b5;00:12:4b:00:19:36:95:c1;00:12:4b:00:19:36:95:c1; 00:0d:6f:00:0b:12:4b:62; 00:0d:6f:00:0b:1c:f1:4a@;00:15:8d:00:02:b8:bb:71?;00:15:8d:00:02:b8:bb:71>;00:15:8d:00:02:b8:bb:71=;00:15:8d:00:02:b8:bb:71<; 00:15:8d:00:02:b8:bb:71;; 00:15:8d:00:02:b8:bb:71:; 00:15:8d:00:02:b8:bb:719; 00:15:8d:00:02:b8:bb:718; 00:15:8d:00:02:b8:bb:717; 00:15:8d:00:02:b8:bb:71;00:0d:6f:ff:fe:7a:d3:7a!; 00:0d:6f:ff:fe:7a:d3:7a; 00:0d:6f:ff:fe:7a:d3:7a ; 00:0d:6f:ff:fe:7a:d3:7a;00:0d:6f:00:05:76:14:80; 00:0d:6f:00:05:76:14:80 = w } bdC  Y :  n O .  
t 4 T  %Ii) _ @    " B  ;84:18:26:00:00:d9:86:e7X;84:18:26:00:00:01:30:50W; 00:15:8d:00:01:eb:71:ecY;84:18:26:00:00:00:d1:dfG;84:18:26:00:00:00:d0:faF;84:18:26:00:00:02:44:33E;84:18:26:00:00:02:b7:13D;00:0d:6f:00:0d:2e:8d:e9C; 00:0d:6f:00:0d:2e:8d:e9B;84:18:26:00:00:04:a7:c9A ; 00:15:8d:00:02:c3:af:b14; 00:15:8d:00:02:c3:af:b13; 00:15:8d:00:02:c3:af:b12 ; 00:15:8d:00:02:c3:95:8a1; 00:15:8d:00:02:c3:95:8a0; 00:15:8d:00:02:c3:95:8a/;00:0d:6f:00:0d:2e:8d:72.; 00:0d:6f:00:0d:2e:8d:72-; 94:10:3e:f6:bf:42:8a:ad6; 00:15:8d:00:02:36:84:85+; 00:15:8d:00:02:36:91:2f* ; 00:15:8d:00:02:04:54:40); 00:15:8d:00:02:04:54:40(; 00:15:8d:00:02:04:54:40'; 00:0d:6f:00:0c:a7:42:a6& ; 00:15:8d:00:02:04:a0:62%; 00:15:8d:00:02:04:a0:62$; 00:15:8d:00:02:04:a0:62#;00:15:8d:00:02:b5:f7:cd";00:15:8d:00:02:b5:f7:cd!;00:15:8d:00:02:b5:f7:cd ;00:15:8d:00:02:b5:f7:cd; 00:15:8d:00:02:b5:f7:cd; 00:15:8d:00:02:b5:f7:cd; 00:15:8d:00:02:b5:f7:cd; 00:15:8d:00:02:b5:f7:cd; 00:15:8d:00:02:b5:f7:cd; 00:15:8d:00:02:b5:f7:cd ; 00:15:8d:00:02:b5:2d:b5; 00:15:8d:00:02:b5:2d:b5; 00:15:8d:00:02:b5:2d:b5;00:12:4b:00:19:36:95:c1;00:12:4b:00:19:36:95:c1; 00:0d:6f:00:0b:12:4b:62; 00:0d:6f:00:0b:1c:f1:4a;00:15:8d:00:02:b8:bb:71@;00:15:8d:00:02:b8:bb:71?;00:15:8d:00:02:b8:bb:71>;00:15:8d:00:02:b8:bb:71=; 00:15:8d:00:02:b8:bb:71<; 00:15:8d:00:02:b8:bb:71;; 00:15:8d:00:02:b8:bb:71:; 00:15:8d:00:02:b8:bb:719; 00:15:8d:00:02:b8:bb:718; 00:15:8d:00:02:b8:bb:717 ;00:0d:6f:ff:fe:7a:d3:7a!; 00:0d:6f:ff:fe:7a:d3:7a; 00:0d:6f:ff:fe:7a:d3:7a ; 00:0d:6f:ff:fe:7a:d3:7a;00:0d:6f:00:05:76:14:80;  00:0d:6f:00:05:76:14:80 a ";84:18:26:00:00:00:d0:fa!9 ; 00:15:8d:00:02:04:a0:62 H^0`7 { V +  b A  m G %  w V  O oM*{H$}[$\1[8pL)f>pgg"-;84:18:26:00:00:04:a7:c9Nv; h00:15:8d:00:02:b8:bb:71! (!C!$! 
!FY!(!%M $|; 00:15:8d:00:02:b8:bb:71; 00:15:8d:00:02:b8:bb:71; 00:15:8d:00:02:b5:f7:cd0'; 000:15:8d:00:02:b5:f7:cdlumi.vibration.aq10(; 000:15:8d:00:02:b8:bb:71lumi.vibration.aq1'$; 00:15:8d:00:02:b8:bb:71 O""; 00:15:8d:00:02:b8:bb:71;  00:15:8d:00:02:b8:bb:71 ;  00:15:8d:00:02:b5:f7:cd !j; 00:15:8d:00:02:b5:f7:cd3{; 600:15:8d:00:02:36:84:85lumi.sensor_wleak.aq13n; 600:15:8d:00:02:36:91:2flumi.sensor_wleak.aq1+;  00:15:8d:00:02:b5:f7:cd45; 800:15:8d:00:02:c3:95:8alumi.sensor_magnet.aq2"U; 00:15:8d:00:02:c3:af:b1LUMI@; 00:15:8d:00:02:c3:95:8a"<; 00:15:8d:00:02:c3:95:8aLUMI ^;  00:0d:6f:00:0d:2e:8d:72/\;,00:0d:6f:00:0d:2e:8d:72Contact Sensor-A)[; 00:0d:6f:00:0d:2e:8d:72CentraLite.Z; ,00:0d:6f:00:0d:2e:8d:72Contact Sensor-A(Y;  00:0d:6f:00:0d:2e:8d:72CentraLite!; 00:0d:6f:00:05:76:14:80 ; 00:0d:6f:00:05:76:14:80!; 00:0d:6f:00:05:76:14:80 j; 00:0d:6f:00:05:76:14:80>M;  00:0d:6f:00:05:76:14:80!J; 00:0d:6f:00:05:76:14:80$I; 00:0d:6f:00:05:76:14:801 G;00:0d:6f:00:05:76:14:801c; 100:15:8d:00:02:b5:f7:cdbattery_voltage_mV 1 ; 100:15:8d:00:02:36:84:85battery_voltage_mV 1s; 100:15:8d:00:02:36:91:2fbattery_voltage_mV 1H; 100:15:8d:00:02:04:54:40battery_voltage_mV 1 ; 100:15:8d:00:02:04:a0:62battery_voltage_mV 4/; 800:15:8d:00:02:04:a0:62lumi.sensor_magnet.aq2';  00:15:8d:00:02:04:a0:621$; 100:15:8d:00:02:b5:2d:b5battery_voltage_mV ;  00:15:8d:00:02:b5:2d:b54; 800:15:8d:00:02:b5:2d:b5lumi.sensor_magnet.aq2h; 00:15:8d:00:02:04:54:40T; 00:0d:6f:00:05:76:14:80#3; 94:10:3e:f6:bf:42:8a:adMZ100"2; 94:10:3e:f6:bf:42:8a:adMRVLq; 00:15:8d:00:02:36:84:85 m;  00:15:8d:00:02:36:84:85*"k; 00:15:8d:00:02:36:84:85LUMIZ; 00:15:8d:00:02:36:91:2f U;  00:15:8d:00:02:36:91:2f*L;  00:15:8d:00:02:36:91:2f"H; 00:15:8d:00:02:36:91:2fLUMI{;  00:15:8d:00:02:04:54:404z; 800:15:8d:00:02:04:54:40lumi.sensor_magnet.aq2"[; 00:15:8d:00:02:04:54:40LUMI$ ; 00:0d:6f:00:0c:a7:42:a63210-L(;  00:0d:6f:00:0c:a7:42:a6CentraLite"; 00:15:8d:00:02:04:a0:62LUMI"; 00:15:8d:00:02:b5:f7:cdLUMI"{; 
00:15:8d:00:02:b5:2d:b5LUMI*R;"00:12:4b:00:19:36:95:c1lumi.router#Q;00:12:4b:00:19:36:95:c1LUMI ;;  00:0d:6f:00:0b:12:4b:62'.; 00:0d:6f:00:0b:12:4b:62MCT-340 E%-; 00:0d:6f:00:0b:12:4b:62Visonic %;  00:0d:6f:00:0b:1c:f1:4a'#; 00:0d:6f:00:0b:1c:f1:4aMCT-340 E%"; 00:0d:6f:00:0b:1c:f1:4aVisonic5; :00:0d:6f:ff:fe:7a:d3:7aTRADFRI signal repeater,; (00:0d:6f:ff:fe:7a:d3:7aIKEA of Sweden%;00:0d:6f:00:05:76:14:803320-L); 00:0d:6f:00:05:76:14:80CentraLite$; 00:0d:6f:00:05:76:14:803320-L(;  00:0d:6f:00:05:76:14:80CentraLite ErP-d LoK'|O G xr@oG wS-sKw P l ! p M * * * * * * * * * * * * * * *nI_;U/!6;84:18:26:00:00:02:b7:13''O; 00:15:8d:00:02:b5:f7:cdK"N; 00:15:8d:00:02:b5:f7:cd0!M; 00:15:8d:00:02:b5:f7:cdU; 00:15:8d:00:02:b5:f7:cd$*; 00:15:8d:00:02:b5:f7:cd ; 84:18:26:00:00:02:b7:13#;84:18:26:00:00:02:b7:13r9;@84:18:26:00:00:02:b7:13LIGHTIFY A19 Tunable White$;84:18:26:00:00:02:b7:13OSRAMF <;84:18:26:00:00:02:b7:13 S;  00:15:8d:00:02:b8:bb:71U!; 00:15:8d:00:02:b5:f7:cd ; 00:15:8d:00:02:b5:f7:cd  ; 00:15:8d:00:02:b5:f7:cd!N ; h00:15:8d:00:02:b5:f7:cd! (!3!$! !FY!(!%K ##;84:18:26:00:00:00:d1:dfr$;84:18:26:00:00:00:d1:dfOSRAM!;84:18:26:00:00:00:d0:fa w"#<;84:18:26:00:00:00:d0:far99;@84:18:26:00:00:00:d0:faLIGHTIFY A19 Tunable White$8;84:18:26:00:00:00:d0:faOSRAM$*;84:18:26:00:00:02:44:33  %!$;84:18:26:00:00:02:44:33#; 84:18:26:00:00:02:44:33!;84:18:26:00:00:02:44:33!;84:18:26:00:00:02:44:33"N;84:18:26:00:00:02:44:33 J;84:18:26:00:00:02:44:33I; 84:18:26:00:00:02:44:33"F;84:18:26:00:00:02:44:335; 84:18:26:00:00:02:44:33#4;84:18:26:00:00:02:44:33r93;@84:18:26:00:00:02:44:33LIGHTIFY A19 Tunable White$2;84:18:26:00:00:02:44:33OSRAM$;84:18:26:00:00:02:b7:13  %!1;84:18:26:00:00:02:b7:130; 84:18:26:00:00:02:b7:13!B;84:18:26:00:00:02:b7:133; 84:18:26:00:00:04:a7:c9 2;84:18:26:00:00:04:a7:c9 0;84:18:26:00:00:04:a7:c9"-;84:18:26:00:00:04:a7:c99+;@84:18:26:00:00:04:a7:c9LIGHTIFY A19 Tunable White$*;84:18:26:00:00:04:a7:c9OSRAMNv; h00:15:8d:00:02:b8:bb:71! (!C!$! 
!FY!(!%M $|; 00:15:8d:00:02:b8:bb:71 j G!a;84:18:26:00:00:00:d1:df9 ;@84:18:26:00:00:00:d1:dfLIGHTIFY A19 Tunable White#.;84:18:26:00:00:04:a7:c9j; 00:15:8d:00:02:c3:af:b1 ; 00:0d:6f:00:0d:2e:8d:e9>.; 00:15:8d:00:02:36:84:85 Cya; 00:0d:6f:00:0b:12:4b:62`; 00:0d:6f:00:0d:2e:8d:e9!_; 00:0d:6f:00:0d:2e:8d:e9$[; 00:0d:6f:00:0d:2e:8d:e91N; 84:18:26:00:00:00:d0:faI; 00:12:4b:00:19:36:95:c1G; 84:18:26:00:00:04:a7:c9F;  00:0d:6f:00:0c:a7:42:a6[y; 00:0d:6f:00:0d:2e:8d:72>4k; 800:15:8d:00:02:c3:af:b1lumi.sensor_magnet.aq2 ;  00:0d:6f:00:0d:2e:8d:e9/;,00:0d:6f:00:0d:2e:8d:e9Contact Sensor-A); 00:0d:6f:00:0d:2e:8d:e9CentraLite.; ,00:0d:6f:00:0d:2e:8d:e9Contact Sensor-A(;  00:0d:6f:00:0d:2e:8d:e9CentraLite"W;84:18:26:00:00:04:a7:c9 V^=sQ1 f C " z X 6  i G % z X 6  f D !wU3`=vS3h6kI'mI'lH(}[)t#; !; 00:15:8d:00:01:eb:71:ecU!; 00:15:8d:00:01:eb:71:ecU ; 00:15:8d:00:02:04:a0:62;  00:15:8d:00:02:04:a0:62"; 00:15:8d:00:02:04:54:40U1; 100:15:8d:00:02:04:54:40battery_voltage_mV!; 00:15:8d:00:02:04:54:40!U!; 00:15:8d:00:02:04:54:40 U#; 00:15:8d:00:02:04:54:40U ; 00:15:8d:00:02:04:54:40h ; 00:15:8d:00:02:04:54:40 ; 00:15:8d:00:02:04:54:40;  00:15:8d:00:02:04:54:40";00:12:4b:00:19:36:95:c1U";00:12:4b:00:19:36:95:c1K ;00:12:4b:00:19:36:95:c1R ;00:12:4b:00:19:36:95:c1Q!; 00:0d:6f:ff:fe:7a:d3:7aQ; 00:0d:6f:ff:fe:7a:d3:7a; 00:0d:6f:ff:fe:7a:d3:7a";00:0d:6f:00:0d:2e:8d:e9K";00:0d:6f:00:0d:2e:8d:e9";00:0d:6f:00:0d:2e:8d:e9#; 00:0d:6f:00:0d:2e:8d:e9K";  00:0d:6f:00:0d:2e:8d:e9"; 00:0d:6f:00:0d:2e:8d:e9K"; 00:0d:6f:00:0d:2e:8d:e9U!; 00:0d:6f:00:0d:2e:8d:e9>B!; 00:0d:6f:00:0d:2e:8d:e91K!; 00:0d:6f:00:0d:2e:8d:e9!Uo!; 00:0d:6f:00:0d:2e:8d:e9 Un!; 00:0d:6f:00:0d:2e:8d:e9K!; 00:0d:6f:00:0d:2e:8d:e9!; 00:0d:6f:00:0d:2e:8d:e9";00:0d:6f:00:0d:2e:8d:72L !;00:0d:6f:00:0d:2e:8d:72r!;00:0d:6f:00:0d:2e:8d:72r#; 00:0d:6f:00:0d:2e:8d:72L !;  00:0d:6f:00:0d:2e:8d:72r"; 00:0d:6f:00:0d:2e:8d:72L "; 00:0d:6f:00:0d:2e:8d:72U!; 00:0d:6f:00:0d:2e:8d:72>E!; 00:0d:6f:00:0d:2e:8d:721L!; 00:0d:6f:00:0d:2e:8d:72!U!; 
00:0d:6f:00:0d:2e:8d:72 U!; 00:0d:6f:00:0d:2e:8d:72L ; 00:0d:6f:00:0d:2e:8d:72r ; 00:0d:6f:00:0d:2e:8d:72r$; 00:0d:6f:00:0c:a7:42:a6  U!; 00:0d:6f:00:0c:a7:42:a6U!; 00:0d:6f:00:0c:a7:42:a6K ; 00:0d:6f:00:0c:a7:42:a6 ; 00:0d:6f:00:0c:a7:42:a6#; 00:0d:6f:00:0b:1c:f1:4aK ;  00:0d:6f:00:0b:1c:f1:4a%"; 00:0d:6f:00:0b:1c:f1:4aK"; 00:0d:6f:00:0b:1c:f1:4aU!; 00:0d:6f:00:0b:1c:f1:4a1K!; 00:0d:6f:00:0b:1c:f1:4a!L!; 00:0d:6f:00:0b:1c:f1:4a L!; 00:0d:6f:00:0b:1c:f1:4aK; 00:0d:6f:00:0b:1c:f1:4a#; 00:0d:6f:00:0b:1c:f1:4a"#; 00:0d:6f:00:0b:12:4b:62K ;  00:0d:6f:00:0b:12:4b:62;"; 00:0d:6f:00:0b:12:4b:62K"; 00:0d:6f:00:0b:12:4b:62U!; 00:0d:6f:00:0b:12:4b:621K!; 00:0d:6f:00:0b:12:4b:62!K!; 00:0d:6f:00:0b:12:4b:62 K!; 00:0d:6f:00:0b:12:4b:62K; 00:0d:6f:00:0b:12:4b:62.; 00:0d:6f:00:0b:12:4b:62-!;00:0d:6f:00:05:76:14:80 ;00:0d:6f:00:05:76:14:80 ;00:0d:6f:00:05:76:14:80"; 00:0d:6f:00:05:76:14:80!; 00:0d:6f:00:05:76:14:80!; 00:0d:6f:00:05:76:14:80> ; 00:0d:6f:00:05:76:14:80>7j ; 00:0d:6f:00:05:76:14:801 ; 00:0d:6f:00:05:76:14:80!> ; 00:0d:6f:00:05:76:14:80 > ; 00:0d:6f:00:05:76:14:80T; 00:0d:6f:00:05:76:14:80;  00:0d:6f:00:05:76:14:80 sM Ngpa?O"-`>D #  } [ 9  { X 4  ~ ] ;  b A ) S 1 pMaaaaaaaaaa5~[8d@mK)-!NP!; 00:15:8d:00:02:36:91:2f!U!; 00:15:8d:00:02:36:84:85!T#; 00:15:8d:00:02:36:84:85T!#; 00:15:8d:00:02:36:91:2fU!; 00:15:8d:00:02:c3:af:b1!T#; 00:15:8d:00:02:36:84:85L'"; 00:15:8d:00:02:36:84:85T!; 00:15:8d:00:02:36:84:85 T!; 00:15:8d:00:02:36:91:2f U!; 00:15:8d:00:02:04:a0:62!R7E"; 00:15:8d:00:02:36:91:2fU#; 00:15:8d:00:02:36:91:2fM!; 00:15:8d:00:02:36:91:2fM-"; 00:15:8d:00:02:c3:95:8aU2!; 00:15:8d:00:02:c3:95:8a U1!; 00:15:8d:00:02:c3:95:8a!U0#; 00:15:8d:00:02:c3:95:8aU/"; 00:15:8d:00:02:c3:af:b1T!; 00:15:8d:00:02:c3:af:b1 T#; 00:15:8d:00:02:c3:af:b1T!; 00:15:8d:00:02:36:91:2f Z!; 00:15:8d:00:02:04:a0:62 R1; 100:15:8d:00:02:36:91:2fbattery_voltage_mV!!; 00:15:8d:00:02:04:a0:62N"; 00:15:8d:00:02:04:a0:62R#; 00:15:8d:00:02:04:a0:62R!; 00:15:8d:00:02:36:91:2fjn ; 00:15:8d:00:02:36:91:2f 
H;  00:15:8d:00:02:36:91:2f L#; 00:15:8d:00:02:b5:2d:b5R!;  00:15:8d:00:02:36:84:85 m!; 00:15:8d:00:02:36:84:85 q!; 00:15:8d:00:02:b5:2d:b5!R1; 100:15:8d:00:02:36:84:85battery_voltage_mV !; 00:15:8d:00:02:b5:2d:b5N"; 00:15:8d:00:02:b5:2d:b5R!; 00:15:8d:00:02:b5:2d:b5 R!; 00:15:8d:00:02:36:84:85A!; 00:15:8d:00:02:36:84:85j{ ; 00:15:8d:00:02:36:84:85 k#1; 100:15:8d:00:02:04:a0:62battery_voltage_mV h";84:18:26:00:00:00:d0:fa!8 t!; 00:15:8d:00:02:c3:af:b1 !; 00:15:8d:00:02:c3:af:b1 ; 00:15:8d:00:02:c3:af:b1sU ; 00:15:8d:00:02:c3:95:8as@ ; 00:15:8d:00:02:c3:95:8av ; 00:15:8d:00:02:c3:95:8as<$; 00:15:8d:00:02:b8:bb:71$; 00:15:8d:00:02:b8:bb:71|$; 00:15:8d:00:02:b8:bb:71#; 00:15:8d:00:02:b8:bb:71US#; 00:15:8d:00:02:b8:bb:71v!; 00:15:8d:00:02:b8:bb:71T!; 00:15:8d:00:02:b8:bb:71 ;  00:15:8d:00:02:b8:bb:71"; 00:15:8d:00:02:b5:f7:cd$; 00:15:8d:00:02:b5:f7:cdO$; 00:15:8d:00:02:b5:f7:cd*$; 00:15:8d:00:02:b5:f7:cdN#; 00:15:8d:00:02:b5:f7:cdUM#; 00:15:8d:00:02:b5:f7:cdj"; 00:15:8d:00:02:b5:f7:cd1; 100:15:8d:00:02:b5:f7:cdbattery_voltage_mVc!; 00:15:8d:00:02:b5:f7:cd!!; 00:15:8d:00:02:b5:f7:cd #; 00:15:8d:00:02:b5:f7:cd #; 00:15:8d:00:02:b5:f7:cd !; 00:15:8d:00:02:b5:f7:cdT!; 00:15:8d:00:02:b5:f7:cd' ; 00:15:8d:00:02:b5:f7:cd ;  00:15:8d:00:02:b5:f7:cdj+#1; 100:15:8d:00:02:b5:2d:b5battery_voltage_mV ; 00:15:8d:00:02:b5:2d:b5; 00:15:8d:00:02:b5:2d:b5{;  00:15:8d:00:02:b5:2d:b5E$!;  00:15:8d:00:02:36:91:2f U yWgG' X w 8 2 q Q  % iI(aA"Z;xX7}hI^>.1Pp`A  MMMMMM k L , c#B dC"y<[z!;00:0d:6f:00:0d:2e:8d:e9  ;00:0d:6f:00:0d:2e:8d:e9; 00:0d:6f:00:0d:2e:8d:e9;00:0d:6f:00:0d:2e:8d:e9 ; 00:0d:6f:00:0d:2e:8d:e9  ; 00:0d:6f:00:0d:2e:8d:e9 ; 00:0d:6f:00:0d:2e:8d:e9; 00:0d:6f:00:0d:2e:8d:e9 ; 00:0d:6f:00:0d:2e:8d:e9; 00:0d:6f:00:0d:2e:8d:e9; 00:0d:6f:00:0d:2e:8d:e9 ; 00:15:8d:00:02:36:84:85O ; 00:15:8d:00:02:04:a0:62=; 00:15:8d:00:02:04:a0:62>; 00:15:8d:00:02:04:a0:62<; 00:15:8d:00:02:04:a0:62; ; 00:15:8d:00:02:04:54:40J; 00:15:8d:00:02:04:54:40K; 00:15:8d:00:02:04:54:40I; 
00:15:8d:00:02:04:54:40H;00:12:4b:00:19:36:95:c10;00:12:4b:00:19:36:95:c1/ ;00:0d:6f:ff:fe:7a:d3:7a! ; 00:0d:6f:ff:fe:7a:d3:7a|; 00:0d:6f:ff:fe:7a:d3:7a; 00:0d:6f:ff:fe:7a:d3:7a ; 00:0d:6f:ff:fe:7a:d3:7a ";00:0d:6f:00:0d:2e:8d:e9!;00:0d:6f:00:0d:2e:8d:72d ;00:0d:6f:00:0d:2e:8d:72 c;00:0d:6f:00:0d:2e:8d:72b; 00:0d:6f:00:0d:2e:8d:72a;00:0d:6f:00:0d:2e:8d:72`; 00:0d:6f:00:0d:2e:8d:72 _; 00:0d:6f:00:0d:2e:8d:72^; 00:0d:6f:00:0d:2e:8d:72]; 00:0d:6f:00:0d:2e:8d:72 \; 00:0d:6f:00:0d:2e:8d:72[; 00:0d:6f:00:0d:2e:8d:72Z; 00:0d:6f:00:0d:2e:8d:72Yk; 00:15:8d:00:01:eb:71:ec; 00:15:8d:00:02:b5:2d:b51 C; 00:15:8d:00:01:eb:71:ec; 00:15:8d:00:01:eb:71:ec; 00:15:8d:00:01:eb:71:ec; 00:15:8d:00:02:36:91:2fM; 00:15:8d:00:02:36:91:2fN; 00:15:8d:00:02:36:91:2fL; 00:15:8d:00:02:36:84:85P; 00:15:8d:00:02:36:84:85Q ; 00:0d:6f:00:0c:a7:42:a6G; 00:0d:6f:00:0c:a7:42:a6 F; 00:0d:6f:00:0c:a7:42:a6 E; 00:0d:6f:00:0c:a7:42:a6D; 00:0d:6f:00:0c:a7:42:a6C; 00:0d:6f:00:0c:a7:42:a6B; 00:0d:6f:00:0c:a7:42:a6A; 00:0d:6f:00:0c:a7:42:a6@; 00:0d:6f:00:0c:a7:42:a6? ; 00:0d:6f:00:0b:12:4b:62 .; 00:0d:6f:00:0b:12:4b:62 -; 00:0d:6f:00:0b:12:4b:62,; 00:0d:6f:00:0b:12:4b:62+; 00:0d:6f:00:0b:12:4b:62*; 00:0d:6f:00:0b:12:4b:62); 00:0d:6f:00:0b:12:4b:62(; 00:0d:6f:00:0b:1c:f1:4a  ; 00:0d:6f:00:0b:1c:f1:4a ; 00:0d:6f:00:0b:1c:f1:4a; 00:0d:6f:00:0b:1c:f1:4a; 00:0d:6f:00:0b:1c:f1:4a; 00:0d:6f:00:0b:1c:f1:4a; 00:0d:6f:00:0b:1c:f1:4a]; 00:0d:6f:ff:fe:7a:d3:7a; 00:0d:6f:ff:fe:7a:d3:7a !;00:0d:6f:00:05:76:14:80 ;00:0d:6f:00:05:76:14:80  ;00:0d:6f:00:05:76:14:80 ; 00:0d:6f:00:05:76:14:80 ;00:0d:6f:00:05:76:14:80; 00:0d:6f:00:05:76:14:80 ; 00:0d:6f:00:05:76:14:80; 00:0d:6f:00:05:76:14:80; 00:0d:6f:00:05:76:14:80 ; 00:0d:6f:00:05:76:14:80; 00:0d:6f:00:05:76:14:80;  00:0d:6f:00:05:76:14:80 \ 3  v V 8  { \ =  !|Z8qP. 
bA M, dAyX7m  ^ =  sxW6c@ U 6  ;84:18:26:00:00:d9:86:e7 ;84:18:26:00:00:d9:86:e7 ;84:18:26:00:00:d9:86:e7 ;84:18:26:00:00:d9:86:e7;84:18:26:00:00:d9:86:e7";84:18:26:00:00:01:30:50!;84:18:26:00:00:01:30:50 !;84:18:26:00:00:01:30:50 ;84:18:26:00:00:01:30:50 ;84:18:26:00:00:01:30:50 ;84:18:26:00:00:01:30:50 ;84:18:26:00:00:01:30:50 ;84:18:26:00:00:01:30:50;84:18:26:00:00:01:30:50 R|";84:18:26:00:00:d9:86:e7!;84:18:26:00:00:d9:86:e7 !;84:18:26:00:00:d9:86:e7 ;84:18:26:00:00:d9:86:e7 ;84:18:26:00:00:d9:86:e7";84:18:26:00:00:00:d1:df!;84:18:26:00:00:00:d1:df !;84:18:26:00:00:00:d1:df ;84:18:26:00:00:00:d1:df ;84:18:26:00:00:00:d1:df ;84:18:26:00:00:00:d1:df ;84:18:26:00:00:00:d1:df ;84:18:26:00:00:00:d1:df;84:18:26:00:00:00:d1:df";84:18:26:00:00:00:d0:fa!;84:18:26:00:00:00:d0:fa !;84:18:26:00:00:00:d0:fa ;84:18:26:00:00:00:d0:fa ;84:18:26:00:00:00:d0:fa ;84:18:26:00:00:00:d0:fa ;84:18:26:00:00:00:d0:fa ;84:18:26:00:00:00:d0:fa;84:18:26:00:00:00:d0:fa";84:18:26:00:00:02:44:33!;84:18:26:00:00:02:44:33 !;84:18:26:00:00:02:44:33 ;84:18:26:00:00:02:44:33 ;84:18:26:00:00:02:44:33 ;84:18:26:00:00:02:44:33 ;84:18:26:00:00:02:44:33 ;84:18:26:00:00:02:44:33;84:18:26:00:00:02:44:33";84:18:26:00:00:02:b7:13!;84:18:26:00:00:02:b7:13 !;84:18:26:00:00:02:b7:13 ;84:18:26:00:00:02:b7:13 ;84:18:26:00:00:02:b7:13 ;84:18:26:00:00:02:b7:13 ;84:18:26:00:00:02:b7:13 ;84:18:26:00:00:02:b7:13;84:18:26:00:00:02:b7:13 ; 94:10:3e:f6:bf:42:8a:adz; 94:10:3e:f6:bf:42:8a:adw; 94:10:3e:f6:bf:42:8a:adv; 94:10:3e:f6:bf:42:8a:adx; 94:10:3e:f6:bf:42:8a:ady; 94:10:3e:f6:bf:42:8a:adu; 94:10:3e:f6:bf:42:8a:adt";84:18:26:00:00:04:a7:c9!;84:18:26:00:00:04:a7:c9 !;84:18:26:00:00:04:a7:c9 ;84:18:26:00:00:04:a7:c9 ;84:18:26:00:00:04:a7:c9 ;84:18:26:00:00:04:a7:c9 ;84:18:26:00:00:04:a7:c9 ;84:18:26:00:00:04:a7:c9;84:18:26:00:00:04:a7:c9 ; 00:15:8d:00:02:c3:af:b1k; 00:15:8d:00:02:c3:af:b1l; 00:15:8d:00:02:c3:af:b1j; 00:15:8d:00:02:c3:af:b1i ; 00:15:8d:00:02:c3:95:8ag; 00:15:8d:00:02:c3:95:8ah; 
00:15:8d:00:02:c3:95:8af; 00:15:8d:00:02:c3:95:8ae ;00:15:8d:00:02:b8:bb:71;00:15:8d:00:02:b8:bb:71; 00:15:8d:00:02:b8:bb:71~; 00:15:8d:00:02:b8:bb:71}; 00:15:8d:00:02:b8:bb:71|; 00:15:8d:00:02:b8:bb:71{;00:15:8d:00:02:b5:f7:cd:;00:15:8d:00:02:b5:f7:cd9; 00:15:8d:00:02:b5:f7:cd8; 00:15:8d:00:02:b5:f7:cd7; 00:15:8d:00:02:b5:f7:cd6; 00:15:8d:00:02:b5:f7:cd5 ; 00:15:8d:00:02:b5:2d:b53; 00:15:8d:00:02:b5:2d:b54 } kL-tV8 n P 1  } ^ @ !  m O 0  y \ > gJ, y[>N1z\=fH( !k  a B # jJ)lL, kL, ;84:18:26:00:00:02:b7:13;84:18:26:00:00:02:b7:13;84:18:26:00:00:02:b7:13;00:0d:6f:00:0d:2e:8d:e9;00:0d:6f:00:0d:2e:8d:e9 ;00:0d:6f:00:0d:2e:8d:e9; 00:0d:6f:00:0d:2e:8d:e9;00:0d:6f:00:0d:2e:8d:e9; 00:0d:6f:00:0d:2e:8d:e9 ; 00:0d:6f:00:0d:2e:8d:e9; 00:0d:6f:00:0d:2e:8d:e9 ; 00:0d:6f:00:0d:2e:8d:e9  ; 00:0d:6f:00:0d:2e:8d:e9 ; 00:0d:6f:00:0d:2e:8d:e9 ; 00:0d:6f:00:0d:2e:8d:e9 ;84:18:26:00:00:04:a7:c9;84:18:26:00:00:04:a7:c9 ;84:18:26:00:00:04:a7:c9;84:18:26:00:00:04:a7:c9;84:18:26:00:00:04:a7:c9;84:18:26:00:00:04:a7:c9;84:18:26:00:00:04:a7:c9;84:18:26:00:00:04:a7:c9;00:15:8d:00:02:b8:bb:71l; 00:15:8d:00:02:c3:af:b1k; 00:15:8d:00:02:c3:af:b1j; 00:15:8d:00:02:c3:af:b1i; 00:15:8d:00:02:c3:af:b1h; 00:15:8d:00:02:c3:95:8ag; 00:15:8d:00:02:c3:95:8af; 00:15:8d:00:02:c3:95:8ae; 00:15:8d:00:02:c3:95:8ad;00:0d:6f:00:0d:2e:8d:72c;00:0d:6f:00:0d:2e:8d:72 b;00:0d:6f:00:0d:2e:8d:72a; 00:0d:6f:00:0d:2e:8d:72`;00:0d:6f:00:0d:2e:8d:72_; 00:0d:6f:00:0d:2e:8d:72 ^; 00:0d:6f:00:0d:2e:8d:72]; 00:0d:6f:00:0d:2e:8d:72\; 00:0d:6f:00:0d:2e:8d:72 [; 00:0d:6f:00:0d:2e:8d:72Z; 00:0d:6f:00:0d:2e:8d:72Y; 00:0d:6f:00:0d:2e:8d:72z; 94:10:3e:f6:bf:42:8a:ady; 94:10:3e:f6:bf:42:8a:adx; 94:10:3e:f6:bf:42:8a:adw; 94:10:3e:f6:bf:42:8a:adv; 94:10:3e:f6:bf:42:8a:adu; 94:10:3e:f6:bf:42:8a:adt; 94:10:3e:f6:bf:42:8a:adQ; 00:15:8d:00:02:36:84:85P; 00:15:8d:00:02:36:84:85O; 00:15:8d:00:02:36:84:85N; 00:15:8d:00:02:36:91:2fM; 00:15:8d:00:02:36:91:2fL; 00:15:8d:00:02:36:91:2fK; 00:15:8d:00:02:04:54:40J; 00:15:8d:00:02:04:54:40I; 
00:15:8d:00:02:04:54:40H; 00:15:8d:00:02:04:54:40G; 00:0d:6f:00:0c:a7:42:a6F; 00:0d:6f:00:0c:a7:42:a6 E; 00:0d:6f:00:0c:a7:42:a6 D; 00:0d:6f:00:0c:a7:42:a6C; 00:0d:6f:00:0c:a7:42:a6B; 00:0d:6f:00:0c:a7:42:a6A; 00:0d:6f:00:0c:a7:42:a6@; 00:0d:6f:00:0c:a7:42:a6?; 00:0d:6f:00:0c:a7:42:a6>; 00:15:8d:00:02:04:a0:62=; 00:15:8d:00:02:04:a0:62<; 00:15:8d:00:02:04:a0:62;; 00:15:8d:00:02:04:a0:62:;00:15:8d:00:02:b5:f7:cd9;00:15:8d:00:02:b5:f7:cd8; 00:15:8d:00:02:b5:f7:cd7; 00:15:8d:00:02:b5:f7:cd6; 00:15:8d:00:02:b5:f7:cd5; 00:15:8d:00:02:b5:f7:cd4; 00:15:8d:00:02:b5:2d:b53; 00:15:8d:00:02:b5:2d:b52; 00:15:8d:00:02:b5:2d:b51; 00:15:8d:00:02:b5:2d:b50;00:12:4b:00:19:36:95:c1/;00:12:4b:00:19:36:95:c1.; 00:0d:6f:00:0b:12:4b:62 -; 00:0d:6f:00:0b:12:4b:62 ,; 00:0d:6f:00:0b:12:4b:62+; 00:0d:6f:00:0b:12:4b:62*; 00:0d:6f:00:0b:12:4b:62); 00:0d:6f:00:0b:12:4b:62(; 00:0d:6f:00:0b:12:4b:62 ; 00:0d:6f:00:0b:1c:f1:4a ; 00:0d:6f:00:0b:1c:f1:4a ; 00:0d:6f:00:0b:1c:f1:4a; 00:0d:6f:00:0b:1c:f1:4a; 00:0d:6f:00:0b:1c:f1:4a; 00:0d:6f:00:0b:1c:f1:4a; 00:0d:6f:00:0b:1c:f1:4a;84:18:26:00:00:04:a7:c9;00:15:8d:00:02:b8:bb:71~; 00:15:8d:00:02:b8:bb:71}; 00:15:8d:00:02:b8:bb:71|; 00:15:8d:00:02:b8:bb:71{; 00:15:8d:00:02:b8:bb:71;00:0d:6f:ff:fe:7a:d3:7a!; 00:0d:6f:ff:fe:7a:d3:7a|; 00:0d:6f:ff:fe:7a:d3:7a; 00:0d:6f:ff:fe:7a:d3:7a ; 00:0d:6f:ff:fe:7a:d3:7a ; 00:0d:6f:ff:fe:7a:d3:7a ; 00:0d:6f:ff:fe:7a:d3:7a ;00:0d:6f:00:05:76:14:80 ;00:0d:6f:00:05:76:14:80  ;00:0d:6f:00:05:76:14:80 ; 00:0d:6f:00:05:76:14:80;00:0d:6f:00:05:76:14:80; 00:0d:6f:00:05:76:14:80 ; 00:0d:6f:00:05:76:14:80; 00:0d:6f:00:05:76:14:80; 00:0d:6f:00:05:76:14:80 ; 00:0d:6f:00:05:76:14:80; 00:0d:6f:00:05:76:14:80; 00:0d:6f:00:05:76:14:80 7q^<}\; z Z 9  w W 7  x X 8  u U 5 q w YqqqqqT;84:18:26:00:00:d9:86:e7S;84:18:26:00:00:d9:86:e7 
R;84:18:26:00:00:d9:86:e7Q;84:18:26:00:00:d9:86:e7P;84:18:26:00:00:d9:86:e7O;84:18:26:00:00:d9:86:e7N;84:18:26:00:00:d9:86:e7M;84:18:26:00:00:d9:86:e7L;84:18:26:00:00:d9:86:e7K;84:18:26:00:00:01:30:50J;84:18:26:00:00:01:30:50 I;84:18:26:00:00:01:30:50H;84:18:26:00:00:01:30:50G;84:18:26:00:00:01:30:50F;84:18:26:00:00:01:30:50E;84:18:26:00:00:01:30:50D;84:18:26:00:00:01:30:50C;84:18:26:00:00:01:30:50X; 00:15:8d:00:01:eb:71:ecW; 00:15:8d:00:01:eb:71:ecV; 00:15:8d:00:01:eb:71:ecU; 00:15:8d:00:01:eb:71:ec9;84:18:26:00:00:00:d1:df8;84:18:26:00:00:00:d1:df 7;84:18:26:00:00:00:d1:df6;84:18:26:00:00:00:d1:df5;84:18:26:00:00:00:d1:df4;84:18:26:00:00:00:d1:df3;84:18:26:00:00:00:d1:df2;84:18:26:00:00:00:d1:df1;84:18:26:00:00:00:d1:df0;84:18:26:00:00:00:d0:fa/;84:18:26:00:00:00:d0:fa .;84:18:26:00:00:00:d0:fa-;84:18:26:00:00:00:d0:fa,;84:18:26:00:00:00:d0:fa+;84:18:26:00:00:00:d0:fa*;84:18:26:00:00:00:d0:fa);84:18:26:00:00:00:d0:fa(;84:18:26:00:00:00:d0:fa';84:18:26:00:00:02:44:33&;84:18:26:00:00:02:44:33 %;84:18:26:00:00:02:44:33$;84:18:26:00:00:02:44:33#;84:18:26:00:00:02:44:33";84:18:26:00:00:02:44:33!;84:18:26:00:00:02:44:33 ;84:18:26:00:00:02:44:33;84:18:26:00:00:02:44:33;84:18:26:00:00:02:b7:13;84:18:26:00:00:02:b7:13 ;84:18:26:00:00:02:b7:13;84:18:26:00:00:02:b7:13;84:18:26:00:00:02:b7:13;84:18:26:00:00:02:b7:13 ^ v S 0  x U +  y Z3c<OxS O+hI$ 4  @  x o L 'lI$.P!^,E*!; 00:15:8d:00:02:04:54:40; 00:15:8d:00:02:04:54:40  B; 00:15:8d:00:02:04:a0:62!=A; F00:15:8d:00:02:04:a0:62! (!!?$ !d+$A;84:18:26:00:00:00:d1:df  %$;84:18:26:00:00:00:d0:fa  %; 00:15:8d:00:02:04:a0:62!&; 00:0d:6f:00:0d:2e:8d:e9KB!; 00:0d:6f:00:0b:1c:f1:4a!8;84:18:26:00:00:00:d0:fa7; 84:18:26:00:00:00:d0:fa$M;84:18:26:00:00:04:a7:c9  %!!; 00:0d:6f:00:0b:12:4b:62K;00:12:4b:00:19:36:95:c1"N; 00:0d:6f:00:0c:a7:42:a6  A*; N00:15:8d:00:02:36:91:2f! (!!Q$! 
!d!;84:18:26:00:00:00:d1:df; 84:18:26:00:00:00:d1:df ; 00:0d:6f:00:0d:2e:8d:72!; 00:0d:6f:00:0d:2e:8d:72 !s;84:18:26:00:00:04:a7:c9r;84:18:26:00:00:04:a7:c9 o; 00:0d:6f:00:0d:2e:8d:e9!n; 00:0d:6f:00:0d:2e:8d:e9 !{;84:18:26:00:00:01:30:50u; 84:18:26:00:00:01:30:50!J; 00:0d:6f:00:0d:2e:8d:72 +; 00:15:8d:00:02:36:91:2f!B;  00:0d:6f:00:0c:a7:42:a6 h; 00:15:8d:00:02:b5:2d:b5!,; 00:15:8d:00:02:36:91:2f  ; 00:15:8d:00:02:36:91:2f; 00:15:8d:00:02:36:91:2f4.2; ,00:15:8d:00:01:eb:71:eclumi.sensor_swit"1; 00:15:8d:00:01:eb:71:ecLUMI!-; 00:15:8d:00:02:36:91:2f!O;84:18:26:00:00:d9:86:e7M; 84:18:26:00:00:d9:86:e7$;84:18:26:00:00:d9:86:e7  %g; 84:18:26:00:00:d9:86:e7#b;84:18:26:00:00:d9:86:e7r9a;@84:18:26:00:00:d9:86:e7LIGHTIFY A19 Tunable White$`;84:18:26:00:00:d9:86:e7OSRAM G;84:18:26:00:00:01:30:50 F;84:18:26:00:00:01:30:50; 84:18:26:00:00:01:30:50#;84:18:26:00:00:01:30:50r9;@84:18:26:00:00:01:30:50LIGHTIFY A19 Tunable White$;84:18:26:00:00:01:30:50OSRAM; 94:10:3e:f6:bf:42:8a:ad c; 94:10:3e:f6:bf:42:8a:adb;  94:10:3e:f6:bf:42:8a:ad!2; 00:15:8d:00:02:c3:95:8a 1; 00:15:8d:00:02:c3:95:8a  0; 00:15:8d:00:02:c3:95:8a!=/; F00:15:8d:00:02:c3:95:8a! (!!~$ !JTd!@; 00:15:8d:00:02:c3:af:b1?; 00:15:8d:00:02:c3:af:b1  >; 00:15:8d:00:02:c3:af:b1!==; F00:15:8d:00:02:c3:af:b1! (!!$ !JTd1; 84:18:26:00:00:00:d1:df '; 00:15:8d:00:02:36:84:85!(; 00:15:8d:00:02:36:84:85@'; 00:15:8d:00:02:36:84:85  &; 00:15:8d:00:02:36:84:85!A%; N00:15:8d:00:02:36:84:85! (!!0$! !:d ;  00:0d:6f:00:0d:2e:8d:72 ;00:0d:6f:00:0d:2e:8d:72! 
; 00:0d:6f:00:0d:2e:8d:72$; 00:0d:6f:00:0d:2e:8d:721; 00:0d:6f:00:0d:2e:8d:72&; 00:0d:6f:00:0b:1c:f1:4a!@V8\); 00:0d:6f:00:0b:1c:f1:4a ;  00:0d:6f:00:0b:1c:f1:4a!~; 00:0d:6f:00:0b:1c:f1:4a |; 00:0d:6f:00:0b:1c:f1:4a1{; 00:0d:6f:00:0b:1c:f1:4a&k; 00:0d:6f:00:0b:12:4b:62!@[ =pj; 00:0d:6f:00:0b:12:4b:62 i;  00:0d:6f:00:0b:12:4b:62 h;00:0d:6f:00:0d:2e:8d:e9!e; 00:0d:6f:00:0b:12:4b:62 d;  00:0d:6f:00:0d:2e:8d:e9b; 00:0d:6f:00:0b:12:4b:621l!j; 00:15:8d:00:02:b5:2d:b5i; 00:15:8d:00:02:b5:2d:b5 &!D; 00:15:8d:00:02:04:a0:62_;  00:0d:6f:ff:fe:7a:d3:7a$u;84:18:26:00:00:01:30:50  %r =g; F00:15:8d:00:02:b5:2d:b5! (!!$ !dP; 00:15:8d:00:02:b5:2d:b5 ; 00:15:8d:00:02:04:54:40!=; F00:15:8d:00:02:04:54:40! (!!$ !:dC; 00:15:8d:00:02:04:a0:62  FBsN(yU0 [8`< ~ [ 8  _ 9  f B  j G $  q L &hB ~%;84:18:26:00:00:d9:86:e7  P";84:18:26:00:00:d9:86:e7MO";84:18:26:00:00:d9:86:e7MM";84:18:26:00:00:d9:86:e7L$;84:18:26:00:00:d9:86:e7L";84:18:26:00:00:d9:86:e7L";84:18:26:00:00:d9:86:e7L#;84:18:26:00:00:01:30:50L#;84:18:26:00:00:01:30:50L";84:18:26:00:00:01:30:50P{%;84:18:26:00:00:01:30:50  P";84:18:26:00:00:01:30:50Pu";84:18:26:00:00:01:30:50L$;84:18:26:00:00:01:30:50L";84:18:26:00:00:01:30:50L";84:18:26:00:00:01:30:50L!; 94:10:3e:f6:bf:42:8a:adQ!; 94:10:3e:f6:bf:42:8a:adQ!; 94:10:3e:f6:bf:42:8a:adL ; 94:10:3e:f6:bf:42:8a:ad3 ; 94:10:3e:f6:bf:42:8a:ad2%;84:18:26:00:00:04:a7:c9  U$;84:18:26:00:00:04:a7:c9 #;84:18:26:00:00:04:a7:c9-#;84:18:26:00:00:04:a7:c90#;84:18:26:00:00:04:a7:c9W#;84:18:26:00:00:04:a7:c92"; 84:18:26:00:00:04:a7:c93";84:18:26:00:00:04:a7:c9Us";84:18:26:00:00:04:a7:c9Ur";84:18:26:00:00:04:a7:c9K";84:18:26:00:00:04:a7:c9";84:18:26:00:00:04:a7:c9%;84:18:26:00:00:02:b7:13   $;84:18:26:00:00:02:b7:13#;84:18:26:00:00:02:b7:13#;84:18:26:00:00:02:b7:13#;84:18:26:00:00:02:b7:13";84:18:26:00:00:02:b7:13";84:18:26:00:00:02:b7:13";84:18:26:00:00:02:b7:13";84:18:26:00:00:02:b7:13";84:18:26:00:00:02:b7:13%;84:18:26:00:00:02:44:33  !*$;84:18:26:00:00:02:44:33 4#;84:18:26:00:00:02:44:33 
#;84:18:26:00:00:02:44:33!#;84:18:26:00:00:02:44:33!#;84:18:26:00:00:02:44:33 #;84:18:26:00:00:02:44:33 "; 84:18:26:00:00:02:44:33 ";84:18:26:00:00:02:44:33!$";84:18:26:00:00:02:44:33!#";84:18:26:00:00:02:44:33 5";84:18:26:00:00:02:44:33 3";84:18:26:00:00:02:44:33 2%;84:18:26:00:00:00:d1:df  U$;84:18:26:00:00:00:d1:df!#;84:18:26:00:00:00:d1:df#a";84:18:26:00:00:00:d1:dfU";84:18:26:00:00:00:d1:dfU";84:18:26:00:00:00:d1:dfL1";84:18:26:00:00:00:d1:df!";84:18:26:00:00:00:d1:df!%;84:18:26:00:00:00:d0:fa  P$;84:18:26:00:00:00:d0:fa!<#;84:18:26:00:00:00:d0:fa!";84:18:26:00:00:00:d0:faP8";84:18:26:00:00:00:d0:faP7";84:18:26:00:00:00:d0:faKzigpy-0.62.3/tests/ota_providers/000077500000000000000000000000001456054056700170025ustar00rootroot00000000000000zigpy-0.62.3/tests/ota_providers/test_ota_provider_inovelli.py000066400000000000000000000212151456054056700250120ustar00rootroot00000000000000from unittest import mock import pytest from zigpy.config import CONF_OTA_INOVELLI import zigpy.ota import zigpy.ota.image import zigpy.ota.provider as ota_p from tests.async_mock import AsyncMock, patch INOVELLI_ID = 4655 INOVELLI_IMAGE_TYPE = 257 @pytest.fixture def inovelli_prov(): p = ota_p.Inovelli() p.enable() return p @pytest.fixture def inovelli_image_with_version(): def img(version=0x16908807, image_type=INOVELLI_IMAGE_TYPE): img = zigpy.ota.provider.INOVELLIImage( manufacturer_id=INOVELLI_ID, image_type=image_type, version=version, url=mock.sentinel.url, ) return img return img @pytest.fixture def inovelli_image(inovelli_image_with_version): return inovelli_image_with_version() @pytest.fixture def inovelli_key(): return zigpy.ota.image.ImageKey(INOVELLI_ID, INOVELLI_IMAGE_TYPE) async def test_inovelli_init_ota_dir(inovelli_prov): inovelli_prov.enable = mock.MagicMock() inovelli_prov.refresh_firmware_list = AsyncMock() r = await inovelli_prov.initialize_provider({CONF_OTA_INOVELLI: True}) assert r is None assert inovelli_prov.enable.call_count == 1 assert 
inovelli_prov.refresh_firmware_list.call_count == 1 async def test_inovelli_get_image_no_cache(inovelli_prov, inovelli_image): inovelli_image.fetch_image = AsyncMock(return_value=mock.sentinel.image) inovelli_prov._cache = mock.MagicMock() inovelli_prov._cache.__getitem__.side_effect = KeyError() inovelli_prov.refresh_firmware_list = AsyncMock() # inovelli manufacturer_id, but not in cache assert inovelli_image.key not in inovelli_prov._cache r = await inovelli_prov.get_image(inovelli_image.key) assert r is None assert inovelli_prov.refresh_firmware_list.call_count == 1 assert inovelli_prov._cache.__getitem__.call_count == 1 assert inovelli_image.fetch_image.call_count == 0 async def test_inovelli_get_image(inovelli_prov, inovelli_key, inovelli_image): inovelli_image.fetch_image = AsyncMock(return_value=mock.sentinel.image) inovelli_prov._cache = mock.MagicMock() inovelli_prov._cache.__getitem__.return_value = inovelli_image inovelli_prov.refresh_firmware_list = AsyncMock() r = await inovelli_prov.get_image(inovelli_key) assert r is mock.sentinel.image assert inovelli_prov._cache.__getitem__.call_count == 1 assert inovelli_prov._cache.__getitem__.call_args[0][0] == inovelli_image.key assert inovelli_image.fetch_image.call_count == 1 @patch("aiohttp.ClientSession.get") async def test_inovelli_refresh_list( mock_get, inovelli_prov, inovelli_image_with_version ): img1 = inovelli_image_with_version(version=0x16908807, image_type=257) img2 = inovelli_image_with_version(version=0x33619975, image_type=258) base = "https://files.inovelli.com/firmware" mock_get.return_value.__aenter__.return_value.json = AsyncMock( side_effect=[ { "VZM31-SN": [ { "version": "0000000B", "channel": "beta", "firmware": f"{base}/VZM31-SN/Beta/1.11/VZM31-SN_1.11.ota", "manufacturer_id": 4655, "image_type": 257, }, { "version": "16842764", "channel": "beta", "firmware": f"{base}/VZM31-SN/Beta/1.12/VZM31-SN_1.12.ota", "manufacturer_id": 4655, "image_type": 257, }, # Reorder these to put the most 
recent image in the middle { "version": "16908807", "channel": "beta", "firmware": f"{base}/VZM31-SN/Beta/2.07/VZM31-SN_2.07.ota", "manufacturer_id": 4655, "image_type": 257, }, { "version": "16843021", "channel": "beta", "firmware": f"{base}/VZM31-SN/Beta/1.13/VZM31-SN_1.13.ota", "manufacturer_id": 4655, "image_type": 257, }, { "version": "16908805", "channel": "beta", "firmware": f"{base}/VZM31-SN/Beta/2.05/VZM31-SN_2.05.ota", "manufacturer_id": 4655, "image_type": 257, }, { "version": "16908806", "channel": "beta", "firmware": f"{base}/VZM31-SN/Beta/2.06/VZM31-SN_2.06.ota", "manufacturer_id": 4655, "image_type": 257, }, ], "VZM35-SN": [ { "version": "00000004", "channel": "beta", "firmware": f"{base}/VZM35-SN/Beta/1.04/VZM35-SN_1.04.ota", "manufacturer_id": 4655, "image_type": 258, }, # This is reordered as well { "version": "33619975", "channel": "beta", "firmware": f"{base}/VZM35-SN/Beta/1.07/VZM35-SN_1.07.ota", "manufacturer_id": 4655, "image_type": 258, }, { "version": "33619974", "channel": "beta", "firmware": f"{base}/VZM35-SN/Beta/1.06/VZM35-SN_1.06.ota", "manufacturer_id": 4655, "image_type": 258, }, ], } ] ) mock_get.return_value.__aenter__.return_value.status = 202 mock_get.return_value.__aenter__.return_value.reason = "OK" await inovelli_prov.refresh_firmware_list() assert mock_get.call_count == 1 assert len(inovelli_prov._cache) == 2 assert img1.key in inovelli_prov._cache assert img2.key in inovelli_prov._cache cached_1 = inovelli_prov._cache[img1.key] assert cached_1.image_type == img1.image_type # Most recent image is still picked assert cached_1.url == f"{base}/VZM31-SN/Beta/2.07/VZM31-SN_2.07.ota" cached_2 = inovelli_prov._cache[img2.key] assert cached_2.image_type == img2.image_type assert cached_2.url == f"{base}/VZM35-SN/Beta/1.07/VZM35-SN_1.07.ota" assert not inovelli_prov.expired @patch("aiohttp.ClientSession.get") async def test_inovelli_refresh_list_locked( mock_get, inovelli_prov, inovelli_image_with_version ): await 
inovelli_prov._locks[ota_p.LOCK_REFRESH].acquire() mock_get.return_value.__aenter__.return_value.json = AsyncMock(side_effect=[[]]) await inovelli_prov.refresh_firmware_list() assert mock_get.call_count == 0 @patch("aiohttp.ClientSession.get") async def test_inovelli_refresh_list_failed(mock_get, inovelli_prov): mock_get.return_value.__aenter__.return_value.json = AsyncMock(side_effect=[[]]) mock_get.return_value.__aenter__.return_value.status = 434 mock_get.return_value.__aenter__.return_value.reason = "UNK" with patch.object(inovelli_prov, "update_expiration") as update_exp: await inovelli_prov.refresh_firmware_list() assert mock_get.call_count == 1 assert update_exp.call_count == 0 @patch("aiohttp.ClientSession.get") async def test_inovelli_fetch_image(mock_get, inovelli_image_with_version): image = zigpy.ota.image.OTAImage( header=zigpy.ota.image.OTAImageHeader( upgrade_file_id=200208670, header_version=256, header_length=56, field_control=zigpy.ota.image.FieldControl(0), manufacturer_id=INOVELLI_ID, image_type=257, file_version=16908807, stack_version=2, header_string="EBL VM_SWITCH", image_size=66, ), subelements=[ zigpy.ota.image.SubElement( tag_id=zigpy.ota.image.ElementTagId.UPGRADE_IMAGE, data=b"abcd" ) ], ) img = inovelli_image_with_version() img.url = mock.sentinel.url mock_get.return_value.__aenter__.return_value.read = AsyncMock( return_value=image.serialize() ) r = await img.fetch_image() assert isinstance(r, zigpy.ota.image.OTAImage) assert mock_get.call_count == 1 assert mock_get.call_args[0][0] == mock.sentinel.url assert r == image zigpy-0.62.3/tests/ota_providers/test_ota_provider_ledvance.py000066400000000000000000000210161456054056700247510ustar00rootroot00000000000000from unittest import mock import pytest from zigpy.config import CONF_OTA_LEDVANCE import zigpy.ota import zigpy.ota.image import zigpy.ota.provider as ota_p from tests.async_mock import AsyncMock, patch from tests.test_ota_image import image # noqa: F401 LEDVANCE_ID = 4489 
LEDVANCE_IMAGE_TYPE = 25 @pytest.fixture def ledvance_prov(): p = ota_p.Ledvance() p.enable() return p @pytest.fixture def ledvance_image_with_version(): def img(version=100, image_type=LEDVANCE_IMAGE_TYPE): img = zigpy.ota.provider.LedvanceImage( LEDVANCE_ID, image_type, version, 180052, mock.sentinel.url ) return img return img @pytest.fixture def ledvance_image(ledvance_image_with_version): return ledvance_image_with_version() @pytest.fixture def ledvance_key(): return zigpy.ota.image.ImageKey(LEDVANCE_ID, LEDVANCE_IMAGE_TYPE) async def test_ledvance_init_ota_dir(ledvance_prov): ledvance_prov.enable = mock.MagicMock() ledvance_prov.refresh_firmware_list = AsyncMock() r = await ledvance_prov.initialize_provider({CONF_OTA_LEDVANCE: True}) assert r is None assert ledvance_prov.enable.call_count == 1 assert ledvance_prov.refresh_firmware_list.call_count == 1 async def test_ledvance_get_image_no_cache(ledvance_prov, ledvance_image): ledvance_image.fetch_image = AsyncMock(return_value=mock.sentinel.image) ledvance_prov._cache = mock.MagicMock() ledvance_prov._cache.__getitem__.side_effect = KeyError() ledvance_prov.refresh_firmware_list = AsyncMock() # LEDVANCE manufacturer_id, but not in cache assert ledvance_image.key not in ledvance_prov._cache r = await ledvance_prov.get_image(ledvance_image.key) assert r is None assert ledvance_prov.refresh_firmware_list.call_count == 1 assert ledvance_prov._cache.__getitem__.call_count == 1 assert ledvance_image.fetch_image.call_count == 0 async def test_ledvance_get_image(ledvance_prov, ledvance_key, ledvance_image): ledvance_image.fetch_image = AsyncMock(return_value=mock.sentinel.image) ledvance_prov._cache = mock.MagicMock() ledvance_prov._cache.__getitem__.return_value = ledvance_image ledvance_prov.refresh_firmware_list = AsyncMock() r = await ledvance_prov.get_image(ledvance_key) assert r is mock.sentinel.image assert ledvance_prov._cache.__getitem__.call_count == 1 assert ledvance_prov._cache.__getitem__.call_args[0][0] 
== ledvance_image.key assert ledvance_image.fetch_image.call_count == 1 @patch("aiohttp.ClientSession.get") async def test_ledvance_refresh_list( mock_get, ledvance_prov, ledvance_image_with_version ): ver1, img_type1 = (0x00102428, 25) ver2, img_type2 = (0x00102428, 13) img1 = ledvance_image_with_version(version=ver1, image_type=img_type1) img2 = ledvance_image_with_version(version=ver2, image_type=img_type2) sha_1 = "ffe0298312f63fa0be5e568886e419d714146652ff4747a8afed2de" fn_1 = "A19 RGBW/00102428/A19_RGBW_IMG0019_00102428-encrypted" sha_2 = "fa5ab550bde3e8c877cf40aa460fc9836405a7843df040e75bfdb2f" fn_2 = "A19 TW 10 year/00102428/A19_TW_10_year_IMG000D_001024" mock_get.return_value.__aenter__.return_value.json = AsyncMock( side_effect=[ { "firmwares": [ { "blob": None, "identity": { "company": 4489, "product": 25, "version": { "major": 1, "minor": 2, "build": 428, "revision": 40, }, }, "releaseNotes": "", "shA256": sha_1, "name": "A19_RGBW_IMG0019_00102428-encrypted.ota", "productName": "A19 RGBW", "fullName": fn_1, "extension": ".ota", "released": "2019-02-28T16:36:28", "salesRegion": "us", "length": 180052, }, { "blob": None, "identity": { "company": 4489, "product": 13, "version": { "major": 1, "minor": 2, "build": 428, "revision": 40, }, }, "releaseNotes": "", "shA256": sha_2, "name": "A19_TW_10_year_IMG000D_00102428-encrypted.ota", "productName": "A19 TW 10 year", "fullName": fn_2, "extension": ".ota", "released": "2019-02-28T16:42:50", "salesRegion": "us", "length": 170800, }, # Old version but shows up after the new version in the OTA list { "blob": None, "identity": { "company": 4489, "product": 13, "version": { "major": 0, "minor": 2, "build": 428, "revision": 40, }, }, "releaseNotes": "", "shA256": sha_2, "name": "A19_TW_10_year_IMG000D_00102428-encrypted.ota", "productName": "A19 TW 10 year", "fullName": fn_2, "extension": ".ota", "released": "2015-02-28T16:42:50", "salesRegion": "us", "length": 170800, }, ] } ] ) 
mock_get.return_value.__aenter__.return_value.status = 202 mock_get.return_value.__aenter__.return_value.reason = "OK" await ledvance_prov.refresh_firmware_list() assert mock_get.call_count == 1 assert len(ledvance_prov._cache) == 2 assert img1.key in ledvance_prov._cache assert img2.key in ledvance_prov._cache cached_1 = ledvance_prov._cache[img1.key] assert cached_1.image_type == img1.image_type base = "https://api.update.ledvance.com/v1/zigbee/firmwares/download" assert cached_1.url == base + "?Company=4489&Product=25&Version=1.2.428.40" cached_2 = ledvance_prov._cache[img2.key] assert cached_2.image_type == img2.image_type assert cached_2.url == base + "?Company=4489&Product=13&Version=1.2.428.40" assert not ledvance_prov.expired @patch("aiohttp.ClientSession.get") async def test_ledvance_refresh_list_locked( mock_get, ledvance_prov, ledvance_image_with_version ): await ledvance_prov._locks[ota_p.LOCK_REFRESH].acquire() mock_get.return_value.__aenter__.return_value.json = AsyncMock(side_effect=[[]]) await ledvance_prov.refresh_firmware_list() assert mock_get.call_count == 0 @patch("aiohttp.ClientSession.get") async def test_ledvance_refresh_list_failed(mock_get, ledvance_prov): mock_get.return_value.__aenter__.return_value.json = AsyncMock(side_effect=[[]]) mock_get.return_value.__aenter__.return_value.status = 434 mock_get.return_value.__aenter__.return_value.reason = "UNK" with patch.object(ledvance_prov, "update_expiration") as update_exp: await ledvance_prov.refresh_firmware_list() assert mock_get.call_count == 1 assert update_exp.call_count == 0 @patch("aiohttp.ClientSession.get") async def test_ledvance_fetch_image(mock_get, ledvance_image_with_version): data = bytes.fromhex( "1ef1ee0b0001380000008911012178563412020054657374204f544120496d61" "676500000000000000000000000000000000000042000000" ) sub_el = b"\x00\x00\x04\x00\x00\x00abcd" img = ledvance_image_with_version(image_type=0x2101) img.url = mock.sentinel.url 
mock_get.return_value.__aenter__.return_value.read = AsyncMock( side_effect=[data + sub_el] ) r = await img.fetch_image() assert isinstance(r, zigpy.ota.image.OTAImage) assert mock_get.call_count == 1 assert mock_get.call_args[0][0] == mock.sentinel.url assert r.serialize() == data + sub_el zigpy-0.62.3/tests/ota_providers/test_ota_provider_remote.py000066400000000000000000000107441456054056700244710ustar00rootroot00000000000000import hashlib import logging from unittest import mock import pytest import zigpy.ota import zigpy.ota.image from zigpy.ota.provider import RemoteImage, RemoteProvider from tests.async_mock import AsyncMock from tests.conftest import make_app MANUFACTURER_ID_1 = 0x1234 MANUFACTURER_ID_2 = 0x5678 IMAGE_TYPE = 0xABCD @pytest.fixture def provider(): p = RemoteProvider( url="https://example.org/ota/", manufacturer_ids=[MANUFACTURER_ID_1, MANUFACTURER_ID_2], ) p.enable() return p @pytest.fixture def ota_image(): img = zigpy.ota.image.OTAImage() img.header = zigpy.ota.image.OTAImageHeader( upgrade_file_id=zigpy.ota.image.OTAImageHeader.MAGIC_VALUE, header_version=256, header_length=56 + 2 + 2, field_control=zigpy.ota.image.FieldControl.HARDWARE_VERSIONS_PRESENT, manufacturer_id=MANUFACTURER_ID_2, image_type=IMAGE_TYPE, file_version=100, stack_version=2, header_string="This is a test header!", image_size=56 + 2 + 4 + 4 + 2 + 2, minimum_hardware_version=1, maximum_hardware_version=3, ) img.subelements = [zigpy.ota.image.SubElement(tag_id=0x0000, data=b"data")] return img @pytest.fixture def image_json(ota_image): return { "binary_url": "https://example.org/ota/image1.ota", "file_version": ota_image.header.file_version, "image_type": ota_image.header.image_type, "manufacturer_id": ota_image.header.manufacturer_id, "changelog": "A changelog would go here.", "checksum": f"sha3-256:{hashlib.sha3_256(ota_image.serialize()).hexdigest()}", "min_hardware_version": ota_image.header.minimum_hardware_version, "max_hardware_version": 
ota_image.header.maximum_hardware_version, "min_current_file_version": 1, "max_current_file_version": 99, } @mock.patch("aiohttp.ClientSession.get") async def test_remote_image(mock_get, image_json, ota_image, provider, caplog): image = RemoteImage.from_json(image_json) assert image.key == zigpy.ota.image.ImageKey( image.manufacturer_id, image.image_type, ) # Test unsuccessful download rsp = mock_get.return_value.__aenter__.return_value rsp.status = 404 with caplog.at_level(logging.WARNING): await provider.initialize_provider({}) assert "Couldn't download" in caplog.text caplog.clear() # Test successful download rsp.status = 200 rsp.json = AsyncMock(return_value=[image_json]) rsp.read = AsyncMock(return_value=ota_image.serialize()) await provider.initialize_provider({}) new_image = await provider.get_image(image.key) assert new_image == ota_image @mock.patch("aiohttp.ClientSession.get") async def test_remote_image_bad_checksum(mock_get, image_json, ota_image, provider): image = RemoteImage.from_json(image_json) # Corrupt the checksum image_json["checksum"] = f"sha3-256:{hashlib.sha3_256(b'').hexdigest()}" # Test "successful" download rsp = mock_get.return_value.__aenter__.return_value rsp.status = 200 rsp.json = AsyncMock(return_value=[image_json]) rsp.read = AsyncMock(return_value=ota_image.serialize()) await provider.initialize_provider({}) # The image will fail to download with pytest.raises(ValueError) as exc: await provider.get_image(image.key) assert "Image checksum is invalid" in str(exc.value) async def test_get_image_with_no_manufacturer_ids(ota_image, provider): provider.manufacturer_ids = None missing_key = zigpy.ota.image.ImageKey( ota_image.header.manufacturer_id + 1, ota_image.header.image_type + 1, ) assert await provider.filter_get_image(missing_key) is False async def test_provider_initialization(): app = make_app( { "ota": { "remote_providers": [ { "url": "https://fw.zigbee.example.org/ota.json", "manufacturer_ids": [4660, 22136], }, { "url": 
"https://fw.zigbee.example.org/ota-beta.json", }, ] } } ) listeners, _ = zip(*app._ota._listeners.values()) assert listeners[0].url == "https://fw.zigbee.example.org/ota.json" assert listeners[0].manufacturer_ids == [4660, 22136] assert listeners[1].url == "https://fw.zigbee.example.org/ota-beta.json" assert listeners[1].manufacturer_ids == [] zigpy-0.62.3/tests/ota_providers/test_ota_provider_salus.py000066400000000000000000000140401456054056700243160ustar00rootroot00000000000000import io import tarfile from unittest import mock import pytest from zigpy.config import CONF_OTA_SALUS import zigpy.ota import zigpy.ota.image import zigpy.ota.provider as ota_p from tests.async_mock import AsyncMock, patch from tests.test_ota_image import image # noqa: F401 SALUS_ID = 4216 SALUS_MODEL = "XY123" @pytest.fixture def salus_prov(): p = ota_p.Salus() p.enable() return p @pytest.fixture def salus_image_with_version(): def img(version=100, model=SALUS_MODEL): img = zigpy.ota.provider.SalusImage( SALUS_ID, model, version, 180052, mock.sentinel.url ) return img return img @pytest.fixture def salus_image(salus_image_with_version): return salus_image_with_version() @pytest.fixture def salus_key(): return zigpy.ota.image.ImageKey(SALUS_ID, SALUS_MODEL) async def test_salus_init_ota_dir(salus_prov): salus_prov.enable = mock.MagicMock() salus_prov.refresh_firmware_list = AsyncMock() r = await salus_prov.initialize_provider({CONF_OTA_SALUS: True}) assert r is None assert salus_prov.enable.call_count == 1 assert salus_prov.refresh_firmware_list.call_count == 1 async def test_salus_get_image_no_cache(salus_prov, salus_image): salus_image.fetch_image = AsyncMock(return_value=mock.sentinel.image) salus_prov._cache = mock.MagicMock() salus_prov._cache.__getitem__.side_effect = KeyError() salus_prov.refresh_firmware_list = AsyncMock() # salus manufacturer_id, but not in cache assert salus_image.key not in salus_prov._cache r = await salus_prov.get_image(salus_image.key) assert r is None 
assert salus_prov.refresh_firmware_list.call_count == 1 assert salus_prov._cache.__getitem__.call_count == 1 assert salus_image.fetch_image.call_count == 0 async def test_salus_get_image(salus_prov, salus_key, salus_image): salus_image.fetch_image = AsyncMock(return_value=mock.sentinel.image) salus_prov._cache = mock.MagicMock() salus_prov._cache.__getitem__.return_value = salus_image salus_prov.refresh_firmware_list = AsyncMock() r = await salus_prov.get_image(salus_key) assert r is mock.sentinel.image assert salus_prov._cache.__getitem__.call_count == 1 assert salus_prov._cache.__getitem__.call_args[0][0] == salus_image.key assert salus_image.fetch_image.call_count == 1 @patch("aiohttp.ClientSession.get") async def test_salus_refresh_list(mock_get, salus_prov, salus_image_with_version): img1 = salus_image_with_version(version="00000006", model="45856") img2 = salus_image_with_version(version="00000006", model="45857") mock_get.return_value.__aenter__.return_value.json = AsyncMock( side_effect=[ { "versions": [ { "model": "45856", "version": "00000006", "url": "http://eu.salusconnect.io/download/firmware/a65779cd-13cd-41e5-a7e0-5346f24a0f62/45856_00000006.tar.gz", }, { "model": "45857", "version": "00000006", "url": "http://eu.salusconnect.io/download/firmware/3319b501-98f3-4337-afbe-8d04bb9938bc/45857_00000006.tar.gz", }, ] } ] ) mock_get.return_value.__aenter__.return_value.status = 202 mock_get.return_value.__aenter__.return_value.reason = "OK" await salus_prov.refresh_firmware_list() assert mock_get.call_count == 1 assert len(salus_prov._cache) == 2 assert img1.key in salus_prov._cache assert img2.key in salus_prov._cache cached_1 = salus_prov._cache[img1.key] assert cached_1.model == img1.model base = "http://eu.salusconnect.io/download/firmware/" assert ( cached_1.url == base + "a65779cd-13cd-41e5-a7e0-5346f24a0f62/45856_00000006.tar.gz" ) cached_2 = salus_prov._cache[img2.key] assert cached_2.model == img2.model assert ( cached_2.url == base + 
"3319b501-98f3-4337-afbe-8d04bb9938bc/45857_00000006.tar.gz" ) assert not salus_prov.expired @patch("aiohttp.ClientSession.get") async def test_salus_refresh_list_locked( mock_get, salus_prov, salus_image_with_version ): await salus_prov._locks[ota_p.LOCK_REFRESH].acquire() mock_get.return_value.__aenter__.return_value.json = AsyncMock(side_effect=[[]]) await salus_prov.refresh_firmware_list() assert mock_get.call_count == 0 @patch("aiohttp.ClientSession.get") async def test_salus_refresh_list_failed(mock_get, salus_prov): mock_get.return_value.__aenter__.return_value.json = AsyncMock(side_effect=[[]]) mock_get.return_value.__aenter__.return_value.status = 434 mock_get.return_value.__aenter__.return_value.reason = "UNK" with patch.object(salus_prov, "update_expiration") as update_exp: await salus_prov.refresh_firmware_list() assert mock_get.call_count == 1 assert update_exp.call_count == 0 @patch("aiohttp.ClientSession.get") async def test_salus_fetch_image(mock_get, salus_image_with_version): data = bytes.fromhex( # based on ikea sample but modded mfr code "1ef1ee0b0001380000007810012178563412020054657374204f544120496d61" "676500000000000000000000000000000000000042000000" ) sub_el = b"\x00\x00\x04\x00\x00\x00abcd" # construct tar.gz from header + sub_el binstr = data + sub_el fh = io.BytesIO() # don't create a real file on disk, just in RAM. 
with tarfile.open(fileobj=fh, mode="w:gz") as tar: info = tarfile.TarInfo("salus_sample.ota") info.size = len(binstr) tar.addfile(info, io.BytesIO(binstr)) img = salus_image_with_version(model=SALUS_MODEL) img.url = mock.sentinel.url mock_get.return_value.__aenter__.return_value.read = AsyncMock( side_effect=[fh.getvalue()] ) r = await img.fetch_image() assert isinstance(r, zigpy.ota.image.OTAImage) assert mock_get.call_count == 1 assert mock_get.call_args[0][0] == mock.sentinel.url assert r.serialize() == data + sub_el zigpy-0.62.3/tests/ota_providers/test_ota_provider_sonoff.py000066400000000000000000000127611456054056700244710ustar00rootroot00000000000000from unittest import mock import pytest from zigpy.config import CONF_OTA_SONOFF import zigpy.ota import zigpy.ota.image import zigpy.ota.provider as ota_p from tests.async_mock import AsyncMock, patch MANUFACTURER_ID = 4742 IMAGE_TYPE = 1 @pytest.fixture def sonoff_prov(): p = ota_p.Sonoff() p.enable() return p @pytest.fixture def sonoff_image_with_version(): def img(version=4353, image_type=IMAGE_TYPE): img = zigpy.ota.provider.SONOFFImage( manufacturer_id=MANUFACTURER_ID, image_type=image_type, version=version, image_size=131086, url=mock.sentinel.url, ) return img return img @pytest.fixture def sonoff_image(sonoff_image_with_version): return sonoff_image_with_version() @pytest.fixture def sonoff_key(): return zigpy.ota.image.ImageKey(MANUFACTURER_ID, IMAGE_TYPE) async def test_sonoff_init(sonoff_prov): sonoff_prov.enable = mock.MagicMock() sonoff_prov.refresh_firmware_list = AsyncMock() r = await sonoff_prov.initialize_provider({CONF_OTA_SONOFF: True}) assert r is None assert sonoff_prov.enable.call_count == 1 assert sonoff_prov.refresh_firmware_list.call_count == 1 async def test_sonoff_get_image_no_cache(sonoff_prov, sonoff_image): sonoff_image.fetch_image = AsyncMock(return_value=mock.sentinel.image) sonoff_prov._cache = mock.MagicMock() sonoff_prov._cache.__getitem__.side_effect = KeyError() 
sonoff_prov.refresh_firmware_list = AsyncMock() # SONOFF manufacturer_id, but not in cache assert sonoff_image.key not in sonoff_prov._cache r = await sonoff_prov.get_image(sonoff_image.key) assert r is None assert sonoff_prov.refresh_firmware_list.call_count == 1 assert sonoff_prov._cache.__getitem__.call_count == 1 assert sonoff_image.fetch_image.call_count == 0 async def test_sonoff_get_image(sonoff_prov, sonoff_key, sonoff_image): sonoff_image.fetch_image = AsyncMock(return_value=mock.sentinel.image) sonoff_prov._cache = mock.MagicMock() sonoff_prov._cache.__getitem__.return_value = sonoff_image sonoff_prov.refresh_firmware_list = AsyncMock() r = await sonoff_prov.get_image(sonoff_key) assert r is mock.sentinel.image assert sonoff_prov._cache.__getitem__.call_count == 1 assert sonoff_prov._cache.__getitem__.call_args[0][0] == sonoff_image.key assert sonoff_image.fetch_image.call_count == 1 @patch("aiohttp.ClientSession.get") async def test_sonoff_refresh_list(mock_get, sonoff_prov, sonoff_image_with_version): img = sonoff_image_with_version(version=4353, image_type=1) mock_get.return_value.__aenter__.return_value.json = AsyncMock( return_value=[ { "fw_binary_url": "https://zigbee-ota.sonoff.tech/releases/86-0001-00001101.zigbee", "fw_file_version": 4353, "fw_filesize": 131086, "fw_image_type": 1, "fw_manufacturer_id": 4742, "model_id": "ZBMINI-L", } ] ) mock_get.return_value.__aenter__.return_value.status = 200 mock_get.return_value.__aenter__.return_value.reason = "OK" await sonoff_prov.refresh_firmware_list() assert mock_get.call_count == 1 assert len(sonoff_prov._cache) == 1 assert img.key in sonoff_prov._cache cached = sonoff_prov._cache[img.key] assert cached.image_type == img.image_type assert ( cached.url == "https://zigbee-ota.sonoff.tech/releases/86-0001-00001101.zigbee" ) assert not sonoff_prov.expired @patch("aiohttp.ClientSession.get") async def test_sonoff_refresh_list_locked( mock_get, sonoff_prov, sonoff_image_with_version ): await 
sonoff_prov._locks[ota_p.LOCK_REFRESH].acquire() mock_get.return_value.__aenter__.return_value.json = AsyncMock(side_effect=[[]]) await sonoff_prov.refresh_firmware_list() assert mock_get.call_count == 0 @patch("aiohttp.ClientSession.get") async def test_sonoff_refresh_list_failed(mock_get, sonoff_prov): mock_get.return_value.__aenter__.return_value.json = AsyncMock(side_effect=[[]]) mock_get.return_value.__aenter__.return_value.status = 434 mock_get.return_value.__aenter__.return_value.reason = "UNK" with patch.object(sonoff_prov, "update_expiration") as update_exp: await sonoff_prov.refresh_firmware_list() assert mock_get.call_count == 1 assert update_exp.call_count == 0 @patch("aiohttp.ClientSession.get") async def test_sonoff_fetch_image(mock_get, sonoff_image_with_version): image = zigpy.ota.image.OTAImage( header=zigpy.ota.image.OTAImageHeader( upgrade_file_id=200208670, header_version=256, header_length=56, field_control=zigpy.ota.image.FieldControl(0), manufacturer_id=4742, image_type=1, file_version=4353, stack_version=2, header_string="", image_size=66, ), subelements=[ zigpy.ota.image.SubElement( tag_id=zigpy.ota.image.ElementTagId.UPGRADE_IMAGE, data=b"abcd" ) ], ) img = sonoff_image_with_version(version=4353, image_type=1) img.url = mock.sentinel.url mock_get.return_value.__aenter__.return_value.read = AsyncMock( return_value=image.serialize() ) r = await img.fetch_image() assert isinstance(r, zigpy.ota.image.OTAImage) assert mock_get.call_count == 1 assert mock_get.call_args[0][0] == mock.sentinel.url assert r == image zigpy-0.62.3/tests/ota_providers/test_ota_provider_thirdreality.py000066400000000000000000000133411456054056700256760ustar00rootroot00000000000000from unittest import mock import pytest from zigpy.config import CONF_OTA_THIRDREALITY import zigpy.ota import zigpy.ota.image from zigpy.ota.provider import LOCK_REFRESH, ThirdReality, ThirdRealityImage from tests.async_mock import AsyncMock, patch MANUFACTURER_ID = 4659 @pytest.fixture def 
thirdreality_prov(): p = ThirdReality() p.enable() return p @pytest.fixture def thirdreality_image(): return ThirdRealityImage.from_json( { "modelId": "3RSB22BZ", "url": "https://tr-zha.s3.amazonaws.com/firmwares/SmartButton_Zigbee_PROD_OTA_V21_1.00.21.ota", "version": "1.00.21", "imageType": 54184, "manufacturerId": 4659, "fileVersion": 33, } ) async def test_thirdreality_init(thirdreality_prov): thirdreality_prov.enable = mock.MagicMock() thirdreality_prov.refresh_firmware_list = AsyncMock() r = await thirdreality_prov.initialize_provider({CONF_OTA_THIRDREALITY: True}) assert r is None assert thirdreality_prov.enable.call_count == 1 assert thirdreality_prov.refresh_firmware_list.call_count == 1 async def test_thirdreality_get_image_no_cache(thirdreality_prov, thirdreality_image): thirdreality_image.fetch_image = AsyncMock(return_value=mock.sentinel.image) thirdreality_prov._cache = mock.MagicMock() thirdreality_prov._cache.__getitem__.side_effect = KeyError() thirdreality_prov.refresh_firmware_list = AsyncMock() # ThirdReality manufacturer_id, but not in cache assert thirdreality_image.key not in thirdreality_prov._cache r = await thirdreality_prov.get_image(thirdreality_image.key) assert r is None assert thirdreality_prov.refresh_firmware_list.call_count == 1 assert thirdreality_prov._cache.__getitem__.call_count == 1 assert thirdreality_image.fetch_image.call_count == 0 async def test_thirdreality_get_image(thirdreality_prov, thirdreality_image): thirdreality_image.fetch_image = AsyncMock(return_value=mock.sentinel.image) thirdreality_prov._cache = mock.MagicMock() thirdreality_prov._cache.__getitem__.return_value = thirdreality_image thirdreality_prov.refresh_firmware_list = AsyncMock() r = await thirdreality_prov.get_image(thirdreality_image.key) assert r is mock.sentinel.image assert thirdreality_prov._cache.__getitem__.call_count == 1 assert ( thirdreality_prov._cache.__getitem__.mock_calls[0].args[0] == thirdreality_image.key ) assert 
thirdreality_image.fetch_image.call_count == 1 @patch("aiohttp.ClientSession.get") async def test_thirdreality_refresh_list( mock_get, thirdreality_prov, thirdreality_image ): mock_get.return_value.__aenter__.return_value.json = AsyncMock( return_value={ "versions": [ { "modelId": "3RSB22BZ", "url": "https://tr-zha.s3.amazonaws.com/firmwares/SmartButton_Zigbee_PROD_OTA_V21_1.00.21.ota", "version": "1.00.21", "imageType": 54184, "manufacturerId": 4659, "fileVersion": 33, } ] } ) mock_get.return_value.__aenter__.return_value.status = 200 mock_get.return_value.__aenter__.return_value.reason = "OK" await thirdreality_prov.refresh_firmware_list() assert mock_get.call_count == 1 assert len(thirdreality_prov._cache) == 1 assert thirdreality_image.key in thirdreality_prov._cache cached = thirdreality_prov._cache[thirdreality_image.key] assert cached.image_type == thirdreality_image.image_type assert ( cached.url == "https://tr-zha.s3.amazonaws.com/firmwares/SmartButton_Zigbee_PROD_OTA_V21_1.00.21.ota" ) assert not thirdreality_prov.expired @patch("aiohttp.ClientSession.get") async def test_thirdreality_refresh_list_locked(mock_get, thirdreality_prov): await thirdreality_prov._locks[LOCK_REFRESH].acquire() mock_get.return_value.__aenter__.return_value.json = AsyncMock(side_effect=[[]]) await thirdreality_prov.refresh_firmware_list() assert mock_get.call_count == 0 @patch("aiohttp.ClientSession.get") async def test_thirdreality_refresh_list_failed(mock_get, thirdreality_prov): mock_get.return_value.__aenter__.return_value.json = AsyncMock(side_effect=[[]]) mock_get.return_value.__aenter__.return_value.status = 434 mock_get.return_value.__aenter__.return_value.reason = "UNK" with patch.object(thirdreality_prov, "update_expiration") as update_exp: await thirdreality_prov.refresh_firmware_list() assert mock_get.call_count == 1 assert update_exp.call_count == 0 @patch("aiohttp.ClientSession.get") async def test_thirdreality_fetch_image(mock_get, thirdreality_image): image = 
zigpy.ota.image.OTAImage( header=zigpy.ota.image.OTAImageHeader( upgrade_file_id=200208670, header_version=256, header_length=56, field_control=zigpy.ota.image.FieldControl(0), manufacturer_id=MANUFACTURER_ID, image_type=54184, file_version=33, stack_version=2, header_string="Telink OTA Sample Usage", image_size=66, ), subelements=[ zigpy.ota.image.SubElement( tag_id=zigpy.ota.image.ElementTagId.UPGRADE_IMAGE, data=b"abcd" ) ], ) thirdreality_image.url = mock.sentinel.url mock_get.return_value.__aenter__.return_value.read = AsyncMock( return_value=image.serialize() ) r = await thirdreality_image.fetch_image() assert isinstance(r, zigpy.ota.image.OTAImage) assert mock_get.call_count == 1 assert mock_get.mock_calls[0].args[0] == mock.sentinel.url assert r == image zigpy-0.62.3/tests/test_app_state.py000066400000000000000000000126321456054056700175170ustar00rootroot00000000000000"""Test unit for app status and counters.""" import pytest import zigpy.state as app_state COUNTER_NAMES = ["counter_1", "counter_2", "some random name"] @pytest.fixture def counters(): """Counters fixture.""" counters = app_state.CounterGroup("ezsp_counters") for name in COUNTER_NAMES: counters[name] return counters def test_counter(): """Test basic counter.""" counter = app_state.Counter("mock_counter") assert counter.value == 0 counter = app_state.Counter("mock_counter", 5) assert counter.value == 5 assert counter.reset_count == 0 counter.update(5) assert counter.value == 5 assert counter.reset_count == 0 counter.update(8) assert counter.value == 8 assert counter.reset_count == 0 counter.update(9) assert counter.value == 9 assert counter.reset_count == 0 counter.reset() assert counter.value == 9 assert counter._raw_value == 0 assert counter.reset_count == 1 # new value after a counter was reset/clear counter.update(12) assert counter.value == 21 assert counter.reset_count == 1 counter.update(15) assert counter.value == 24 assert counter.reset_count == 1 # new counter value is less than 
previously reported. # assume counter was reset counter.update(14) assert counter.value == 24 + 14 assert counter.reset_count == 2 counter.reset_and_update(14) assert counter.value == 38 + 14 assert counter.reset_count == 3 def test_counter_str(): """Test counter str representation.""" counter = app_state.Counter("some_counter", 8) assert str(counter) == "some_counter = 8" def test_counters_init(): """Test counters initialization.""" counter_groups = app_state.CounterGroups() assert len(counter_groups) == 0 counters = counter_groups["ezsp_counters"] assert len(counter_groups) == 1 assert len(counters) == 0 assert counters.name == "ezsp_counters" for name in COUNTER_NAMES: counters[name] assert len(counters) == 3 cnt_1, cnt_2, cnt_3 = (counter for counter in counters.counters()) assert cnt_1.name == "counter_1" assert cnt_2.name == "counter_2" assert cnt_3.name == "some random name" assert cnt_1.value == 0 assert cnt_2.value == 0 assert cnt_3.value == 0 counters["some random name"].update(2) assert cnt_3.value == 2 assert counters["some random name"].value == 2 assert counters["some random name"] == 2 assert counters["some random name"] == cnt_3 assert int(cnt_3) == 2 assert "counter_2" in counters assert [counter.name for counter in counters.counters()] == COUNTER_NAMES counters.reset() for counter in counters.counters(): assert counter.reset_count == 1 def test_counters_str_and_repr(counters): """Test counters str and repr.""" counters["counter_1"].update(22) counters["counter_2"].update(33) assert ( str(counters) == "ezsp_counters: [counter_1 = 22, counter_2 = 33, some random name = 0]" ) assert ( repr(counters) == """CounterGroup('ezsp_counters', {Counter('counter_1', 22), """ """Counter('counter_2', 33), Counter('some random name', 0)})""" ) def test_state(): """Test state structure.""" state = app_state.State() assert state assert state.counters == {} assert state.counters["new_collection"]["counter_2"] == 0 assert 
state.counters["new_collection"]["counter_2"].reset_count == 0 assert state.counters["new_collection"]["counter_3"].reset_count == 0 state.counters["new_collection"]["counter_2"] = 2 def test_counters_reset(counters): """Test counter resetting.""" counter = counters["counter_1"] assert counter.reset_count == 0 counters["counter_1"].update(22) assert counter.value == 22 assert counter.reset_count == 0 counters.reset() assert counter.reset_count == 1 counter.update(22) assert counter.value == 44 assert counter.reset_count == 1 def test_counter_incr(): """Test counter increment.""" counter = app_state.Counter("counter_name", 42) assert counter == 42 counter.increment() assert counter == 43 counter.increment(5) assert counter == 48 assert counter.value == 48 with pytest.raises(AssertionError): counter.increment(-1) def test_counter_nested_groups_increment(): """Test nested counters.""" counters = app_state.CounterGroup("device_counters") assert len(counters) == 0 counters.increment("reply", "rx", "zdo", 0x8031) counters.increment("total", "rx", 3, 0x0006) counters.increment("total", "rx", 3, 0x0008) counters.increment("total", "rx", 3, 0x0300) tags = set(counters.tags()) assert {"rx"} == tags tags = set(counters["rx"].tags()) assert {"zdo", 3} == tags assert counters["rx"]["reply"] == 1 assert counters["rx"]["zdo"]["reply"] == 1 assert counters["rx"]["zdo"][0x8031]["reply"] == 1 assert counters["rx"]["total"] == 3 assert counters["rx"][3]["total"] == 3 assert counters["rx"][3][0x0006]["total"] == 1 assert counters["rx"][3][0x0008]["total"] == 1 assert counters["rx"][3][0x0300]["total"] == 1 def test_counter_groups(): """Test CounterGroups.""" groups = app_state.CounterGroups() assert not list(groups) counter_group = groups["ezsp_counters"] new_groups = list(groups) assert new_groups == [counter_group] zigpy-0.62.3/tests/test_appdb.py000066400000000000000000001075751456054056700166400ustar00rootroot00000000000000import asyncio import contextlib from datetime import 
datetime, timedelta, timezone import pathlib import sqlite3 import sys import threading import time import aiosqlite import freezegun import pytest from zigpy import profiles import zigpy.appdb import zigpy.application import zigpy.config as conf from zigpy.const import SIG_ENDPOINTS, SIG_MANUFACTURER, SIG_MODEL from zigpy.device import Device, Status import zigpy.endpoint import zigpy.ota from zigpy.quirks import CustomDevice import zigpy.types as t import zigpy.zcl from zigpy.zcl.foundation import Status as ZCLStatus from zigpy.zdo import types as zdo_t from tests.async_mock import AsyncMock, MagicMock, call, patch from tests.conftest import make_app, make_ieee from tests.test_backups import backup_factory # noqa: F401 @pytest.fixture(autouse=True) def auto_kill_aiosqlite(): """aiosqlite's background thread does not let pytest exit when a failure occurs""" yield for thread in threading.enumerate(): if not isinstance(thread, aiosqlite.core.Connection): continue try: conn = thread._conn except ValueError: pass else: with contextlib.suppress(zigpy.appdb.sqlite3.ProgrammingError): conn.close() thread._running = False async def make_app_with_db(database_file): if isinstance(database_file, pathlib.Path): database_file = str(database_file) app = make_app({conf.CONF_DATABASE: database_file}) await app._load_db() return app class FakeCustomDevice(CustomDevice): replacement = { "endpoints": { # Endpoint exists on original device 1: { "input_clusters": [0, 1, 3, 0x0008], "output_clusters": [6], }, # Endpoint is created only at runtime by the quirk 99: { "input_clusters": [0, 1, 3, 0x0008], "output_clusters": [6], "profile_id": 65535, "device_type": 123, }, } } def mock_dev_init(initialize: bool): """Device schedule_initialize mock factory.""" def _initialize(self): if initialize: self.node_desc = zdo_t.NodeDescriptor(0, 1, 2, 3, 4, 5, 6, 7, 8) return _initialize def _mk_rar(attrid, value, status=0): r = zigpy.zcl.foundation.ReadAttributeRecord() r.attrid = attrid r.status = 
status r.value = zigpy.zcl.foundation.TypeValue() r.value.value = value return r def fake_get_device(device): if device.endpoints.get(1) is not None and device[1].profile_id == 65535: return FakeCustomDevice(device.application, device.ieee, device.nwk, device) return device async def test_no_database(tmp_path): with patch("zigpy.appdb.PersistingListener.new", AsyncMock()) as db_mock: db_mock.return_value.load.side_effect = AsyncMock() await make_app_with_db(None) assert db_mock.return_value.load.call_count == 0 db = tmp_path / "test.db" with patch("zigpy.appdb.PersistingListener.new", AsyncMock()) as db_mock: db_mock.return_value.load.side_effect = AsyncMock() await make_app_with_db(db) assert db_mock.return_value.load.call_count == 1 @patch("zigpy.device.Device.schedule_initialize", new=mock_dev_init(True)) async def test_database(tmp_path): db = tmp_path / "test.db" app = await make_app_with_db(db) ieee = make_ieee() relays_1 = [t.NWK(0x1234), t.NWK(0x2345)] relays_2 = [t.NWK(0x3456), t.NWK(0x4567)] app.handle_join(99, ieee, 0) app.handle_join(99, ieee, 0) dev = app.get_device(ieee) ep = dev.add_endpoint(1) ep.status = zigpy.endpoint.Status.ZDO_INIT ep.profile_id = 260 ep.device_type = profiles.zha.DeviceType.PUMP ep = dev.add_endpoint(2) ep.status = zigpy.endpoint.Status.ZDO_INIT ep.profile_id = 260 ep.device_type = 0xFFFD # Invalid clus = ep.add_input_cluster(0) ep.add_output_cluster(1) ep = dev.add_endpoint(3) ep.status = zigpy.endpoint.Status.ZDO_INIT ep.profile_id = 49246 ep.device_type = profiles.zll.DeviceType.COLOR_LIGHT app.device_initialized(dev) clus.update_attribute(0, 99) clus.update_attribute(4, bytes("Custom", "ascii")) clus.update_attribute(5, bytes("Model", "ascii")) clus.listener_event("cluster_command", 0) clus.listener_event("general_command") dev.relays = relays_1 signature = dev.get_signature() assert ep.endpoint_id in signature[SIG_ENDPOINTS] assert SIG_MANUFACTURER not in signature assert SIG_MODEL not in signature dev.manufacturer = 
"Custom" dev.model = "Model" assert dev.get_signature()[SIG_MANUFACTURER] == "Custom" assert dev.get_signature()[SIG_MODEL] == "Model" ts = time.time() dev.last_seen = ts dev_last_seen = dev.last_seen assert isinstance(dev.last_seen, float) assert abs(dev.last_seen - ts) < 0.01 # Test a CustomDevice custom_ieee = make_ieee(1) app.handle_join(199, custom_ieee, 0) dev = app.get_device(custom_ieee) app.device_initialized(dev) ep = dev.add_endpoint(1) ep.status = zigpy.endpoint.Status.ZDO_INIT ep.device_type = profiles.zll.DeviceType.COLOR_LIGHT ep.profile_id = 65535 with patch("zigpy.quirks.get_device", fake_get_device): app.device_initialized(dev) assert isinstance(app.get_device(custom_ieee), FakeCustomDevice) assert isinstance(app.get_device(custom_ieee), CustomDevice) dev = app.get_device(custom_ieee) app.device_initialized(dev) dev.relays = relays_2 dev.endpoints[1].level.update_attribute(0x0011, 17) dev.endpoints[99].level.update_attribute(0x0011, 17) assert dev.endpoints[1].in_clusters[0x0008]._attr_cache[0x0011] == 17 assert dev.endpoints[99].in_clusters[0x0008]._attr_cache[0x0011] == 17 custom_dev_last_seen = dev.last_seen assert isinstance(custom_dev_last_seen, float) await app.shutdown() # Everything should've been saved - check that it re-loads with patch("zigpy.quirks.get_device", fake_get_device): app2 = await make_app_with_db(db) dev = app2.get_device(ieee) assert dev.endpoints[1].device_type == profiles.zha.DeviceType.PUMP assert dev.endpoints[2].device_type == 0xFFFD assert dev.endpoints[2].in_clusters[0]._attr_cache[0] == 99 assert dev.endpoints[2].in_clusters[0]._attr_cache[4] == bytes("Custom", "ascii") assert dev.endpoints[2].in_clusters[0]._attr_cache[5] == bytes("Model", "ascii") assert dev.endpoints[2].manufacturer == "Custom" assert dev.endpoints[2].model == "Model" assert dev.endpoints[2].out_clusters[1].cluster_id == 1 assert dev.endpoints[3].device_type == profiles.zll.DeviceType.COLOR_LIGHT assert dev.relays == relays_1 # The timestamp 
won't be restored exactly but it is more than close enough assert abs(dev.last_seen - dev_last_seen) < 0.01 dev = app2.get_device(custom_ieee) # This virtual attribute is added by the quirk, there is no corresponding cluster # stored in the database, nor is there a corresponding endpoint 99 assert dev.endpoints[1].in_clusters[0x0008]._attr_cache[0x0011] == 17 assert dev.endpoints[99].in_clusters[0x0008]._attr_cache[0x0011] == 17 assert dev.relays == relays_2 assert abs(dev.last_seen - custom_dev_last_seen) < 0.01 dev.relays = None app.handle_leave(99, ieee) await app2.shutdown() app3 = await make_app_with_db(db) assert ieee in app3.devices async def mockleave(*args, **kwargs): return [0] app3.devices[ieee].zdo.leave = mockleave await app3.remove(ieee) for _i in range(1, 20): await asyncio.sleep(0) assert ieee not in app3.devices await app3.shutdown() app4 = await make_app_with_db(db) assert ieee not in app4.devices dev = app4.get_device(custom_ieee) assert dev.relays is None await app4.shutdown() @patch("zigpy.device.Device.schedule_group_membership_scan", MagicMock()) async def _test_null_padded(tmp_path, test_manufacturer=None, test_model=None): db = tmp_path / "test.db" app = await make_app_with_db(db) ieee = make_ieee() with patch( "zigpy.device.Device.schedule_initialize", new=mock_dev_init(True), ): app.handle_join(99, ieee, 0) app.handle_join(99, ieee, 0) dev = app.get_device(ieee) ep = dev.add_endpoint(3) ep.status = zigpy.endpoint.Status.ZDO_INIT ep.profile_id = 260 ep.device_type = profiles.zha.DeviceType.PUMP clus = ep.add_input_cluster(0) ep.add_output_cluster(1) app.device_initialized(dev) clus.update_attribute(4, test_manufacturer) clus.update_attribute(5, test_model) clus.listener_event("cluster_command", 0) clus.listener_event("zdo_command") await app.shutdown() # Everything should've been saved - check that it re-loads app2 = await make_app_with_db(db) dev = app2.get_device(ieee) assert dev.endpoints[3].device_type == profiles.zha.DeviceType.PUMP 
assert dev.endpoints[3].in_clusters[0]._attr_cache[4] == test_manufacturer assert dev.endpoints[3].in_clusters[0]._attr_cache[5] == test_model await app2.shutdown() return dev async def test_appdb_load_null_padded_manuf(tmp_path): manufacturer = b"Mock Manufacturer\x00\x04\\\x00\\\x00\x00\x00\x00\x00\x07" model = b"Mock Model" dev = await _test_null_padded(tmp_path, manufacturer, model) assert dev.manufacturer == "Mock Manufacturer" assert dev.model == "Mock Model" assert dev.endpoints[3].manufacturer == "Mock Manufacturer" assert dev.endpoints[3].model == "Mock Model" async def test_appdb_load_null_padded_model(tmp_path): manufacturer = b"Mock Manufacturer" model = b"Mock Model\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" dev = await _test_null_padded(tmp_path, manufacturer, model) assert dev.manufacturer == "Mock Manufacturer" assert dev.model == "Mock Model" assert dev.endpoints[3].manufacturer == "Mock Manufacturer" assert dev.endpoints[3].model == "Mock Model" async def test_appdb_load_null_padded_manuf_model(tmp_path): manufacturer = b"Mock Manufacturer\x00\x04\\\x00\\\x00\x00\x00\x00\x00\x07" model = b"Mock Model\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" dev = await _test_null_padded(tmp_path, manufacturer, model) assert dev.manufacturer == "Mock Manufacturer" assert dev.model == "Mock Model" assert dev.endpoints[3].manufacturer == "Mock Manufacturer" assert dev.endpoints[3].model == "Mock Model" async def test_appdb_str_model(tmp_path): manufacturer = "Mock Manufacturer" model = "Mock Model" dev = await _test_null_padded(tmp_path, manufacturer, model) assert dev.manufacturer == "Mock Manufacturer" assert dev.model == "Mock Model" assert dev.endpoints[3].manufacturer == "Mock Manufacturer" assert dev.endpoints[3].model == "Mock Model" @patch.object(Device, "schedule_initialize", new=mock_dev_init(True)) @patch("zigpy.zcl.Cluster.request", new_callable=AsyncMock) async def test_groups(mock_request, tmp_path): """Test group 
adding/removing.""" group_id, group_name = 0x1221, "app db Test Group 0x1221" mock_request.return_value = [ZCLStatus.SUCCESS, group_id] db = tmp_path / "test.db" app = await make_app_with_db(db) ieee = make_ieee() app.handle_join(99, ieee, 0) dev = app.get_device(ieee) ep = dev.add_endpoint(1) ep.status = zigpy.endpoint.Status.ZDO_INIT ep.profile_id = 260 ep.device_type = profiles.zha.DeviceType.PUMP ep.add_input_cluster(4) app.device_initialized(dev) ieee_b = make_ieee(2) app.handle_join(100, ieee_b, 0) dev_b = app.get_device(ieee_b) ep_b = dev_b.add_endpoint(2) ep_b.status = zigpy.endpoint.Status.ZDO_INIT ep_b.profile_id = 260 ep_b.device_type = profiles.zha.DeviceType.PUMP ep_b.add_input_cluster(4) app.device_initialized(dev_b) await ep.add_to_group(group_id, group_name) await ep_b.add_to_group(group_id, group_name) assert group_id in app.groups group = app.groups[group_id] assert group.name == group_name assert (dev.ieee, ep.endpoint_id) in group assert (dev_b.ieee, ep_b.endpoint_id) in group assert group_id in ep.member_of assert group_id in ep_b.member_of await app.shutdown() del app, dev, dev_b, ep, ep_b # Everything should've been saved - check that it re-loads app2 = await make_app_with_db(db) dev2 = app2.get_device(ieee) assert group_id in app2.groups group = app2.groups[group_id] assert group.name == group_name assert (dev2.ieee, 1) in group assert group_id in dev2.endpoints[1].member_of dev2_b = app2.get_device(ieee_b) assert (dev2_b.ieee, 2) in group assert group_id in dev2_b.endpoints[2].member_of # check member removal await dev2_b.remove_from_group(group_id) await app2.shutdown() del app2, dev2, dev2_b app3 = await make_app_with_db(db) dev3 = app3.get_device(ieee) assert group_id in app3.groups group = app3.groups[group_id] assert group.name == group_name assert (dev3.ieee, 1) in group assert group_id in dev3.endpoints[1].member_of dev3_b = app3.get_device(ieee_b) assert (dev3_b.ieee, 2) not in group assert group_id not in 
dev3_b.endpoints[2].member_of # check group removal await dev3.remove_from_group(group_id) await app3.shutdown() del app3, dev3, dev3_b app4 = await make_app_with_db(db) dev4 = app4.get_device(ieee) assert group_id in app4.groups assert not app4.groups[group_id] assert group_id not in dev4.endpoints[1].member_of app4.groups.pop(group_id) await app4.shutdown() del app4, dev4 app5 = await make_app_with_db(db) assert not app5.groups await app5.shutdown() @pytest.mark.parametrize("dev_init", (True, False)) async def test_attribute_update(tmp_path, dev_init): """Test attribute update for initialized and uninitialized devices.""" db = tmp_path / "test.db" app = await make_app_with_db(db) ieee = make_ieee() with patch( "zigpy.device.Device.schedule_initialize", new=mock_dev_init(initialize=dev_init), ): app.handle_join(99, ieee, 0) test_manufacturer = "Test Manufacturer" test_model = "Test Model" dev = app.get_device(ieee) ep = dev.add_endpoint(3) ep.status = zigpy.endpoint.Status.ZDO_INIT ep.profile_id = 260 ep.device_type = profiles.zha.DeviceType.PUMP clus = ep.add_input_cluster(0x0000) ep.add_output_cluster(0x0001) clus.update_attribute(0x0004, test_manufacturer) clus.update_attribute(0x0005, test_model) app.device_initialized(dev) await app.shutdown() attr_update_time = clus._attr_last_updated[0x0004] # Everything should've been saved - check that it re-loads app2 = await make_app_with_db(db) dev = app2.get_device(ieee) assert dev.is_initialized == dev_init assert dev.endpoints[3].device_type == profiles.zha.DeviceType.PUMP clus = dev.endpoints[3].in_clusters[0x0000] assert clus._attr_cache[0x0004] == test_manufacturer assert clus._attr_cache[0x0005] == test_model assert (attr_update_time - clus._attr_last_updated[0x0004]) < timedelta(seconds=0.1) await app2.shutdown() @patch.object(Device, "schedule_initialize", new=mock_dev_init(True)) async def test_attribute_update_short_interval(tmp_path): """Test updating an attribute twice in a short interval.""" db = tmp_path 
/ "test.db" app = await make_app_with_db(db) ieee = make_ieee() app.handle_join(99, ieee, 0) dev = app.get_device(ieee) ep = dev.add_endpoint(3) ep.status = zigpy.endpoint.Status.ZDO_INIT ep.profile_id = 260 ep.device_type = profiles.zha.DeviceType.PUMP clus = ep.add_input_cluster(0x0000) ep.add_output_cluster(0x0001) clus.update_attribute(0x0004, "Custom") clus.update_attribute(0x0005, "Model") app.device_initialized(dev) # wait for the device initialization to write attribute cache to db await asyncio.sleep(0.01) # update an attribute twice in a short interval clus.update_attribute(0x4000, "1.0") attr_update_time_first = clus._attr_last_updated[0x4000] # update attribute again 10 seconds later fake_time = datetime.utcnow() + timedelta(seconds=10) with freezegun.freeze_time(fake_time): clus.update_attribute(0x4000, "2.0") await app.shutdown() # Everything should've been saved - check that it re-loads app2 = await make_app_with_db(db) dev = app2.get_device(ieee) clus = dev.endpoints[3].in_clusters[0x0000] assert clus._attr_cache[0x4000] == "2.0" # verify second attribute update was saved # verify the first update attribute time was not overwritten, as it was within the short interval assert (attr_update_time_first - clus._attr_last_updated[0x0004]) < timedelta( seconds=0.1 ) await app2.shutdown() @patch("zigpy.topology.REQUEST_DELAY", (0, 0)) @patch.object(Device, "schedule_initialize", new=mock_dev_init(True)) async def test_topology(tmp_path): """Test neighbor loading.""" ext_pid = t.EUI64.convert("aa:bb:cc:dd:ee:ff:01:02") neighbor1 = zdo_t.Neighbor( extended_pan_id=ext_pid, ieee=make_ieee(1), nwk=0x1111, device_type=zdo_t.Neighbor.DeviceType.EndDevice, rx_on_when_idle=1, relationship=zdo_t.Neighbor.Relationship.Child, reserved1=0, permit_joining=0, reserved2=0, depth=15, lqi=250, ) neighbor2 = zdo_t.Neighbor( extended_pan_id=ext_pid, ieee=make_ieee(2), nwk=0x1112, device_type=zdo_t.Neighbor.DeviceType.EndDevice, rx_on_when_idle=1, 
relationship=zdo_t.Neighbor.Relationship.Child, reserved1=0, permit_joining=0, reserved2=0, depth=15, lqi=250, ) route1 = zdo_t.Route( DstNWK=0x1234, RouteStatus=zdo_t.RouteStatus.Active, MemoryConstrained=0, ManyToOne=0, RouteRecordRequired=0, Reserved=0, NextHop=0x6789, ) route2 = zdo_t.Route( DstNWK=0x1235, RouteStatus=zdo_t.RouteStatus.Active, MemoryConstrained=0, ManyToOne=0, RouteRecordRequired=0, Reserved=0, NextHop=0x6790, ) ieee = make_ieee(0) nwk = 0x9876 db = tmp_path / "test.db" app = await make_app_with_db(db) app.handle_join(nwk, ieee, 0x0000) dev = app.get_device(ieee) dev.node_desc = zdo_t.NodeDescriptor( logical_type=zdo_t.LogicalType.Router, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=zdo_t.NodeDescriptor.FrequencyBand.Freq2400MHz, mac_capability_flags=zdo_t.NodeDescriptor.MACCapabilityFlags.AllocateAddress, manufacturer_code=4174, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=0, maximum_outgoing_transfer_size=82, descriptor_capability_field=zdo_t.NodeDescriptor.DescriptorCapability.NONE, ) ep1 = dev.add_endpoint(1) ep1.status = zigpy.endpoint.Status.ZDO_INIT ep1.profile_id = 260 ep1.device_type = 0x1234 app.device_initialized(dev) p1 = patch.object( app.topology, "_scan_neighbors", new=AsyncMock(return_value=[neighbor1, neighbor2]), ) p2 = patch.object( app.topology, "_scan_routes", new=AsyncMock(return_value=[route1, route2]), ) with p1, p2: await app.topology.scan() assert len(app.topology.neighbors[ieee]) == 2 assert neighbor1 in app.topology.neighbors[ieee] assert neighbor2 in app.topology.neighbors[ieee] assert len(app.topology.routes[ieee]) == 2 assert route1 in app.topology.routes[ieee] assert route2 in app.topology.routes[ieee] await app.shutdown() del dev # Everything should've been saved - check that it re-loads app2 = await make_app_with_db(db) app2.get_device(ieee) assert len(app2.topology.neighbors[ieee]) == 2 assert neighbor1 in 
app2.topology.neighbors[ieee] assert neighbor2 in app2.topology.neighbors[ieee] assert len(app2.topology.routes[ieee]) == 2 assert route1 in app2.topology.routes[ieee] assert route2 in app2.topology.routes[ieee] await app2.shutdown() @patch("zigpy.device.Device.schedule_initialize", new=mock_dev_init(True)) async def test_device_rejoin(tmp_path): db = tmp_path / "test.db" app = await make_app_with_db(db) ieee = make_ieee() nwk = 199 app.handle_join(nwk, ieee, 0) dev = app.get_device(ieee) ep = dev.add_endpoint(1) ep.status = zigpy.endpoint.Status.ZDO_INIT ep.profile_id = 65535 ep.device_type = profiles.zha.DeviceType.PUMP clus = ep.add_input_cluster(0) ep.add_output_cluster(1) app.device_initialized(dev) clus.update_attribute(4, "Custom") clus.update_attribute(5, "Model") await app.shutdown() # Everything should've been saved - check that it re-loads with patch("zigpy.quirks.get_device", fake_get_device): app2 = await make_app_with_db(db) dev = app2.get_device(ieee) assert dev.nwk == nwk assert dev.endpoints[1].device_type == profiles.zha.DeviceType.PUMP assert dev.endpoints[1].in_clusters[0]._attr_cache[4] == "Custom" assert dev.endpoints[1].in_clusters[0]._attr_cache[5] == "Model" assert dev.endpoints[1].manufacturer == "Custom" assert dev.endpoints[1].model == "Model" # device rejoins dev.nwk = nwk + 1 with patch("zigpy.quirks.get_device", fake_get_device): app2.device_initialized(dev) await app2.shutdown() app3 = await make_app_with_db(db) dev = app3.get_device(ieee) assert dev.nwk == nwk + 1 assert dev.endpoints[1].device_type == profiles.zha.DeviceType.PUMP assert 0 in dev.endpoints[1].in_clusters assert dev.endpoints[1].manufacturer == "Custom" assert dev.endpoints[1].model == "Model" await app3.shutdown() @patch("zigpy.device.Device.schedule_initialize", new=mock_dev_init(True)) async def test_stopped_appdb_listener(tmp_path): db = tmp_path / "test.db" app = await make_app_with_db(db) ieee = make_ieee() app.handle_join(99, ieee, 0) dev = 
app.get_device(ieee) ep = dev.add_endpoint(1) ep.status = zigpy.endpoint.Status.ZDO_INIT ep.profile_id = 260 ep.device_type = profiles.zha.DeviceType.PUMP clus = ep.add_input_cluster(0) ep.add_output_cluster(1) app.device_initialized(dev) with patch("zigpy.appdb.PersistingListener._save_attribute") as mock_attr_save: clus.update_attribute(0, 99) clus.update_attribute(4, bytes("Custom", "ascii")) clus.update_attribute(5, bytes("Model", "ascii")) await app.shutdown() assert mock_attr_save.call_count == 3 clus.update_attribute(0, 100) for _i in range(100): await asyncio.sleep(0) assert mock_attr_save.call_count == 3 @patch.object(Device, "schedule_initialize", new=mock_dev_init(True)) async def test_invalid_node_desc(tmp_path): """devices without a valid node descriptor should not save the node descriptor.""" ieee_1 = make_ieee(1) nwk_1 = 0x1111 db = tmp_path / "test.db" app = await make_app_with_db(db) app.handle_join(nwk_1, ieee_1, 0) dev_1 = app.get_device(ieee_1) dev_1.node_desc = None ep = dev_1.add_endpoint(1) ep.profile_id = 260 ep.device_type = profiles.zha.DeviceType.PUMP ep.status = zigpy.endpoint.Status.ZDO_INIT app.device_initialized(dev_1) await app.shutdown() # Everything should've been saved - check that it re-loads app2 = await make_app_with_db(db) dev_2 = app2.get_device(ieee=ieee_1) assert dev_2.node_desc is None assert dev_2.nwk == dev_1.nwk assert dev_2.ieee == dev_1.ieee assert dev_2.status == dev_1.status await app2.shutdown() async def test_appdb_worker_exception(tmp_path): """Exceptions should not kill the appdb worker.""" app_mock = MagicMock(name="ControllerApplication") db = tmp_path / "test.db" ieee_1 = make_ieee(1) dev_1 = zigpy.device.Device(app_mock, ieee_1, 0x1111) dev_1.status = Status.ENDPOINTS_INIT dev_1.node_desc = MagicMock() dev_1.node_desc.is_valid = True dev_1.node_desc.serialize.side_effect = AttributeError with patch( "zigpy.appdb.PersistingListener._save_device", wraps=zigpy.appdb.PersistingListener._save_device, ) as 
save_mock: db_listener = await zigpy.appdb.PersistingListener.new(db, app_mock) for _ in range(3): db_listener.raw_device_initialized(dev_1) await db_listener.shutdown() assert save_mock.await_count == 3 @pytest.mark.parametrize("dev_init", (True, False)) async def test_unsupported_attribute(tmp_path, dev_init): """Test adding unsupported attributes for initialized and uninitialized devices.""" db = tmp_path / "test.db" app = await make_app_with_db(db) ieee = make_ieee() with patch( "zigpy.device.Device.schedule_initialize", new=mock_dev_init(initialize=dev_init), ): app.handle_join(99, ieee, 0) dev = app.get_device(ieee) ep = dev.add_endpoint(3) ep.status = zigpy.endpoint.Status.ZDO_INIT ep.profile_id = 260 ep.device_type = profiles.zha.DeviceType.PUMP clus = ep.add_input_cluster(0) ep.add_output_cluster(1) clus.update_attribute(4, "Custom") clus.update_attribute(5, "Model") app.device_initialized(dev) clus.add_unsupported_attribute(0x0010) clus.add_unsupported_attribute("physical_env") await app.shutdown() # Everything should've been saved - check that it re-loads app2 = await make_app_with_db(db) dev = app2.get_device(ieee) assert dev.is_initialized == dev_init assert dev.endpoints[3].device_type == profiles.zha.DeviceType.PUMP assert 0x0010 in dev.endpoints[3].in_clusters[0].unsupported_attributes assert "location_desc" in dev.endpoints[3].in_clusters[0].unsupported_attributes assert 0x0011 in dev.endpoints[3].in_clusters[0].unsupported_attributes assert "physical_env" in dev.endpoints[3].in_clusters[0].unsupported_attributes await app2.shutdown() async def mockrequest( is_general_req, command, schema, args, manufacturer=None, **kwargs ): assert is_general_req is True assert command == 0 rar0010 = _mk_rar(0x0010, "Not Removed", zigpy.zcl.foundation.Status.SUCCESS) return [[rar0010]] # Now lets remove an unsupported attribute and make sure it is removed app3 = await make_app_with_db(db) dev = app3.get_device(ieee) assert dev.is_initialized == dev_init assert 
dev.endpoints[3].device_type == profiles.zha.DeviceType.PUMP cluster = dev.endpoints[3].in_clusters[0] assert 0x0010 in dev.endpoints[3].in_clusters[0].unsupported_attributes cluster.request = mockrequest await cluster.read_attributes([0x0010], allow_cache=False) assert 0x0010 not in dev.endpoints[3].in_clusters[0].unsupported_attributes assert "location_desc" not in dev.endpoints[3].in_clusters[0].unsupported_attributes assert dev.endpoints[3].in_clusters[0].get(0x0010) == "Not Removed" assert 0x0011 in dev.endpoints[3].in_clusters[0].unsupported_attributes assert "physical_env" in dev.endpoints[3].in_clusters[0].unsupported_attributes await app3.shutdown() # Everything should've been saved - check that it re-loads app4 = await make_app_with_db(db) dev = app4.get_device(ieee) assert dev.is_initialized == dev_init assert dev.endpoints[3].device_type == profiles.zha.DeviceType.PUMP assert 0x0010 not in dev.endpoints[3].in_clusters[0].unsupported_attributes assert dev.endpoints[3].in_clusters[0].get(0x0010) == "Not Removed" assert "location_desc" not in dev.endpoints[3].in_clusters[0].unsupported_attributes assert 0x0011 in dev.endpoints[3].in_clusters[0].unsupported_attributes assert "physical_env" in dev.endpoints[3].in_clusters[0].unsupported_attributes await app4.shutdown() @patch.object(Device, "schedule_initialize", new=mock_dev_init(True)) async def test_load_unsupp_attr_wrong_cluster(tmp_path): """Test loading unsupported attribute from the wrong cluster.""" db = tmp_path / "test.db" app = await make_app_with_db(db) ieee = make_ieee() app.handle_join(99, ieee, 0) dev = app.get_device(ieee) ep = dev.add_endpoint(3) ep.status = zigpy.endpoint.Status.ZDO_INIT ep.profile_id = 260 ep.device_type = profiles.zha.DeviceType.PUMP clus = ep.add_input_cluster(0) ep.add_output_cluster(1) clus.update_attribute(4, "Custom") clus.update_attribute(5, "Model") app.device_initialized(dev) await app.shutdown() del clus del ep del dev # add unsupported attr for missing endpoint 
app = await make_app_with_db(db) dev = app.get_device(ieee) ep = dev.endpoints[3] clus = ep.add_input_cluster(2) clus.add_unsupported_attribute(0) await app.shutdown() del clus del ep del dev # reload app = await make_app_with_db(db) await app.shutdown() @patch.object(Device, "schedule_initialize", new=mock_dev_init(True)) async def test_load_unsupp_attr_missing_endpoint(tmp_path): """Test loading unsupported attribute from the wrong cluster.""" db = tmp_path / "test.db" app = await make_app_with_db(db) ieee = make_ieee() app.handle_join(99, ieee, 0) dev = app.get_device(ieee) ep = dev.add_endpoint(3) ep.status = zigpy.endpoint.Status.ZDO_INIT ep.profile_id = 260 ep.device_type = profiles.zha.DeviceType.PUMP clus = ep.add_input_cluster(0x0000) ep.add_output_cluster(0x0001) clus.update_attribute(0x0004, "Custom") clus.update_attribute(0x0005, "Model") ep = dev.add_endpoint(4) ep.status = zigpy.endpoint.Status.ZDO_INIT ep.profile_id = 260 ep.device_type = profiles.zha.DeviceType.PUMP clus = ep.add_input_cluster(0x0006) app.device_initialized(dev) # Make an attribute unsupported clus.add_unsupported_attribute(0x0000) await app.shutdown() del clus del ep del dev def remove_cluster(device): device.endpoints.pop(4) return device # Simulate a quirk that removes the entire endpoint with patch("zigpy.quirks.get_device", side_effect=remove_cluster): # The application should still load app = await make_app_with_db(db) dev = app.get_device(ieee) assert 4 not in dev.endpoints await app.shutdown() async def test_last_seen(tmp_path): db = tmp_path / "test.db" app = await make_app_with_db(db) ieee = make_ieee() app.handle_join(99, ieee, 0) dev = app.get_device(ieee=ieee) ep = dev.add_endpoint(3) ep.status = zigpy.endpoint.Status.ZDO_INIT ep.profile_id = 260 ep.device_type = profiles.zha.DeviceType.PUMP clus = ep.add_input_cluster(0) ep.add_output_cluster(1) clus.update_attribute(4, "Custom") clus.update_attribute(5, "Model") app.device_initialized(dev) old_last_seen = 
dev.last_seen await app.shutdown() # The `last_seen` of a joined device persists app = await make_app_with_db(db) dev = app.get_device(ieee=ieee) await app.shutdown() next_last_seen = dev.last_seen assert abs(next_last_seen - old_last_seen) < 0.01 app = await make_app_with_db(db) dev = app.get_device(ieee=ieee) # Last-seen is only written to the db every 30s (no write case) now = datetime.fromtimestamp(dev.last_seen + 5, timezone.utc) with freezegun.freeze_time(now): dev.update_last_seen() await app.shutdown() app = await make_app_with_db(db) dev = app.get_device(ieee=ieee) assert dev.last_seen == next_last_seen # no change await app.shutdown() app = await make_app_with_db(db) dev = app.get_device(ieee=ieee) # Last-seen is only written to the db every 30s (write case) now = datetime.fromtimestamp(dev.last_seen + 35, timezone.utc) with freezegun.freeze_time(now): dev.update_last_seen() await app.shutdown() # And it will be updated when the database next loads app = await make_app_with_db(db) dev = app.get_device(ieee=ieee) assert dev.last_seen >= next_last_seen + 35 # updated await app.shutdown() @pytest.mark.parametrize( "stdlib_version,use_sqlite", [ ((1, 0, 0), False), ((2, 0, 0), False), ((3, 0, 0), False), ((3, 24, 0), True), ((4, 0, 0), True), ], ) def test_pysqlite_load_success(stdlib_version, use_sqlite): """Test that the internal import SQLite helper picks the correct module.""" pysqlite3 = MagicMock() pysqlite3.sqlite_version_info = (3, 30, 0) with patch.dict(sys.modules, {"pysqlite3": pysqlite3}), patch.object( sys.modules["sqlite3"], "sqlite_version_info", new=stdlib_version ): module = zigpy.appdb._import_compatible_sqlite3(zigpy.appdb.MIN_SQLITE_VERSION) if use_sqlite: assert module is sqlite3 else: assert module is pysqlite3 @pytest.mark.parametrize( "stdlib_version,pysqlite3_version", [ ((1, 0, 0), None), ((1, 0, 0), (1, 0, 1)), ], ) def test_pysqlite_load_failure(stdlib_version, pysqlite3_version): """ Test that the internal import SQLite helper 
will throw an error when no compatible module can be found. """ if pysqlite3_version is not None: pysqlite3 = MagicMock() pysqlite3.sqlite_version_info = pysqlite3_version pysqlite3_patch = patch.dict(sys.modules, {"pysqlite3": pysqlite3}) else: pysqlite3_patch = patch.dict(sys.modules, {"pysqlite3": None}) with pysqlite3_patch, patch.object( sys.modules["sqlite3"], "sqlite_version_info", new=stdlib_version ): with pytest.raises(RuntimeError): zigpy.appdb._import_compatible_sqlite3(zigpy.appdb.MIN_SQLITE_VERSION) async def test_appdb_network_backups(tmp_path, backup_factory): # noqa: F811 db = tmp_path / "test.db" backup = backup_factory() app1 = await make_app_with_db(db) app1.backups.add_backup(backup) await app1.shutdown() # The backup is reloaded from the database as well app2 = await make_app_with_db(db) assert len(app2.backups.backups) == 1 assert app2.backups.backups[0] == backup new_backup = backup_factory() new_backup.network_info.network_key.tx_counter += 10000 app2.backups.add_backup(new_backup) await app2.shutdown() # The database will contain only the single backup app3 = await make_app_with_db(db) assert len(app3.backups.backups) == 1 assert app3.backups.backups[0] == new_backup assert app3.backups.backups[0] != backup await app3.shutdown() async def test_appdb_network_backups_format_change( tmp_path, backup_factory ): # noqa: F811 db = tmp_path / "test.db" backup = backup_factory() backup.as_dict = MagicMock(return_value={"some new key": 1, **backup.as_dict()}) app1 = await make_app_with_db(db) app1.backups.add_backup(backup) await app1.shutdown() # The backup is reloaded from the database as well app2 = await make_app_with_db(db) assert len(app2.backups.backups) == 1 assert app2.backups.backups[0] == backup new_backup = backup_factory() new_backup.network_info.network_key.tx_counter += 10000 app2.backups.add_backup(new_backup) await app2.shutdown() # The database will contain only the single backup with 
patch("zigpy.backups.BackupManager.add_backup") as mock_add_backup: app3 = await make_app_with_db(db) await app3.shutdown() assert mock_add_backup.mock_calls == [call(new_backup, suppress_event=True)] async def test_appdb_persist_coordinator_info(tmp_path): # noqa: F811 db = tmp_path / "test.db" with patch( "zigpy.appdb.PersistingListener._save_attribute_cache", wraps=zigpy.appdb.PersistingListener._save_attribute_cache, ) as mock_save_attr_cache: app = await make_app_with_db(db) await app.initialize() await app.shutdown() assert mock_save_attr_cache.mock_calls == [call(app._device.endpoints[1])] zigpy-0.62.3/tests/test_appdb_migration.py000066400000000000000000000371101456054056700206740ustar00rootroot00000000000000import logging import pathlib from sqlite3.dump import _iterdump as iterdump from aiosqlite.context import contextmanager import pytest import zigpy.appdb from zigpy.appdb import sqlite3 import zigpy.appdb_schemas import zigpy.types as t from zigpy.zdo import types as zdo_t from tests.async_mock import AsyncMock, MagicMock, patch from tests.conftest import app # noqa: F401 from tests.test_appdb import auto_kill_aiosqlite, make_app_with_db # noqa: F401 @pytest.fixture def test_db(tmp_path): def inner(filename): databases = pathlib.Path(__file__).parent / "databases" db_path = tmp_path / filename if filename.endswith(".db"): db_path.write_bytes((databases / filename).read_bytes()) return str(db_path) conn = sqlite3.connect(str(db_path)) sql = (databases / filename).read_text() conn.executescript(sql) conn.commit() conn.close() return str(db_path) return inner def dump_db(path): with sqlite3.connect(path) as conn: cur = conn.cursor() cur.execute("PRAGMA user_version") (user_version,) = cur.fetchone() sql = "\n".join(iterdump(conn)) return user_version, sql @pytest.mark.parametrize("open_twice", [False, True]) async def test_migration_from_3_to_4(open_twice, test_db): test_db_v3 = test_db("simple_v3.sql") with sqlite3.connect(test_db_v3) as conn: cur = 
conn.cursor() neighbors_before = list(cur.execute("SELECT * FROM neighbors")) assert len(neighbors_before) == 2 assert all(len(row) == 8 for row in neighbors_before) node_descs_before = list(cur.execute("SELECT * FROM node_descriptors")) assert len(node_descs_before) == 2 assert all(len(row) == 2 for row in node_descs_before) # Ensure migration works on first run, and after shutdown if open_twice: app = await make_app_with_db(test_db_v3) await app.shutdown() app = await make_app_with_db(test_db_v3) dev1 = app.get_device(nwk=0xBD4D) assert dev1.node_desc == zdo_t.NodeDescriptor( logical_type=zdo_t.LogicalType.Router, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=zdo_t.NodeDescriptor.FrequencyBand.Freq2400MHz, mac_capability_flags=142, manufacturer_code=4476, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=0, ) assert len(app.topology.neighbors[dev1.ieee]) == 1 assert app.topology.neighbors[dev1.ieee][0] == zdo_t.Neighbor( extended_pan_id=t.ExtendedPanId.convert("81:b1:12:dc:9f:bd:f4:b6"), ieee=t.EUI64.convert("ec:1b:bd:ff:fe:54:4f:40"), nwk=0x6D1C, reserved1=0, device_type=zdo_t.Neighbor.DeviceType.Router, rx_on_when_idle=1, relationship=zdo_t.Neighbor.RelationShip.Sibling, reserved2=0, permit_joining=2, depth=15, lqi=130, ) dev2 = app.get_device(nwk=0x6D1C) assert dev2.node_desc == dev1.node_desc.replace(manufacturer_code=4456) assert len(app.topology.neighbors[dev2.ieee]) == 1 assert app.topology.neighbors[dev2.ieee][0] == zdo_t.Neighbor( extended_pan_id=t.ExtendedPanId.convert("81:b1:12:dc:9f:bd:f4:b6"), ieee=t.EUI64.convert("00:0d:6f:ff:fe:a6:11:7a"), nwk=0xBD4D, reserved1=0, device_type=zdo_t.Neighbor.DeviceType.Router, rx_on_when_idle=1, relationship=zdo_t.Neighbor.RelationShip.Sibling, reserved2=0, permit_joining=2, depth=15, lqi=132, ) await app.shutdown() with sqlite3.connect(test_db_v3) as conn: cur = 
conn.cursor() # Old tables are untouched assert neighbors_before == list(cur.execute("SELECT * FROM neighbors")) assert node_descs_before == list(cur.execute("SELECT * FROM node_descriptors")) # New tables exist neighbors_after = list(cur.execute("SELECT * FROM neighbors_v4")) assert len(neighbors_after) == 2 assert all(len(row) == 12 for row in neighbors_after) node_descs_after = list(cur.execute("SELECT * FROM node_descriptors_v4")) assert len(node_descs_after) == 2 assert all(len(row) == 14 for row in node_descs_after) async def test_migration_0_to_5(test_db): test_db_v0 = test_db("zigbee_20190417_v0.db") with sqlite3.connect(test_db_v0) as conn: cur = conn.cursor() cur.execute("SELECT count(*) FROM devices") (num_devices_before_migration,) = cur.fetchone() assert num_devices_before_migration == 27 app1 = await make_app_with_db(test_db_v0) await app1.shutdown() assert len(app1.devices) == 27 app2 = await make_app_with_db(test_db_v0) await app2.shutdown() # All 27 devices migrated assert len(app2.devices) == 27 async def test_migration_missing_neighbors_v3(test_db): test_db_v3 = test_db("simple_v3.sql") with sqlite3.connect(test_db_v3) as conn: cur = conn.cursor() cur.execute("DROP TABLE neighbors") # Ensure the table doesn't exist with pytest.raises(sqlite3.OperationalError): cur.execute("SELECT * FROM neighbors") # Migration won't fail even though the database version number is 3 app = await make_app_with_db(test_db_v3) await app.shutdown() # Version was upgraded with sqlite3.connect(test_db_v3) as conn: cur = conn.cursor() cur.execute("PRAGMA user_version") assert cur.fetchone() == (zigpy.appdb.DB_VERSION,) @pytest.mark.parametrize("corrupt_device", [False, True]) async def test_migration_bad_attributes(test_db, corrupt_device): test_db_bad_attrs = test_db("bad_attrs_v3.db") with sqlite3.connect(test_db_bad_attrs) as conn: cur = conn.cursor() cur.execute("SELECT count(*) FROM devices") (num_devices_before_migration,) = cur.fetchone() cur.execute("SELECT 
count(*) FROM endpoints") (num_ep_before_migration,) = cur.fetchone() if corrupt_device: with sqlite3.connect(test_db_bad_attrs) as conn: cur = conn.cursor() cur.execute("DELETE FROM endpoints WHERE ieee='60:a4:23:ff:fe:02:39:7b'") cur.execute("SELECT changes()") (deleted_eps,) = cur.fetchone() else: deleted_eps = 0 # Migration will handle invalid attributes entries app = await make_app_with_db(test_db_bad_attrs) await app.shutdown() assert len(app.devices) == num_devices_before_migration assert ( sum(len(d.non_zdo_endpoints) for d in app.devices.values()) == num_ep_before_migration - deleted_eps ) app2 = await make_app_with_db(test_db_bad_attrs) await app2.shutdown() # All devices still exist assert len(app2.devices) == num_devices_before_migration assert ( sum(len(d.non_zdo_endpoints) for d in app2.devices.values()) == num_ep_before_migration - deleted_eps ) with sqlite3.connect(test_db_bad_attrs) as conn: cur = conn.cursor() cur.execute("PRAGMA user_version") # Ensure the final database schema version number does not decrease assert cur.fetchone()[0] >= zigpy.appdb.DB_VERSION async def test_migration_missing_node_descriptor(test_db, caplog): test_db_v3 = test_db("simple_v3.sql") ieee = "ec:1b:bd:ff:fe:54:4f:40" with sqlite3.connect(test_db_v3) as conn: cur = conn.cursor() cur.execute("DELETE FROM node_descriptors WHERE ieee=?", [ieee]) with caplog.at_level(logging.WARNING): # The invalid device will still be loaded, for now app = await make_app_with_db(test_db_v3) assert len(app.devices) == 2 bad_dev = app.devices[t.EUI64.convert(ieee)] assert bad_dev.node_desc is None caplog.clear() # Saving the device should cause the node descriptor to not be saved await app._dblistener._save_device(bad_dev) await app.shutdown() # The node descriptor is not in the database with sqlite3.connect(test_db_v3) as conn: cur = conn.cursor() cur.execute( f"SELECT * FROM node_descriptors{zigpy.appdb.DB_V} WHERE ieee=?", [ieee] ) assert not cur.fetchall() @pytest.mark.parametrize( 
"fail_on_sql,fail_on_count", [ ("INSERT INTO node_descriptors_v4 VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)", 0), ("INSERT INTO neighbors_v4 VALUES (?,?,?,?,?,?,?,?,?,?,?,?)", 5), ("SELECT * FROM output_clusters", 0), ("INSERT INTO neighbors_v5 VALUES (?,?,?,?,?,?,?,?,?,?,?,?)", 5), ], ) async def test_migration_failure(fail_on_sql, fail_on_count, test_db): test_db_bad_attrs = test_db("bad_attrs_v3.db") before = dump_db(test_db_bad_attrs) assert before[0] == 3 count = 0 sql_seen = False execute = zigpy.appdb.PersistingListener.execute def patched_execute(self, sql, *args, **kwargs): nonlocal count, sql_seen if sql == fail_on_sql: sql_seen = True if count == fail_on_count: raise sqlite3.ProgrammingError("Uh oh") count += 1 return execute(self, sql, *args, **kwargs) with patch("zigpy.appdb.PersistingListener.execute", new=patched_execute): with pytest.raises(sqlite3.ProgrammingError): await make_app_with_db(test_db_bad_attrs) assert sql_seen after = dump_db(test_db_bad_attrs) assert before == after async def test_migration_failure_version_mismatch(test_db): """Test migration failure when the `user_version` and table versions don't match.""" test_db_v3 = test_db("simple_v3.sql") # Migrate it to the latest version app = await make_app_with_db(test_db_v3) await app.shutdown() # Downgrade it back to v7 with sqlite3.connect(test_db_v3) as conn: conn.execute("PRAGMA user_version=7") # Startup now fails due to the version mismatch with pytest.raises(zigpy.exceptions.CorruptDatabase): await make_app_with_db(test_db_v3) async def test_migration_downgrade_warning(test_db, caplog): """Test V4 re-migration which was forcibly downgraded to v3.""" test_db_v3 = test_db("simple_v3.sql") # Migrate it to the latest version app = await make_app_with_db(test_db_v3) await app.shutdown() # Upgrade it beyond our current version with sqlite3.connect(test_db_v3) as conn: conn.execute("CREATE TABLE future_table_v100(column)") conn.execute("PRAGMA user_version=100") # Startup now logs an error due 
to the "downgrade" with caplog.at_level(logging.ERROR): app2 = await make_app_with_db(test_db_v3) await app2.shutdown() assert "Downgrading zigpy" in caplog.text # Ensure the version was not touched with sqlite3.connect(test_db_v3) as conn: user_version = conn.execute("PRAGMA user_version").fetchone()[0] assert user_version == 100 @pytest.mark.parametrize("with_bad_neighbor", [False, True]) async def test_v4_to_v5_migration_bad_neighbors(test_db, with_bad_neighbor): """V4 migration has no `neighbors_v4` foreign key and no `ON DELETE CASCADE`""" test_db_v4 = test_db("simple_v3_to_v4.sql") with sqlite3.connect(test_db_v4) as conn: cur = conn.cursor() if with_bad_neighbor: # Row refers to an invalid device, left behind by a bad `DELETE` cur.execute( """ INSERT INTO neighbors_v4 VALUES ( '11:aa:bb:cc:dd:ee:ff:00', '22:aa:bb:cc:dd:ee:ff:00', '33:aa:bb:cc:dd:ee:ff:00', 12345, 1,1,2,0,2,0,15,132 ) """ ) (num_v4_neighbors,) = cur.execute( "SELECT count(*) FROM neighbors_v4" ).fetchone() app = await make_app_with_db(test_db_v4) await app.shutdown() with sqlite3.connect(test_db_v4) as conn: (num_new_neighbors,) = cur.execute( f"SELECT count(*) FROM neighbors{zigpy.appdb.DB_V}" ).fetchone() # Only the invalid row was not migrated if with_bad_neighbor: assert num_new_neighbors == num_v4_neighbors - 1 else: assert num_new_neighbors == num_v4_neighbors @pytest.mark.parametrize("with_quirk_attribute", [False, True]) async def test_v4_to_v6_migration_missing_endpoints(test_db, with_quirk_attribute): """V5's schema was too rigid and failed to migrate endpoints created by quirks""" test_db_v3 = test_db("simple_v3.sql") if with_quirk_attribute: with sqlite3.connect(test_db_v3) as conn: cur = conn.cursor() cur.execute( """ INSERT INTO attributes VALUES ( '00:0d:6f:ff:fe:a6:11:7a', 123, 456, 789, 'test' ) """ ) def get_device(dev): if dev.ieee == t.EUI64.convert("00:0d:6f:ff:fe:a6:11:7a"): ep = dev.add_endpoint(123) ep.add_input_cluster(456) return dev # Migrate to v5 and then v6 with 
patch("zigpy.quirks.get_device", get_device): app = await make_app_with_db(test_db_v3) if with_quirk_attribute: dev = app.get_device(ieee=t.EUI64.convert("00:0d:6f:ff:fe:a6:11:7a")) assert dev.endpoints[123].in_clusters[456]._attr_cache[789] == "test" await app.shutdown() async def test_v5_to_v7_migration(test_db): test_db_v5 = test_db("simple_v5.sql") app = await make_app_with_db(test_db_v5) await app.shutdown() async def test_migration_missing_tables(app): conn = MagicMock() conn.close = AsyncMock() appdb = zigpy.appdb.PersistingListener(conn, app) appdb._get_table_versions = AsyncMock( return_value={"table1_v1": "1", "table1": "", "table2_v1": "1"} ) mock_execute = AsyncMock() appdb.execute = contextmanager(mock_execute) appdb._db._execute = AsyncMock() # Migrations must explicitly specify all old tables, even if they will be untouched with pytest.raises(RuntimeError): await appdb._migrate_tables( { "table1_v1": "table1_v2", # "table2_v1": "table2_v2", } ) # The untouched table will never be queried await appdb._migrate_tables({"table1_v1": "table1_v2", "table2_v1": None}) mock_execute.assert_called_once_with("SELECT * FROM table1_v1") with pytest.raises(AssertionError): mock_execute.assert_called_once_with("SELECT * FROM table2_v1") await appdb.shutdown() async def test_last_seen_initial_migration(test_db): test_db_v5 = test_db("simple_v5.sql") # To preserve the old behavior, `0` will not be exposed to ZHA, only `None` app = await make_app_with_db(test_db_v5) dev = app.get_device(nwk=0xBD4D) assert dev.last_seen is None dev.update_last_seen() assert isinstance(dev.last_seen, float) await app.shutdown() # But the device's `last_seen` will still update properly when it's actually set app = await make_app_with_db(test_db_v5) assert isinstance(app.get_device(nwk=0xBD4D).last_seen, float) await app.shutdown() def test_db_version_is_latest_schema_version(): assert max(zigpy.appdb_schemas.SCHEMAS.keys()) == zigpy.appdb.DB_VERSION async def 
test_last_seen_migration_v8_to_v9(test_db): test_db_v8 = test_db("simple_v8.sql") app = await make_app_with_db(test_db_v8) assert int(app.get_device(nwk=0xE01E).last_seen) == 1651119830 await app.shutdown() zigpy-0.62.3/tests/test_appdb_pysqlite.py000066400000000000000000000021231456054056700205510ustar00rootroot00000000000000import sqlite3 import pytest from tests.async_mock import patch try: import pysqlite3 except ImportError: pass else: @pytest.fixture(scope="module", autouse=True) def force_use_pysqlite3(): # Make the sqlite3 module "be" pysqlite3 with patch.multiple( target=sqlite3, **{ attr: getattr(pysqlite3, attr) for attr in dir(pysqlite3) if hasattr(sqlite3, attr) }, ): # Ensure the module was patched assert sqlite3.connect is pysqlite3.connect # Directly replace it as well in `zigpy.appdb` with patch("zigpy.appdb.sqlite3", pysqlite3): yield # Ensure the module is unpatched assert sqlite3.connect is not pysqlite3.connect # Re-run most of the appdb tests from tests.test_appdb import * # noqa: F401,F403 from tests.test_appdb_migration import * # type:ignore[no-redef] # noqa: F401,F403 del test_pysqlite_load_success # noqa: F821 del test_pysqlite_load_failure # noqa: F821 zigpy-0.62.3/tests/test_application.py000066400000000000000000001353171456054056700200500ustar00rootroot00000000000000import asyncio import errno import logging from unittest import mock from unittest.mock import ANY, PropertyMock, call import pytest import voluptuous as vol import zigpy.application import zigpy.config as conf from zigpy.exceptions import ( DeliveryError, NetworkNotFormed, NetworkSettingsInconsistent, TransientConnectionError, ) import zigpy.ota import zigpy.quirks import zigpy.types as t from zigpy.zcl import clusters, foundation import zigpy.zdo.types as zdo_t from .async_mock import AsyncMock, MagicMock, patch, sentinel from .conftest import ( NCP_IEEE, App, make_app, make_ieee, make_neighbor, make_neighbor_from_device, make_node_desc, ) @pytest.fixture def ieee(): 
return make_ieee() @patch("zigpy.ota.OTA", spec_set=zigpy.ota.OTA) async def test_new_exception(ota_mock): p1 = patch.object(App, "_load_db", AsyncMock()) p2 = patch.object(App, "load_network_info", AsyncMock()) p3 = patch.object(App, "shutdown", AsyncMock()) ota_mock.return_value.initialize = AsyncMock() with p1 as db_mck, p2 as load_nwk_info_mck, p3 as shut_mck: await App.new( { conf.CONF_DATABASE: "/dev/null", conf.CONF_DEVICE: {conf.CONF_DEVICE_PATH: "/dev/null"}, conf.CONF_STARTUP_ENERGY_SCAN: False, } ) assert db_mck.call_count == 1 assert db_mck.await_count == 1 assert ota_mock.return_value.initialize.call_count == 1 assert load_nwk_info_mck.call_count == 1 assert load_nwk_info_mck.await_count == 1 assert shut_mck.call_count == 0 assert shut_mck.await_count == 0 with p1 as db_mck, p2 as load_nwk_info_mck, p3 as shut_mck: load_nwk_info_mck.side_effect = asyncio.TimeoutError() with pytest.raises(asyncio.TimeoutError): await App.new( { conf.CONF_DATABASE: "/dev/null", conf.CONF_DEVICE: {conf.CONF_DEVICE_PATH: "/dev/null"}, conf.CONF_STARTUP_ENERGY_SCAN: False, } ) assert db_mck.call_count == 2 assert db_mck.await_count == 2 assert ota_mock.return_value.initialize.call_count == 1 assert load_nwk_info_mck.call_count == 2 assert load_nwk_info_mck.await_count == 2 assert shut_mck.call_count == 1 assert shut_mck.await_count == 1 async def test_permit(app, ieee): app.devices[ieee] = MagicMock() app.devices[ieee].zdo.permit = AsyncMock() app.permit_ncp = AsyncMock() await app.permit(node=(1, 1, 1, 1, 1, 1, 1, 1)) assert app.devices[ieee].zdo.permit.call_count == 0 assert app.permit_ncp.call_count == 0 await app.permit(node=ieee) assert app.devices[ieee].zdo.permit.call_count == 1 assert app.permit_ncp.call_count == 0 await app.permit(node=NCP_IEEE) assert app.devices[ieee].zdo.permit.call_count == 1 assert app.permit_ncp.call_count == 1 async def test_permit_delivery_failure(app, ieee): def zdo_permit(*args, **kwargs): raise DeliveryError("Failed") app.devices[ieee] = 
MagicMock() app.devices[ieee].zdo.permit = zdo_permit app.permit_ncp = AsyncMock() await app.permit(node=ieee) assert app.permit_ncp.call_count == 0 async def test_permit_broadcast(app): app.permit_ncp = AsyncMock() app.send_packet = AsyncMock() await app.permit(time_s=30) assert app.send_packet.call_count == 1 assert app.permit_ncp.call_count == 1 assert app.send_packet.mock_calls[0].args[0].dst.addr_mode == t.AddrMode.Broadcast @patch("zigpy.device.Device.initialize", new_callable=AsyncMock) async def test_join_handler_skip(init_mock, app, ieee): node_desc = make_node_desc() app.handle_join(1, ieee, None) app.get_device(ieee).node_desc = node_desc app.handle_join(1, ieee, None) assert app.get_device(ieee).node_desc == node_desc async def test_join_handler_change_id(app, ieee): app.handle_join(1, ieee, None) app.handle_join(2, ieee, None) assert app.devices[ieee].nwk == 2 async def test_unknown_device_left(app, ieee): with patch.object(app, "listener_event", wraps=app.listener_event): app.handle_leave(0x1234, ieee) app.listener_event.assert_not_called() async def test_known_device_left(app, ieee): dev = app.add_device(ieee, 0x1234) with patch.object(app, "listener_event", wraps=app.listener_event): app.handle_leave(0x1234, ieee) app.listener_event.assert_called_once_with("device_left", dev) async def _remove( app, ieee, retval, zdo_reply=True, delivery_failure=True, has_node_desc=True ): async def leave(*args, **kwargs): if zdo_reply: return retval elif delivery_failure: raise DeliveryError("Error") else: raise asyncio.TimeoutError device = MagicMock() device.ieee = ieee device.zdo.leave.side_effect = leave if has_node_desc: device.node_desc = zdo_t.NodeDescriptor(1, 64, 142, 4388, 82, 255, 0, 255, 0) else: device.node_desc = None app.devices[ieee] = device await app.remove(ieee) for _i in range(1, 20): await asyncio.sleep(0) assert ieee not in app.devices async def test_remove(app, ieee): """Test remove with successful zdo status.""" with patch.object(app, 
"_remove_device", wraps=app._remove_device) as remove_device: await _remove(app, ieee, [0]) assert remove_device.await_count == 1 async def test_remove_with_failed_zdo(app, ieee): """Test remove with unsuccessful zdo status.""" with patch.object(app, "_remove_device", wraps=app._remove_device) as remove_device: await _remove(app, ieee, [1]) assert remove_device.await_count == 1 async def test_remove_nonexistent(app, ieee): with patch.object(app, "_remove_device", AsyncMock()) as remove_device: await app.remove(ieee) for _i in range(1, 20): await asyncio.sleep(0) assert ieee not in app.devices assert remove_device.await_count == 0 async def test_remove_with_unreachable_device(app, ieee): with patch.object(app, "_remove_device", wraps=app._remove_device) as remove_device: await _remove(app, ieee, [0], zdo_reply=False) assert remove_device.await_count == 1 async def test_remove_with_reply_timeout(app, ieee): with patch.object(app, "_remove_device", wraps=app._remove_device) as remove_device: await _remove(app, ieee, [0], zdo_reply=False, delivery_failure=False) assert remove_device.await_count == 1 async def test_remove_without_node_desc(app, ieee): with patch.object(app, "_remove_device", wraps=app._remove_device) as remove_device: await _remove(app, ieee, [0], has_node_desc=False) assert remove_device.await_count == 1 def test_add_device(app, ieee): app.add_device(ieee, 8) app.add_device(ieee, 9) assert app.get_device(ieee).nwk == 9 def test_get_device_nwk(app, ieee): dev = app.add_device(ieee, 8) assert app.get_device(nwk=8) is dev def test_get_device_ieee(app, ieee): dev = app.add_device(ieee, 8) assert app.get_device(ieee=ieee) is dev def test_get_device_both(app, ieee): dev = app.add_device(ieee, 8) assert app.get_device(ieee=ieee, nwk=8) is dev def test_get_device_missing(app, ieee): with pytest.raises(KeyError): app.get_device(nwk=8) def test_device_property(app): app.add_device(nwk=0x0000, ieee=NCP_IEEE) assert app._device is app.get_device(ieee=NCP_IEEE) def 
test_ieee(app): assert app.state.node_info.ieee def test_nwk(app): assert app.state.node_info.nwk is not None def test_config(app): assert app.config == app._config def test_deserialize(app, ieee): dev = MagicMock() app.deserialize(dev, 1, 1, b"") assert dev.deserialize.call_count == 1 async def test_handle_message_shim(app): dev = MagicMock() dev.nwk = 0x1234 app.packet_received = MagicMock(spec_set=app.packet_received) app.handle_message(dev, 260, 1, 2, 3, b"data") assert app.packet_received.mock_calls == [ call( t.ZigbeePacket( profile_id=260, cluster_id=1, src_ep=2, dst_ep=3, data=t.SerializableBytes(b"data"), src=t.AddrModeAddress( addr_mode=t.AddrMode.NWK, address=0x1234, ), dst=t.AddrModeAddress( addr_mode=t.AddrMode.NWK, address=0x0000, ), ) ) ] @patch("zigpy.device.Device.is_initialized", new_callable=PropertyMock) @patch("zigpy.quirks.handle_message_from_uninitialized_sender", new=MagicMock()) async def test_handle_message_uninitialized_dev(is_init_mock, app, ieee): dev = app.add_device(ieee, 0x1234) dev.packet_received = MagicMock() is_init_mock.return_value = False assert not dev.initializing def make_packet( profile_id: int, cluster_id: int, src_ep: int, dst_ep: int, data: bytes ) -> t.ZigbeePacket: return t.ZigbeePacket( profile_id=profile_id, cluster_id=cluster_id, src_ep=src_ep, dst_ep=dst_ep, data=t.SerializableBytes(data), src=t.AddrModeAddress( addr_mode=t.AddrMode.NWK, address=dev.nwk, ), dst=t.AddrModeAddress( addr_mode=t.AddrMode.NWK, address=0x0000, ), ) # Power Configuration cluster not allowed, no endpoints app.packet_received( make_packet(profile_id=260, cluster_id=0x0001, src_ep=1, dst_ep=1, data=b"test") ) assert dev.packet_received.call_count == 0 assert zigpy.quirks.handle_message_from_uninitialized_sender.call_count == 1 # Device should be completing initialization assert dev.initializing # ZDO is allowed app.packet_received( make_packet(profile_id=260, cluster_id=0x0000, src_ep=0, dst_ep=0, data=b"test") ) assert 
dev.packet_received.call_count == 1 # Endpoint is uninitialized but Basic attribute read responses still work ep = dev.add_endpoint(1) app.packet_received( make_packet(profile_id=260, cluster_id=0x0000, src_ep=1, dst_ep=1, data=b"test") ) assert dev.packet_received.call_count == 2 # Others still do not app.packet_received( make_packet(profile_id=260, cluster_id=0x0001, src_ep=1, dst_ep=1, data=b"test") ) assert dev.packet_received.call_count == 2 assert zigpy.quirks.handle_message_from_uninitialized_sender.call_count == 2 # They work after the endpoint is initialized ep.status = zigpy.endpoint.Status.ZDO_INIT app.packet_received( make_packet(profile_id=260, cluster_id=0x0001, src_ep=1, dst_ep=1, data=b"test") ) assert dev.packet_received.call_count == 3 assert zigpy.quirks.handle_message_from_uninitialized_sender.call_count == 2 def test_get_dst_address(app): r = app.get_dst_address(MagicMock()) assert r.addrmode == 3 assert r.endpoint == 1 def test_props(app): assert app.state.network_info.channel is not None assert app.state.network_info.channel_mask is not None assert app.state.network_info.extended_pan_id is not None assert app.state.network_info.pan_id is not None assert app.state.network_info.nwk_update_id is not None def test_app_config_setter(app): """Test configuration setter.""" cfg_copy = app.config.copy() assert app.config[conf.CONF_OTA][conf.CONF_OTA_IKEA] is False with pytest.raises(vol.Invalid): cfg_copy[conf.CONF_OTA][conf.CONF_OTA_IKEA] = "invalid bool" app.config = cfg_copy assert app.config[conf.CONF_OTA][conf.CONF_OTA_IKEA] is False cfg_copy[conf.CONF_OTA][conf.CONF_OTA_IKEA] = True app.config = cfg_copy assert app.config[conf.CONF_OTA][conf.CONF_OTA_IKEA] is True with pytest.raises(vol.Invalid): cfg_copy[conf.CONF_OTA][conf.CONF_OTA_IKEA] = "invalid bool" app.config = cfg_copy assert app.config[conf.CONF_OTA][conf.CONF_OTA_IKEA] is True def test_app_update_config(app): """Test configuration partial update.""" assert 
app.config[conf.CONF_OTA][conf.CONF_OTA_IKEA] is False with pytest.raises(vol.Invalid): app.update_config({conf.CONF_OTA: {conf.CONF_OTA_IKEA: "invalid bool"}}) assert app.config[conf.CONF_OTA][conf.CONF_OTA_IKEA] is False app.update_config({conf.CONF_OTA: {conf.CONF_OTA_IKEA: "yes"}}) assert app.config[conf.CONF_OTA][conf.CONF_OTA_IKEA] is True with pytest.raises(vol.Invalid): app.update_config({conf.CONF_OTA: {conf.CONF_OTA_IKEA: "invalid bool"}}) assert app.config[conf.CONF_OTA][conf.CONF_OTA_IKEA] is True async def test_uninitialized_message_handlers(app, ieee): """Test uninitialized message handlers.""" handler_1 = MagicMock(return_value=None) handler_2 = MagicMock(return_value=True) zigpy.quirks.register_uninitialized_device_message_handler(handler_1) zigpy.quirks.register_uninitialized_device_message_handler(handler_2) device = app.add_device(ieee, 0x1234) app.handle_message(device, 0x0260, 0x0000, 0, 0, b"123abcd23") assert handler_1.call_count == 0 assert handler_2.call_count == 0 app.handle_message(device, 0x0260, 0x0000, 1, 1, b"123abcd23") assert handler_1.call_count == 1 assert handler_2.call_count == 1 handler_1.return_value = True app.handle_message(device, 0x0260, 0x0000, 1, 1, b"123abcd23") assert handler_1.call_count == 2 assert handler_2.call_count == 1 async def test_remove_parent_devices(app, make_initialized_device): """Test removing an end device with parents.""" end_device = make_initialized_device(app) end_device.node_desc.logical_type = zdo_t.LogicalType.EndDevice router_1 = make_initialized_device(app) router_1.node_desc.logical_type = zdo_t.LogicalType.Router router_2 = make_initialized_device(app) router_2.node_desc.logical_type = zdo_t.LogicalType.Router parent = make_initialized_device(app) app.topology.neighbors[router_1.ieee] = [ make_neighbor_from_device(router_2), make_neighbor_from_device(parent), ] app.topology.neighbors[router_2.ieee] = [ make_neighbor_from_device(parent), make_neighbor_from_device(router_1), ] 
app.topology.neighbors[parent.ieee] = [ make_neighbor_from_device(router_2), make_neighbor_from_device(router_1), make_neighbor_from_device(end_device), make_neighbor(ieee=make_ieee(123), nwk=0x9876), ] p1 = patch.object(end_device.zdo, "leave", AsyncMock()) p2 = patch.object(end_device.zdo, "request", AsyncMock()) p3 = patch.object(parent.zdo, "leave", AsyncMock()) p4 = patch.object(parent.zdo, "request", AsyncMock()) p5 = patch.object(router_1.zdo, "leave", AsyncMock()) p6 = patch.object(router_1.zdo, "request", AsyncMock()) p7 = patch.object(router_2.zdo, "leave", AsyncMock()) p8 = patch.object(router_2.zdo, "request", AsyncMock()) with p1, p2, p3, p4, p5, p6, p7, p8: await app.remove(end_device.ieee) for _i in range(1, 60): await asyncio.sleep(0) assert end_device.zdo.leave.await_count == 1 assert end_device.zdo.request.await_count == 0 assert router_1.zdo.leave.await_count == 0 assert router_1.zdo.request.await_count == 0 assert router_2.zdo.leave.await_count == 0 assert router_2.zdo.request.await_count == 0 assert parent.zdo.leave.await_count == 0 assert parent.zdo.request.await_count == 1 @patch("zigpy.device.Device.schedule_initialize", new_callable=MagicMock) @patch("zigpy.device.Device.schedule_group_membership_scan", new_callable=MagicMock) @patch("zigpy.device.Device.is_initialized", new_callable=PropertyMock) async def test_device_join_rejoin(is_init_mock, group_scan_mock, init_mock, app, ieee): app.listener_event = MagicMock() is_init_mock.return_value = False # First join is treated as a new join app.handle_join(0x0001, ieee, None) app.listener_event.assert_called_once_with("device_joined", ANY) app.listener_event.reset_mock() init_mock.assert_called_once() init_mock.reset_mock() # Second join with the same NWK is just a reset, not a join app.handle_join(0x0001, ieee, None) app.listener_event.assert_not_called() group_scan_mock.assert_not_called() # Since the device is still partially initialized, re-initialize it init_mock.assert_called_once() 
init_mock.reset_mock() # Another join with the same NWK but initialized will trigger a group re-scan is_init_mock.return_value = True app.handle_join(0x0001, ieee, None) is_init_mock.return_value = True app.listener_event.assert_not_called() group_scan_mock.assert_called_once() group_scan_mock.reset_mock() init_mock.assert_not_called() # Join with a different NWK but the same IEEE is a re-join app.handle_join(0x0002, ieee, None) app.listener_event.assert_called_once_with("device_joined", ANY) group_scan_mock.assert_not_called() init_mock.assert_called_once() async def test_get_device(app): """Test get_device.""" await app.startup() app.add_device(t.EUI64.convert("11:11:11:11:22:22:22:22"), 0x0000) dev_2 = app.add_device(app.state.node_info.ieee, 0x0000) app.add_device(t.EUI64.convert("11:11:11:11:22:22:22:33"), 0x0000) assert app.get_device(nwk=0x0000) is dev_2 async def test_probe_success(): config = {"path": "/dev/test"} with patch.object(App, "connect") as connect, patch.object( App, "disconnect" ) as disconnect: result = await App.probe(config) assert set(config.items()) <= set(result.items()) assert connect.await_count == 1 assert disconnect.await_count == 1 async def test_probe_failure(): config = {"path": "/dev/test"} with patch.object( App, "connect", side_effect=asyncio.TimeoutError ) as connect, patch.object(App, "disconnect") as disconnect: result = await App.probe(config) assert result is False assert connect.await_count == 1 assert disconnect.await_count == 1 async def test_form_network(app): with patch.object(app, "write_network_info") as write1: await app.form_network() with patch.object(app, "write_network_info") as write2: await app.form_network() nwk_info1 = write1.mock_calls[0].kwargs["network_info"] node_info1 = write1.mock_calls[0].kwargs["node_info"] nwk_info2 = write2.mock_calls[0].kwargs["network_info"] node_info2 = write2.mock_calls[0].kwargs["node_info"] assert node_info1 == node_info2 # Critical network settings are randomized assert 
nwk_info1.extended_pan_id != nwk_info2.extended_pan_id assert nwk_info1.pan_id != nwk_info2.pan_id assert nwk_info1.network_key != nwk_info2.network_key # The well-known TCLK is used assert ( nwk_info1.tc_link_key.key == nwk_info2.tc_link_key.key == t.KeyData(b"ZigBeeAlliance09") ) assert nwk_info1.channel in (11, 15, 20, 25) @mock.patch("zigpy.util.pick_optimal_channel", mock.Mock(return_value=22)) async def test_form_network_find_best_channel(app): orig_start_network = app.start_network async def start_network(*args, **kwargs): start_network.await_count += 1 if start_network.await_count == 1: raise NetworkNotFormed() return await orig_start_network(*args, **kwargs) start_network.await_count = 0 app.start_network = start_network with patch.object(app, "write_network_info") as write: with patch.object( app.backups, "create_backup", wraps=app.backups.create_backup ) as create_backup: await app.form_network() assert start_network.await_count == 2 # A temporary network will be formed first nwk_info1 = write.mock_calls[0].kwargs["network_info"] assert nwk_info1.channel == 11 # Then, after the scan, a better channel is chosen nwk_info2 = write.mock_calls[1].kwargs["network_info"] assert nwk_info2.channel == 22 # Only a single backup will be present assert create_backup.await_count == 1 async def test_startup_formed(): app = make_app({conf.CONF_STARTUP_ENERGY_SCAN: False}) app.start_network = AsyncMock(wraps=app.start_network) app.form_network = AsyncMock() app.permit = AsyncMock() await app.startup(auto_form=False) assert app.start_network.await_count == 1 assert app.form_network.await_count == 0 assert app.permit.await_count == 1 async def test_startup_not_formed(): app = make_app({conf.CONF_STARTUP_ENERGY_SCAN: False}) app.start_network = AsyncMock(wraps=app.start_network) app.form_network = AsyncMock() app.load_network_info = AsyncMock( side_effect=[NetworkNotFormed(), NetworkNotFormed(), None] ) app.permit = AsyncMock() app.backups.backups = [] 
app.backups.restore_backup = AsyncMock() with pytest.raises(NetworkNotFormed): await app.startup(auto_form=False) assert app.start_network.await_count == 0 assert app.form_network.await_count == 0 assert app.permit.await_count == 0 await app.startup(auto_form=True) assert app.start_network.await_count == 1 assert app.form_network.await_count == 1 assert app.permit.await_count == 1 assert app.backups.restore_backup.await_count == 0 async def test_startup_not_formed_with_backup(): app = make_app({conf.CONF_STARTUP_ENERGY_SCAN: False}) app.start_network = AsyncMock(wraps=app.start_network) app.load_network_info = AsyncMock(side_effect=[NetworkNotFormed(), None]) app.permit = AsyncMock() app.backups.restore_backup = AsyncMock() app.backups.backups = [sentinel.OLD_BACKUP, sentinel.NEW_BACKUP] await app.startup(auto_form=True) assert app.start_network.await_count == 1 app.backups.restore_backup.assert_called_once_with(sentinel.NEW_BACKUP) async def test_startup_backup(): app = make_app({conf.CONF_NWK_BACKUP_ENABLED: True}) with patch("zigpy.backups.BackupManager.start_periodic_backups") as p: await app.startup() p.assert_called_once() async def test_startup_no_backup(): app = make_app({conf.CONF_NWK_BACKUP_ENABLED: False}) with patch("zigpy.backups.BackupManager.start_periodic_backups") as p: await app.startup() p.assert_not_called() def with_attributes(obj, **attrs): for k, v in attrs.items(): setattr(obj, k, v) return obj @pytest.mark.parametrize( "error", [ with_attributes(OSError("Network is unreachable"), errno=errno.ENETUNREACH), ConnectionRefusedError(), ], ) async def test_startup_failure_transient_error(error): app = make_app({conf.CONF_NWK_BACKUP_ENABLED: False}) with patch.object(app, "connect", side_effect=[error]): with pytest.raises(TransientConnectionError): await app.startup() @patch("zigpy.backups.BackupManager.from_network_state") @patch("zigpy.backups.BackupManager.most_recent_backup") async def test_initialize_compatible_backup( 
mock_most_recent_backup, mock_backup_from_state ): app = make_app({conf.CONF_NWK_VALIDATE_SETTINGS: True}) mock_backup_from_state.return_value.is_compatible_with.return_value = True await app.initialize() mock_backup_from_state.return_value.is_compatible_with.assert_called_once() mock_most_recent_backup.assert_called_once() @patch("zigpy.backups.BackupManager.from_network_state") @patch("zigpy.backups.BackupManager.most_recent_backup") async def test_initialize_incompatible_backup( mock_most_recent_backup, mock_backup_from_state ): app = make_app({conf.CONF_NWK_VALIDATE_SETTINGS: True}) mock_backup_from_state.return_value.is_compatible_with.return_value = False with pytest.raises(NetworkSettingsInconsistent) as exc: await app.initialize() mock_backup_from_state.return_value.is_compatible_with.assert_called_once() mock_most_recent_backup.assert_called_once() assert exc.value.old_state is mock_most_recent_backup() assert exc.value.new_state is mock_backup_from_state.return_value async def test_relays_received_device_exists(app): device = MagicMock() app._discover_unknown_device = AsyncMock(spec_set=app._discover_unknown_device) app.get_device = MagicMock(spec_set=app.get_device, return_value=device) app.handle_relays(nwk=0x1234, relays=[0x5678, 0xABCD]) app.get_device.assert_called_once_with(nwk=0x1234) assert device.relays == [0x5678, 0xABCD] assert app._discover_unknown_device.call_count == 0 async def test_relays_received_device_does_not_exist(app): app._discover_unknown_device = AsyncMock(spec_set=app._discover_unknown_device) app.get_device = MagicMock(wraps=app.get_device) app.handle_relays(nwk=0x1234, relays=[0x5678, 0xABCD]) app.get_device.assert_called_once_with(nwk=0x1234) app._discover_unknown_device.assert_called_once_with(nwk=0x1234) async def test_request_concurrency(): current_concurrency = 0 peak_concurrency = 0 class SlowApp(App): async def send_packet(self, packet): nonlocal current_concurrency, peak_concurrency async with self._limit_concurrency(): 
current_concurrency += 1 peak_concurrency = max(peak_concurrency, current_concurrency) await asyncio.sleep(0.1) current_concurrency -= 1 if packet % 10 == 7: # Fail randomly raise asyncio.DeliveryError() app = make_app({conf.CONF_MAX_CONCURRENT_REQUESTS: 16}, app_base=SlowApp) assert current_concurrency == 0 assert peak_concurrency == 0 await asyncio.gather( *[app.send_packet(i) for i in range(100)], return_exceptions=True ) assert current_concurrency == 0 assert peak_concurrency == 16 @pytest.fixture def device(): device = MagicMock() device.nwk = 0xABCD device.ieee = t.EUI64.convert("aa:bb:cc:dd:11:22:33:44") return device @pytest.fixture def packet(app, device): return t.ZigbeePacket( src=t.AddrModeAddress( addr_mode=t.AddrMode.NWK, address=app.state.node_info.nwk ), src_ep=0x9A, dst=t.AddrModeAddress(addr_mode=t.AddrMode.NWK, address=device.nwk), dst_ep=0xBC, tsn=0xDE, profile_id=0x1234, cluster_id=0x0006, data=t.SerializableBytes(b"test data"), source_route=None, extended_timeout=False, tx_options=t.TransmitOptions.NONE, ) async def test_request(app, device, packet): app.build_source_route_to = MagicMock(spec_set=app.build_source_route_to) async def send_request(app, **kwargs): kwargs = { "device": device, "profile": 0x1234, "cluster": 0x0006, "src_ep": 0x9A, "dst_ep": 0xBC, "sequence": 0xDE, "data": b"test data", "expect_reply": True, "use_ieee": False, "extended_timeout": False, **kwargs, } return await app.request(**kwargs) # Test sending with NWK status, msg = await send_request(app) assert status == zigpy.zcl.foundation.Status.SUCCESS assert isinstance(msg, str) app.send_packet.assert_called_once_with(packet) app.send_packet.reset_mock() # Test sending with IEEE await send_request(app, use_ieee=True) app.send_packet.assert_called_once_with( packet.replace( src=t.AddrModeAddress( addr_mode=t.AddrMode.IEEE, address=app.state.node_info.ieee ), dst=t.AddrModeAddress(addr_mode=t.AddrMode.IEEE, address=device.ieee), ) ) app.send_packet.reset_mock() # Test 
sending with source route app.build_source_route_to.return_value = [0x000A, 0x000B] with patch.dict(app.config, {conf.CONF_SOURCE_ROUTING: True}): await send_request(app) app.build_source_route_to.assert_called_once_with(dest=device) app.send_packet.assert_called_once_with( packet.replace(source_route=[0x000A, 0x000B]) ) app.send_packet.reset_mock() # Test sending without waiting for a reply status, msg = await send_request(app, expect_reply=False) app.send_packet.assert_called_once_with( packet.replace(tx_options=t.TransmitOptions.ACK) ) app.send_packet.reset_mock() def test_build_source_route_has_relays(app): device = MagicMock() device.relays = [0x1234, 0x5678] assert app.build_source_route_to(device) == [0x5678, 0x1234] def test_build_source_route_no_relays(app): device = MagicMock() device.relays = None assert app.build_source_route_to(device) is None async def test_send_mrequest(app, packet): status, msg = await app.mrequest( group_id=0xABCD, profile=0x1234, cluster=0x0006, src_ep=0x9A, sequence=0xDE, data=b"test data", hops=12, non_member_radius=34, ) assert status == zigpy.zcl.foundation.Status.SUCCESS assert isinstance(msg, str) app.send_packet.assert_called_once_with( packet.replace( dst=t.AddrModeAddress(addr_mode=t.AddrMode.Group, address=0xABCD), dst_ep=None, radius=12, non_member_radius=34, tx_options=t.TransmitOptions.NONE, ) ) async def test_send_broadcast(app, packet): status, msg = await app.broadcast( profile=0x1234, cluster=0x0006, src_ep=0x9A, dst_ep=0xBC, grpid=0x0000, # unused radius=12, sequence=0xDE, data=b"test data", broadcast_address=t.BroadcastAddress.RX_ON_WHEN_IDLE, ) assert status == zigpy.zcl.foundation.Status.SUCCESS assert isinstance(msg, str) app.send_packet.assert_called_once_with( packet.replace( dst=t.AddrModeAddress( addr_mode=t.AddrMode.Broadcast, address=t.BroadcastAddress.RX_ON_WHEN_IDLE, ), radius=12, tx_options=t.TransmitOptions.NONE, ) ) @pytest.fixture def zdo_packet(app, device): return t.ZigbeePacket( 
src=t.AddrModeAddress(addr_mode=t.AddrMode.NWK, address=device.nwk), dst=t.AddrModeAddress( addr_mode=t.AddrMode.NWK, address=app.state.node_info.nwk ), src_ep=0x00, # ZDO dst_ep=0x00, tsn=0xDE, profile_id=0x0000, cluster_id=0x0000, data=t.SerializableBytes(b""), source_route=None, extended_timeout=False, tx_options=t.TransmitOptions.ACK, lqi=123, rssi=-80, ) @patch("zigpy.device.Device.initialize", AsyncMock()) async def test_packet_received_new_device_zdo_announce(app, device, zdo_packet): app.handle_join = MagicMock(wraps=app.handle_join) zdo_data = zigpy.zdo.ZDO(None)._serialize( zdo_t.ZDOCmd.Device_annce, *{ "NWKAddr": device.nwk, "IEEEAddr": device.ieee, "Capability": 0x00, }.values(), ) zdo_packet.cluster_id = zdo_t.ZDOCmd.Device_annce zdo_packet.data = t.SerializableBytes( t.uint8_t(zdo_packet.tsn).serialize() + zdo_data ) app.packet_received(zdo_packet) app.handle_join.assert_called_once_with( nwk=device.nwk, ieee=device.ieee, parent_nwk=None ) zigpy_device = app.get_device(ieee=device.ieee) assert zigpy_device.lqi == zdo_packet.lqi assert zigpy_device.rssi == zdo_packet.rssi @patch("zigpy.device.Device.initialize", AsyncMock()) async def test_packet_received_new_device_discovery(app, device, zdo_packet): app.handle_join = MagicMock(wraps=app.handle_join) async def send_packet(packet): if packet.dst_ep != 0x00 or packet.cluster_id != zdo_t.ZDOCmd.IEEE_addr_req: return hdr, args = zigpy.zdo.ZDO(None).deserialize( packet.cluster_id, packet.data.serialize() ) assert args == list( { "NWKAddrOfInterest": device.nwk, "RequestType": zdo_t.AddrRequestType.Single, "StartIndex": 0, }.values() ) zdo_data = zigpy.zdo.ZDO(None)._serialize( zdo_t.ZDOCmd.IEEE_addr_rsp, *{ "Status": zdo_t.Status.SUCCESS, "IEEEAddr": device.ieee, "NWKAddr": device.nwk, "NumAssocDev": 0, "StartIndex": 0, "NWKAddrAssocDevList": [], }.values(), ) # Receive the IEEE address reply zdo_packet.data = t.SerializableBytes( t.uint8_t(zdo_packet.tsn).serialize() + zdo_data ) zdo_packet.cluster_id = 
zdo_t.ZDOCmd.IEEE_addr_rsp app.packet_received(zdo_packet) app.send_packet = AsyncMock(side_effect=send_packet) # Receive a bogus packet first, to trigger device discovery bogus_packet = zdo_packet.replace(dst_ep=0x01, src_ep=0x01) app.packet_received(bogus_packet) await asyncio.sleep(0.1) app.handle_join.assert_called_once_with( nwk=device.nwk, ieee=device.ieee, parent_nwk=None, handle_rejoin=False ) zigpy_device = app.get_device(ieee=device.ieee) assert zigpy_device.lqi == zdo_packet.lqi assert zigpy_device.rssi == zdo_packet.rssi @patch("zigpy.device.Device.initialize", AsyncMock()) async def test_packet_received_ieee_no_rejoin(app, device, zdo_packet, caplog): device.is_initialized = True app.devices[device.ieee] = device app.handle_join = MagicMock(wraps=app.handle_join) zdo_data = zigpy.zdo.ZDO(None)._serialize( zdo_t.ZDOCmd.IEEE_addr_rsp, *{ "Status": zdo_t.Status.SUCCESS, "IEEEAddr": device.ieee, "NWKAddr": device.nwk, }.values(), ) zdo_packet.cluster_id = zdo_t.ZDOCmd.IEEE_addr_rsp zdo_packet.data = t.SerializableBytes( t.uint8_t(zdo_packet.tsn).serialize() + zdo_data ) app.packet_received(zdo_packet) assert "joined the network" not in caplog.text app.handle_join.assert_called_once_with( nwk=device.nwk, ieee=device.ieee, parent_nwk=None, handle_rejoin=False ) assert len(device.schedule_group_membership_scan.mock_calls) == 0 assert len(device.schedule_initialize.mock_calls) == 0 @patch("zigpy.device.Device.initialize", AsyncMock()) async def test_packet_received_ieee_rejoin(app, device, zdo_packet, caplog): device.is_initialized = True app.devices[device.ieee] = device app.handle_join = MagicMock(wraps=app.handle_join) zdo_data = zigpy.zdo.ZDO(None)._serialize( zdo_t.ZDOCmd.IEEE_addr_rsp, *{ "Status": zdo_t.Status.SUCCESS, "IEEEAddr": device.ieee, "NWKAddr": device.nwk + 1, # NWK has changed }.values(), ) zdo_packet.cluster_id = zdo_t.ZDOCmd.IEEE_addr_rsp zdo_packet.data = t.SerializableBytes( t.uint8_t(zdo_packet.tsn).serialize() + zdo_data ) 
app.packet_received(zdo_packet) assert "joined the network" not in caplog.text app.handle_join.assert_called_once_with( nwk=device.nwk, ieee=device.ieee, parent_nwk=None, handle_rejoin=False ) assert len(device.schedule_initialize.mock_calls) == 1 async def test_bad_zdo_packet_received(app, device): device.is_initialized = True app.devices[device.ieee] = device bogus_zdo_packet = t.ZigbeePacket( src=t.AddrModeAddress(addr_mode=t.AddrMode.NWK, address=device.nwk), src_ep=1, dst=t.AddrModeAddress(addr_mode=t.AddrMode.NWK, address=0x0000), dst_ep=0, # bad destination endpoint tsn=180, profile_id=260, cluster_id=6, data=t.SerializableBytes(b"\x08n\n\x00\x00\x10\x00"), lqi=255, rssi=-30, ) app.packet_received(bogus_zdo_packet) assert len(device.packet_received.mock_calls) == 1 def test_get_device_with_address_nwk(app, device): app.devices[device.ieee] = device assert ( app.get_device_with_address( t.AddrModeAddress(addr_mode=t.AddrMode.NWK, address=device.nwk) ) is device ) assert ( app.get_device_with_address( t.AddrModeAddress(addr_mode=t.AddrMode.IEEE, address=device.ieee) ) is device ) with pytest.raises(ValueError): app.get_device_with_address( t.AddrModeAddress(addr_mode=t.AddrMode.Group, address=device.nwk) ) with pytest.raises(KeyError): app.get_device_with_address( t.AddrModeAddress(addr_mode=t.AddrMode.NWK, address=device.nwk + 1) ) async def test_request_future_matching(app, make_initialized_device): device = make_initialized_device(app) ota = device.endpoints[1].add_output_cluster(clusters.general.Ota.cluster_id) req_hdr, req_cmd = ota._create_request( general=False, command_id=ota.commands_by_name["query_next_image"].id, schema=ota.commands_by_name["query_next_image"].schema, disable_default_response=False, direction=foundation.Direction.Client_to_Server, args=(), kwargs={ "field_control": 0, "manufacturer_code": 0x1234, "image_type": 0x5678, "current_file_version": 0x11112222, }, ) packet = t.ZigbeePacket( src=t.AddrModeAddress(addr_mode=t.AddrMode.NWK, 
address=device.nwk), src_ep=1, dst=t.AddrModeAddress(addr_mode=t.AddrMode.NWK, address=0x0000), dst_ep=1, tsn=req_hdr.tsn, profile_id=260, cluster_id=ota.cluster_id, data=t.SerializableBytes(req_hdr.serialize() + req_cmd.serialize()), lqi=255, rssi=-30, ) assert not app._req_listeners[device] with app.wait_for_response( device, [ota.commands_by_name["query_next_image"].schema()] ) as rsp_fut: # Attach two listeners with app.wait_for_response( device, [ota.commands_by_name["query_next_image"].schema()] ) as rsp_fut2: assert app._req_listeners[device] # Listeners are resolved FIFO app.packet_received(packet) assert rsp_fut.done() assert not rsp_fut2.done() app.packet_received(packet) assert rsp_fut.done() assert rsp_fut2.done() # Unhandled packets are ignored app.packet_received(packet) rsp_hdr, rsp_cmd = await rsp_fut assert rsp_hdr == req_hdr assert rsp_cmd == req_cmd assert rsp_cmd.current_file_version == 0x11112222 assert not app._req_listeners[device] async def test_request_callback_matching(app, make_initialized_device): device = make_initialized_device(app) ota = device.endpoints[1].add_output_cluster(clusters.general.Ota.cluster_id) req_hdr, req_cmd = ota._create_request( general=False, command_id=ota.commands_by_name["query_next_image"].id, schema=ota.commands_by_name["query_next_image"].schema, disable_default_response=False, direction=foundation.Direction.Client_to_Server, args=(), kwargs={ "field_control": 0, "manufacturer_code": 0x1234, "image_type": 0x5678, "current_file_version": 0x11112222, }, ) packet = t.ZigbeePacket( src=t.AddrModeAddress(addr_mode=t.AddrMode.NWK, address=device.nwk), src_ep=1, dst=t.AddrModeAddress(addr_mode=t.AddrMode.NWK, address=0x0000), dst_ep=1, tsn=req_hdr.tsn, profile_id=260, cluster_id=ota.cluster_id, data=t.SerializableBytes(req_hdr.serialize() + req_cmd.serialize()), lqi=255, rssi=-30, ) mock_callback = mock.Mock() assert not app._req_listeners[device] with app.callback_for_response( device, 
[ota.commands_by_name["query_next_image"].schema()], mock_callback ): assert app._req_listeners[device] asyncio.get_running_loop().call_soon(app.packet_received, packet) asyncio.get_running_loop().call_soon(app.packet_received, packet) asyncio.get_running_loop().call_soon(app.packet_received, packet) await asyncio.sleep(0.1) assert len(mock_callback.mock_calls) == 3 assert mock_callback.mock_calls == [mock.call(req_hdr, req_cmd)] * 3 assert not app._req_listeners[device] async def test_energy_scan_default(app): await app.startup() raw_scan_results = [ 170, 191, 181, 165, 179, 169, 196, 163, 174, 162, 190, 186, 191, 178, 204, 187, ] coordinator = app._device coordinator.zdo.Mgmt_NWK_Update_req = AsyncMock( return_value=[ zdo_t.Status.SUCCESS, t.Channels.ALL_CHANNELS, 29, 10, raw_scan_results, ] ) results = await app.energy_scan( channels=t.Channels.ALL_CHANNELS, duration_exp=2, count=1 ) assert len(results) == 16 assert results == dict(zip(range(11, 26 + 1), raw_scan_results)) async def test_energy_scan_not_implemented(app): """Energy scanning still "works" even when the radio doesn't implement it.""" await app.startup() app._device.zdo.Mgmt_NWK_Update_req.side_effect = asyncio.TimeoutError() results = await app.energy_scan( channels=t.Channels.ALL_CHANNELS, duration_exp=2, count=1 ) assert results == {c: 0 for c in range(11, 26 + 1)} @pytest.mark.parametrize( "scan, message_present", [ ({c: 0 for c in t.Channels.ALL_CHANNELS}, False), ({c: 255 for c in t.Channels.ALL_CHANNELS}, True), ], ) async def test_startup_energy_scan(app, caplog, scan, message_present): with mock.patch.object(app, "energy_scan", return_value=scan): with caplog.at_level(logging.WARNING): await app.startup() if message_present: assert "Zigbee channel 15 utilization is 100.00%" in caplog.text else: assert "Zigbee channel" not in caplog.text async def test_startup_broadcast_failure_due_to_interference(app, caplog): err = DeliveryError( "Failed to deliver packet: ", 225 ) with 
mock.patch.object(app, "permit", side_effect=err): with caplog.at_level(logging.WARNING): await app.startup() # The application will still start up, however assert "Failed to send startup broadcast" in caplog.text assert "interference" in caplog.text async def test_startup_broadcast_failure_other(app, caplog): with mock.patch.object(app, "permit", side_effect=DeliveryError("Error", 123)): with pytest.raises(DeliveryError, match="^Error$"): await app.startup() @patch("zigpy.application.CHANNEL_CHANGE_SETTINGS_RELOAD_DELAY_S", 0.1) @patch("zigpy.application.CHANNEL_CHANGE_BROADCAST_DELAY_S", 0.01) async def test_move_network_to_new_channel(app): async def nwk_update(*args, **kwargs): async def inner(): await asyncio.sleep( zigpy.application.CHANNEL_CHANGE_SETTINGS_RELOAD_DELAY_S * 5 ) NwkUpdate = args[0] app.state.network_info.channel = list(NwkUpdate.ScanChannels)[0] app.state.network_info.nwk_update_id = NwkUpdate.nwkUpdateId asyncio.create_task(inner()) # noqa: RUF006 await app.startup() assert app.state.network_info.channel != 26 with patch.object( app._device.zdo, "Mgmt_NWK_Update_req", side_effect=nwk_update ) as mock_update: await app.move_network_to_channel(new_channel=26, num_broadcasts=10) assert app.state.network_info.channel == 26 assert len(mock_update.mock_calls) == 1 async def test_move_network_to_new_channel_noop(app): await app.startup() old_channel = app.state.network_info.channel with patch("zigpy.zdo.broadcast") as mock_broadcast: await app.move_network_to_channel(new_channel=old_channel) assert app.state.network_info.channel == old_channel assert len(mock_broadcast.mock_calls) == 0 async def test_startup_multiple_dblistener(app): app._dblistener = AsyncMock() app.connect = AsyncMock(side_effect=RuntimeError()) with pytest.raises(RuntimeError): await app.startup() with pytest.raises(RuntimeError): await app.startup() # The database listener will not be shut down automatically assert len(app._dblistener.shutdown.mock_calls) == 0 async def 
test_connection_lost(app): exc = RuntimeError() listener = MagicMock() app.add_listener(listener) app.connection_lost(exc) listener.connection_lost.assert_called_with(exc) async def test_watchdog(app): error = RuntimeError() app = make_app({}) app._watchdog_period = 0.1 app._watchdog_feed = AsyncMock(side_effect=[None, None, error]) app.connection_lost = MagicMock() assert app._watchdog_task is None await app.startup() assert app._watchdog_task is not None # We call it once during startup synchronously assert app._watchdog_feed.mock_calls == [call()] assert app.connection_lost.mock_calls == [] await asyncio.sleep(0.5) assert app._watchdog_feed.mock_calls == [call(), call(), call()] assert app.connection_lost.mock_calls == [call(error)] assert app._watchdog_task.done() async def test_permit_with_key(app): app = make_app({}) app.permit_with_link_key = AsyncMock() with pytest.raises(ValueError): await app.permit_with_key( node=t.EUI64.convert("aa:bb:cc:dd:11:22:33:44"), code=b"invalid code that is far too long and of the wrong parity", time_s=60, ) assert app.permit_with_link_key.mock_calls == [] await app.permit_with_key( node=t.EUI64.convert("aa:bb:cc:dd:11:22:33:44"), code=bytes.fromhex("11223344556677884AF7"), time_s=60, ) assert app.permit_with_link_key.mock_calls == [ call( node=t.EUI64.convert("aa:bb:cc:dd:11:22:33:44"), link_key=t.KeyData.convert("41618FC0C83B0E14A589954B16E31466"), time_s=60, ) ] async def test_probe(app): class BaudSpecificApp(App): _probe_configs = [ {conf.CONF_DEVICE_BAUDRATE: 57600}, {conf.CONF_DEVICE_BAUDRATE: 115200}, ] async def connect(self): if self._config[conf.CONF_DEVICE][conf.CONF_DEVICE_BAUDRATE] != 115200: raise asyncio.TimeoutError() # Only one baudrate is valid assert (await BaudSpecificApp.probe({conf.CONF_DEVICE_PATH: "/dev/null"})) == { conf.CONF_DEVICE_PATH: "/dev/null", conf.CONF_DEVICE_BAUDRATE: 115200, conf.CONF_DEVICE_FLOW_CONTROL: None, } class NeverConnectsApp(App): async def connect(self): raise 
asyncio.TimeoutError() # No settings will work assert (await NeverConnectsApp.probe({conf.CONF_DEVICE_PATH: "/dev/null"})) is False zigpy-0.62.3/tests/test_backups.py000066400000000000000000000276421456054056700171760ustar00rootroot00000000000000from datetime import datetime, timedelta, timezone import json import pytest import zigpy.backups import zigpy.state as app_state import zigpy.types as t import zigpy.zdo.types as zdo_t from tests.async_mock import AsyncMock from tests.conftest import app # noqa: F401 @pytest.fixture def backup_factory(): def inner(): return zigpy.backups.NetworkBackup( backup_time=datetime(2021, 2, 8, 19, 35, 24, 761000, tzinfo=timezone.utc), node_info=app_state.NodeInfo( nwk=t.NWK(0x0000), ieee=t.EUI64.convert("93:2C:A9:34:D9:D0:5D:12"), logical_type=zdo_t.LogicalType.Coordinator, model="Coordinator Model", manufacturer="Coordinator Manufacturer", version="1.2.3.4", ), network_info=app_state.NetworkInfo( extended_pan_id=t.ExtendedPanId.convert("0D:49:91:99:AE:CD:3C:35"), pan_id=t.PanId(0x9BB0), nwk_update_id=0x12, nwk_manager_id=t.NWK(0x0000), channel=t.uint8_t(15), channel_mask=t.Channels.from_channel_list([15, 20, 25]), security_level=t.uint8_t(5), network_key=app_state.Key( key=t.KeyData.convert( "9A:79:D6:9A:DA:EC:45:C6:F2:EF:EB:AF:DA:A3:07:B6" ), seq=108, tx_counter=39009277, ), tc_link_key=app_state.Key( key=t.KeyData(b"ZigBeeAlliance09"), partner_ieee=t.EUI64.convert("93:2C:A9:34:D9:D0:5D:12"), tx_counter=8712428, ), key_table=[ app_state.Key( key=t.KeyData.convert( "85:7C:05:00:3E:76:1A:F9:68:9A:49:41:6A:60:5C:76" ), tx_counter=3792973670, rx_counter=1083290572, seq=147, partner_ieee=t.EUI64.convert("69:0C:07:52:AA:D7:7D:71"), ), app_state.Key( key=t.KeyData.convert( "CA:02:E8:BB:75:7C:94:F8:93:39:D3:9C:B3:CD:A7:BE" ), tx_counter=2597245184, rx_counter=824424412, seq=19, partner_ieee=t.EUI64.convert("A3:1A:F6:8E:19:95:23:BE"), ), ], children=[ # Has a key t.EUI64.convert("A3:1A:F6:8E:19:95:23:BE"), # Random device with no NWK 
address or key t.EUI64.convert("A4:02:A0:DC:17:D8:17:DF"), # Does not have a key t.EUI64.convert("C6:DF:28:F9:60:33:DB:03"), ], # If exposed by the stack, NWK addresses of other connected devices on the network nwk_addresses={ # Two children above t.EUI64.convert("A3:1A:F6:8E:19:95:23:BE"): t.NWK(0x2C59), t.EUI64.convert("C6:DF:28:F9:60:33:DB:03"): t.NWK(0x1CA0), # Random devices on the network t.EUI64.convert("7A:BF:38:A9:59:21:A0:7A"): t.NWK(0x16B5), t.EUI64.convert("10:55:FE:67:24:EA:96:D3"): t.NWK(0xBFB9), t.EUI64.convert("9A:0E:10:50:00:1B:1A:5F"): t.NWK(0x1AF6), }, stack_specific={ "zstack": {"tclk_seed": "71e31105bb92a2d15747a0d0a042dbfd"} }, metadata={"zstack": {"version": "20220102"}}, ), ) return inner @pytest.fixture def backup(backup_factory): return backup_factory() @pytest.fixture def z2m_backup_json(): return { "metadata": { "format": "zigpy/open-coordinator-backup", "version": 1, "source": "zigbee-herdsman@0.13.65", "internal": {"date": "2021-02-08T19:35:24.761Z", "znpVersion": 2}, }, "stack_specific": {"zstack": {"tclk_seed": "71e31105bb92a2d15747a0d0a042dbfd"}}, "coordinator_ieee": "932ca934d9d05d12", "pan_id": "9bb0", "extended_pan_id": "0d499199aecd3c35", "nwk_update_id": 18, "security_level": 5, "channel": 15, "channel_mask": [15, 20, 25], "network_key": { "key": "9a79d69adaec45c6f2efebafdaa307b6", "sequence_number": 108, "frame_counter": 39009277, }, "devices": [ { "nwk_address": "2c59", "ieee_address": "a31af68e199523be", "link_key": { "key": "ca02e8bb757c94f89339d39cb3cda7be", "tx_counter": 2597245184, "rx_counter": 824424412, }, # "is_child": True, # Implicitly a child device }, { "nwk_address": None, "ieee_address": "690c0752aad77d71", "link_key": { "key": "857c05003e761af9689a49416a605c76", "tx_counter": 3792973670, "rx_counter": 1083290572, }, "is_child": False, }, { "nwk_address": None, "ieee_address": "a402a0dc17d817df", "is_child": True, }, { "nwk_address": "1ca0", "ieee_address": "c6df28f96033db03", "is_child": True, }, { 
"nwk_address": "16b5", "ieee_address": "7abf38a95921a07a", "is_child": False, }, { "nwk_address": "bfb9", "ieee_address": "1055fe6724ea96d3", "is_child": False, }, { "nwk_address": "1af6", "ieee_address": "9a0e1050001b1a5f", "is_child": False, }, ], } @pytest.fixture def zigate_backup_json(): return { "backup_time": "2022-07-20T17:58:16.694438+00:00", "network_info": { "extended_pan_id": "9d:ff:72:2d:19:2c:d1:01", "pan_id": "D08A", "nwk_update_id": 0, # missing "nwk_manager_id": "0000", "channel": 15, "channel_mask": [15], "security_level": 5, "network_key": { # missing "key": "ff:ff:ff:ff:ff:ff:ff:ff:ff:ff:ff:ff:ff:ff:ff:ff", "tx_counter": 0, "rx_counter": 0, "seq": 0, "partner_ieee": "ff:ff:ff:ff:ff:ff:ff:ff", }, "tc_link_key": { # missing "key": "5a:69:67:42:65:65:41:6c:6c:69:61:6e:63:65:30:39", "tx_counter": 0, "rx_counter": 0, "seq": 0, "partner_ieee": "00:15:8d:00:06:a3:fd:fe", }, "key_table": [], "children": [], "nwk_addresses": {}, "stack_specific": {}, "metadata": {"zigate": {"version": "3.21"}}, "source": "zigpy-zigate@0.9.0", }, "node_info": { "nwk": "0000", "ieee": "00:15:8d:00:06:a3:fd:fe", "logical_type": "coordinator", }, } def test_state_backup_as_dict(backup): obj = json.loads(json.dumps(backup.as_dict())) restored_backup = type(backup).from_dict(obj) assert backup == restored_backup def test_state_backup_as_open_coordinator(backup): obj = json.loads(json.dumps(backup.as_open_coordinator_json())) backup2 = zigpy.backups.NetworkBackup.from_open_coordinator_json(obj) assert backup == backup2 def test_z2m_backup_parsing(z2m_backup_json, backup): backup.network_info.metadata = None backup.network_info.source = None backup.node_info.manufacturer = None backup.node_info.model = None backup.node_info.version = None backup.network_info.tc_link_key.tx_counter = 0 for key in backup.network_info.key_table: key.seq = 0 backup2 = zigpy.backups.NetworkBackup.from_open_coordinator_json(z2m_backup_json) backup2.network_info.metadata = None 
backup2.network_info.source = None # Key order may be different backup.network_info.key_table.sort(key=lambda k: k.key) backup2.network_info.key_table.sort(key=lambda k: k.key) assert backup == backup2 def test_from_dict_automatic(z2m_backup_json): backup1 = zigpy.backups.NetworkBackup.from_open_coordinator_json(z2m_backup_json) backup2 = zigpy.backups.NetworkBackup.from_dict(z2m_backup_json) assert backup1 == backup2 def test_from_dict_failure(): with pytest.raises(ValueError): zigpy.backups.NetworkBackup.from_dict({"some": "json"}) def test_backup_compatibility(backup_factory): backup1 = backup_factory() assert backup1.is_compatible_with(backup1) # Incompatible due to different coordinator IEEE backup2 = backup_factory() backup2.node_info.ieee = t.EUI64.convert("AA:AA:AA:AA:AA:AA:AA:AA") assert not backup2.supersedes(backup1) assert not backup1.supersedes(backup2) assert not backup1.is_compatible_with(backup2) # NWK frame counter must always be greater backup3 = backup_factory() backup3.network_info.network_key.tx_counter -= 1 assert backup3.is_compatible_with(backup1) assert not backup3.supersedes(backup1) backup4 = backup_factory() backup4.network_info.network_key.tx_counter += 1 assert backup4.is_compatible_with(backup1) assert backup4.supersedes(backup1) async def test_backup_completeness(backup, zigate_backup_json): assert backup.is_complete() zigate_backup = zigpy.backups.NetworkBackup.from_dict(zigate_backup_json) assert not zigate_backup.is_complete() backups = zigpy.backups.BackupManager(None) with pytest.raises(ValueError): await backups.restore_backup(zigate_backup) async def test_add_backup(backup_factory): backups = zigpy.backups.BackupManager(None) # First backup backup1 = backup_factory() backups.add_backup(backup1) assert backups.backups == [backup1] # Adding the same backup twice will do nothing backups.add_backup(backup1) assert backups.backups == [backup1] # Adding an identical backup that is newer replaces the old one backup2 = 
backup_factory() backup2.backup_time += timedelta(hours=1) backups.add_backup(backup2) assert backups.backups == [backup2] # An even more recent one with a rolled back frame counter is appended backup3 = backup_factory() backup3.backup_time += timedelta(hours=2) backup3.network_info.network_key.tx_counter -= 1000 backups.add_backup(backup3) assert backups.backups == [backup2, backup3] # A final one replacing them both is added backup4 = backup_factory() backup4.backup_time += timedelta(hours=3) backup4.network_info.network_key.tx_counter += 1000 backups.add_backup(backup4) assert backups.backups == [backup4] # An incompatible backup will be added to the list. Nothing will be replaced. backup5 = backup_factory() backup5.network_info.pan_id += 1 backups.add_backup(backup5) assert backups.backups == [backup4, backup5] async def test_restore_backup_create_new(app, backup): backups = zigpy.backups.BackupManager(app) backups.create_backup = AsyncMock() await backups.restore_backup(backup) app.write_network_info.assert_called_once() backups.create_backup.assert_called_once() app.write_network_info.reset_mock() backups.create_backup.reset_mock() await backups.restore_backup(backup, create_new=False) app.write_network_info.assert_called_once() backups.create_backup.assert_not_called() # Won't be called zigpy-0.62.3/tests/test_config.py000066400000000000000000000117371456054056700170110ustar00rootroot00000000000000"""Test configuration.""" import warnings import pytest import voluptuous as vol import zigpy.config import zigpy.config.validators @pytest.mark.parametrize( "value, result", [ (False, False), (True, True), ("1", True), ("yes", True), ("YeS", True), ("on", True), ("oN", True), ("enable", True), ("enablE", True), (0, False), ("no", False), ("nO", False), ("off", False), ("ofF", False), ("disable", False), ("disablE", False), ], ) def test_config_validation_bool(value, result): """Test boolean config validation.""" assert zigpy.config.validators.cv_boolean(value) is 
result schema = vol.Schema({vol.Required("value"): zigpy.config.validators.cv_boolean}) validated = schema({"value": value}) assert validated["value"] is result @pytest.mark.parametrize("value", ["invalid", "not a bool", "something"]) def test_config_validation_bool_invalid(value): """Test boolean config validation.""" with pytest.raises(vol.Invalid): zigpy.config.validators.cv_boolean(value) def test_config_validation_key_not_16_list(): """Validate key fails.""" with pytest.raises(vol.Invalid): zigpy.config.validators.cv_key([0x00]) with pytest.raises(vol.Invalid): zigpy.config.validators.cv_key([0x00 for i in range(15)]) with pytest.raises(vol.Invalid): zigpy.config.validators.cv_key([0x00 for i in range(17)]) with pytest.raises(vol.Invalid): zigpy.config.validators.cv_key(None) zigpy.config.validators.cv_key([0x00 for i in range(16)]) def test_config_validation_key_not_a_byte(): """Validate key fails.""" with pytest.raises(vol.Invalid): zigpy.config.validators.cv_key([-1 for i in range(16)]) with pytest.raises(vol.Invalid): zigpy.config.validators.cv_key([256 for i in range(16)]) with pytest.raises(vol.Invalid): zigpy.config.validators.cv_key([0] * 15 + [256]) with pytest.raises(vol.Invalid): zigpy.config.validators.cv_key([0] * 15 + [-1]) with pytest.raises(vol.Invalid): zigpy.config.validators.cv_key([0] * 15 + ["x1"]) zigpy.config.validators.cv_key([0xFF for i in range(16)]) def test_config_validation_key_success(): """Validate key success.""" key = zigpy.config.validators.cv_key(zigpy.config.CONF_NWK_TC_LINK_KEY_DEFAULT) assert key.serialize() == b"ZigBeeAlliance09" @pytest.mark.parametrize( "value, result", ( (0x1234, 0x1234), ("0x1234", 0x1234), (1234, 1234), ("1234", 1234), ("001234", 1234), ("0e1234", vol.Invalid), ("1234abcd", vol.Invalid), ("0xabGG", vol.Invalid), (None, vol.Invalid), ), ) def test_config_validation_hex_number(value, result): """Test hex number config validation.""" if isinstance(result, int): assert 
zigpy.config.validators.cv_hex(value) == result else: with pytest.raises(vol.Invalid): zigpy.config.validators.cv_hex(value) @pytest.mark.parametrize( "value, result", ( (1, vol.Invalid), (11, 11), (0x11, 17), ("26", 26), (27, vol.Invalid), ("27", vol.Invalid), ), ) def test_schema_network_channel(value, result): """Test network schema for channel.""" config = {zigpy.config.CONF_NWK_CHANNEL: value} if isinstance(result, int): config = zigpy.config.SCHEMA_NETWORK(config) assert config[zigpy.config.CONF_NWK_CHANNEL] == result else: with pytest.raises(vol.Invalid): zigpy.config.SCHEMA_NETWORK(config) def test_schema_network_pan_id(): """Test Extended Pan-id.""" config = zigpy.config.SCHEMA_NETWORK({}) assert ( config[zigpy.config.CONF_NWK_EXTENDED_PAN_ID] == zigpy.config.CONF_NWK_EXTENDED_PAN_ID_DEFAULT ) config = zigpy.config.SCHEMA_NETWORK( {zigpy.config.CONF_NWK_EXTENDED_PAN_ID: "00:11:22:33:44:55:66:77"} ) assert ( config[zigpy.config.CONF_NWK_EXTENDED_PAN_ID].serialize() == b"\x77\x66\x55\x44\x33\x22\x11\x00" ) def test_schema_network_short_pan_id(): """Test Pan-id.""" config = zigpy.config.SCHEMA_NETWORK({}) assert config[zigpy.config.CONF_NWK_PAN_ID] is None config = zigpy.config.SCHEMA_NETWORK({zigpy.config.CONF_NWK_PAN_ID: 0x1234}) assert config[zigpy.config.CONF_NWK_PAN_ID].serialize() == b"\x34\x12" def test_deprecated(): """Test key deprecation.""" schema = vol.Schema( { vol.Optional("value"): vol.All( zigpy.config.validators.cv_hex, zigpy.config.validators.cv_deprecated("Test message"), ) } ) with pytest.warns(DeprecationWarning, match="Test message"): assert schema({"value": 123}) == {"value": 123} # No warnings are raised with warnings.catch_warnings(): warnings.simplefilter("error") assert schema({}) == {} zigpy-0.62.3/tests/test_datastructures.py000066400000000000000000000133701456054056700206140ustar00rootroot00000000000000import asyncio import pytest from zigpy import datastructures async def test_dynamic_bounded_semaphore_simple_locking(): """Test 
simple, serial locking/unlocking.""" sem = datastructures.PriorityDynamicBoundedSemaphore() assert "unlocked" not in repr(sem) and "locked" in repr(sem) assert sem.value == 0 assert sem.max_value == 0 assert sem.locked() # Invalid max value with pytest.raises(ValueError): sem.max_value = -1 assert sem.value == 0 assert sem.max_value == 0 assert sem.locked() # Max value is now specified sem.max_value = 1 assert not sem.locked() assert sem.max_value == 1 assert sem.value == 1 assert "unlocked" in repr(sem) # Semaphore can now be acquired async with sem: assert sem.value == 0 assert sem.locked() assert not sem.locked() assert sem.max_value == 1 assert sem.value == 1 await sem.acquire() assert sem.value == 0 assert sem.locked() sem.release() assert not sem.locked() assert sem.max_value == 1 assert sem.value == 1 with pytest.raises(ValueError): sem.release() async def test_dynamic_bounded_semaphore_multiple_locking(): """Test multiple locking/unlocking.""" sem = datastructures.PriorityDynamicBoundedSemaphore(5) assert sem.value == 5 assert not sem.locked() async with sem: assert sem.value == 4 assert not sem.locked() async with sem, sem, sem: assert sem.value == 1 assert not sem.locked() with pytest.raises(RuntimeError): async with sem: assert sem.locked() assert sem.value == 0 raise RuntimeError() assert not sem.locked() assert sem.value == 1 assert sem.value == 4 assert not sem.locked() assert sem.value == 5 assert not sem.locked() async def test_dynamic_bounded_semaphore_runtime_limit_increase(event_loop): """Test changing the max_value at runtime.""" sem = datastructures.PriorityDynamicBoundedSemaphore(2) def set_limit(n): sem.max_value = n event_loop.call_later(0.1, set_limit, 3) async with sem: # Play with the value, testing edge cases sem.max_value = 100 assert sem.value == 99 assert not sem.locked() sem.max_value = 2 assert sem.value == 1 assert not sem.locked() sem.max_value = 1 assert sem.value == 0 assert sem.locked() # Setting it to `0` seems undefined but 
we keep track of locks so it works sem.max_value = 0 assert sem.value == -1 assert sem.locked() sem.max_value = 2 assert sem.value == 1 assert not sem.locked() async with sem: assert sem.locked() assert sem.value == 0 assert sem.max_value == 2 async with sem: # We're now locked until the limit is increased pass assert not sem.locked() assert sem.value == 1 assert sem.max_value == 3 assert sem.value == 2 assert sem.max_value == 3 assert sem.value == 3 assert sem.max_value == 3 async def test_dynamic_bounded_semaphore_errors(event_loop): """Test semaphore handling errors and cancellation.""" sem = datastructures.PriorityDynamicBoundedSemaphore(1) def set_limit(n): sem.max_value = n async def acquire(): async with sem: await asyncio.sleep(60) # The first acquire call will succeed acquire1 = asyncio.create_task(acquire()) # The remaining two will stall acquire2 = asyncio.create_task(acquire()) acquire3 = asyncio.create_task(acquire()) await asyncio.sleep(0.1) # Cancel the first one, which holds the lock acquire1.cancel() # But also cancel the second one, which was waiting acquire2.cancel() with pytest.raises(asyncio.CancelledError): await acquire1 with pytest.raises(asyncio.CancelledError): await acquire2 await asyncio.sleep(0.1) # The third one will have succeeded assert sem.locked() assert sem.value == 0 assert sem.max_value == 1 acquire3.cancel() with pytest.raises(asyncio.CancelledError): await acquire3 assert not sem.locked() assert sem.value == 1 assert sem.max_value == 1 async def test_priority_lock(event_loop): """Test priority lock.""" lock = datastructures.PriorityLock() with pytest.raises(ValueError): lock.max_value = 2 assert lock.max_value == 1 # Default priority of 0 async with lock: pass # Overridden priority of 100 async with lock(priority=100): pass run_order = [] async def test_priority(priority: int, item: str): assert lock.locked() async with lock(priority=priority): run_order.append(item) # Lock first async with lock: assert lock.locked() names = { 
"1: first": 1, "5: first": 5, "1: second": 1, "1: third": 1, "5: second": 5, "-5: only": -5, "1: fourth": 1, "2: only": 2, } tasks = { name: asyncio.create_task(test_priority(priority + 0, name + "")) for name, priority in names.items() } await asyncio.sleep(0) tasks["1: second"].cancel() await asyncio.sleep(0) await asyncio.gather(*tasks.values(), return_exceptions=True) assert run_order == [ "5: first", "5: second", "2: only", "1: first", # "1: second", "1: third", "1: fourth", "-5: only", ] zigpy-0.62.3/tests/test_device.py000066400000000000000000000745061456054056700170060ustar00rootroot00000000000000import asyncio from datetime import datetime, timezone import logging from unittest.mock import call import pytest from zigpy import device, endpoint import zigpy.application import zigpy.exceptions import zigpy.ota.image as firmware from zigpy.profiles import zha import zigpy.state import zigpy.types as t from zigpy.zcl.clusters.general import Basic, Ota import zigpy.zcl.foundation as foundation from zigpy.zdo import types as zdo_t from .async_mock import ANY, AsyncMock, MagicMock, int_sentinel, patch, sentinel @pytest.fixture def dev(monkeypatch, app_mock): monkeypatch.setattr(device, "APS_REPLY_TIMEOUT_EXTENDED", 0.1) ieee = t.EUI64(map(t.uint8_t, [0, 1, 2, 3, 4, 5, 6, 7])) dev = device.Device(app_mock, ieee, 65535) node_desc = zdo_t.NodeDescriptor(1, 1, 1, 4, 5, 6, 7, 8) with patch.object( dev.zdo, "Node_Desc_req", new=AsyncMock(return_value=(0, 0xFFFF, node_desc)) ): yield dev async def test_initialize(monkeypatch, dev): async def mockrequest(nwk, tries=None, delay=None): return [0, None, [0, 1, 2, 3, 4]] async def mockepinit(self, *args, **kwargs): self.status = endpoint.Status.ZDO_INIT self.add_input_cluster(Basic.cluster_id) async def mock_ep_get_model_info(self): if self.endpoint_id == 1: return None, None elif self.endpoint_id == 2: return "Model", None elif self.endpoint_id == 3: return None, "Manufacturer" else: return "Model2", "Manufacturer2" 
monkeypatch.setattr(endpoint.Endpoint, "initialize", mockepinit) monkeypatch.setattr(endpoint.Endpoint, "get_model_info", mock_ep_get_model_info) dev.zdo.Active_EP_req = mockrequest await dev.initialize() assert dev.endpoints[0] is dev.zdo assert 1 in dev.endpoints assert 2 in dev.endpoints assert 3 in dev.endpoints assert 4 in dev.endpoints assert dev._application.device_initialized.call_count == 1 assert dev.is_initialized # First one for each is chosen assert dev.model == "Model" assert dev.manufacturer == "Manufacturer" dev.schedule_initialize() assert dev._application.device_initialized.call_count == 2 await dev.initialize() assert dev._application.device_initialized.call_count == 3 async def test_initialize_fail(dev): async def mockrequest(nwk, tries=None, delay=None): return [1, dev.nwk, []] dev.zdo.Active_EP_req = mockrequest await dev.initialize() assert not dev.is_initialized assert not dev.has_non_zdo_endpoints @patch("zigpy.device.Device.get_node_descriptor", AsyncMock()) async def test_initialize_ep_failed(monkeypatch, dev): async def mockrequest(req, nwk, tries=None, delay=None): return [0, None, [1, 2]] async def mockepinit(self): raise AttributeError monkeypatch.setattr(endpoint.Endpoint, "initialize", mockepinit) dev.zdo.request = mockrequest await dev.initialize() assert not dev.is_initialized assert dev.application.listener_event.call_count == 1 assert dev.application.listener_event.call_args[0][0] == "device_init_failure" async def test_request(dev): seq = int_sentinel.tsn async def mock_req(*args, **kwargs): dev._pending[seq].result.set_result(sentinel.result) dev.application.send_packet = AsyncMock(side_effect=mock_req) r = await dev.request(1, 2, 3, 3, seq, b"") assert r is sentinel.result assert dev._application.send_packet.call_count == 1 async def test_request_without_reply(dev): seq = int_sentinel.tsn dev._pending.new = MagicMock() dev.application.send_packet = AsyncMock() r = await dev.request(1, 2, 3, 3, seq, b"", expect_reply=False) 
assert r is None assert dev._application.send_packet.call_count == 1 assert len(dev._pending.new.mock_calls) == 0 async def test_request_tsn_error(dev): seq = int_sentinel.tsn dev._pending.new = MagicMock(side_effect=zigpy.exceptions.ControllerException()) dev.application.request = MagicMock() dev.application.send_packet = AsyncMock() # We don't leave a dangling coroutine on error with pytest.raises(zigpy.exceptions.ControllerException): await dev.request(1, 2, 3, 3, seq, b"") assert dev._application.send_packet.call_count == 0 assert dev._application.request.call_count == 0 assert len(dev._pending.new.mock_calls) == 1 async def test_failed_request(dev): assert dev.last_seen is None dev._application.send_packet = AsyncMock( side_effect=zigpy.exceptions.DeliveryError("Uh oh") ) with pytest.raises(zigpy.exceptions.DeliveryError): await dev.request(1, 2, 3, 4, 5, b"") assert dev.last_seen is None def test_skip_configuration(dev): assert dev.skip_configuration is False dev.skip_configuration = True assert dev.skip_configuration is True def test_radio_details(dev): dev.radio_details(1, 2) assert dev.lqi == 1 assert dev.rssi == 2 dev.radio_details(lqi=3) assert dev.lqi == 3 assert dev.rssi == 2 dev.radio_details(rssi=4) assert dev.lqi == 3 assert dev.rssi == 4 async def test_handle_message_read_report_conf(dev): ep = dev.add_endpoint(3) ep.add_input_cluster(0x702) tsn = 0x56 req_mock = MagicMock() dev._pending[tsn] = req_mock # Read Report Configuration Success rsp = dev.handle_message( 0x104, # profile 0x702, # cluster 3, # source EP 3, # dest EP b"\x18\x56\x09\x00\x00\x00\x00\x25\x1e\x00\x84\x03\x01\x02\x03\x04\x05\x06", # message ) # Returns decoded msg when response is not pending, None otherwise assert rsp is None assert req_mock.result.set_result.call_count == 1 cfg_sup1 = req_mock.result.set_result.call_args[0][0].attribute_configs[0] assert isinstance(cfg_sup1, zigpy.zcl.foundation.AttributeReportingConfigWithStatus) assert cfg_sup1.status == 
zigpy.zcl.foundation.Status.SUCCESS assert cfg_sup1.config.direction == 0 assert cfg_sup1.config.attrid == 0 assert cfg_sup1.config.datatype == 0x25 assert cfg_sup1.config.min_interval == 30 assert cfg_sup1.config.max_interval == 900 assert cfg_sup1.config.reportable_change == 0x060504030201 # Unsupported attributes tsn2 = 0x5B req_mock2 = MagicMock() dev._pending[tsn2] = req_mock2 rsp2 = dev.handle_message( 0x104, # profile 0x702, # cluster 3, # source EP 3, # dest EP b"\x18\x5b\x09\x86\x00\x00\x00\x86\x00\x12\x00\x86\x00\x00\x04", # message 3x("Unsupported attribute" response) ) # Returns decoded msg when response is not pending, None otherwise assert rsp2 is None cfg_unsup1, cfg_unsup2, cfg_unsup3 = req_mock2.result.set_result.call_args[0][ 0 ].attribute_configs assert ( cfg_unsup1.status == cfg_unsup2.status == cfg_unsup3.status == zigpy.zcl.foundation.Status.UNSUPPORTED_ATTRIBUTE ) assert cfg_unsup1.config.direction == 0x00 and cfg_unsup1.config.attrid == 0x0000 assert cfg_unsup2.config.direction == 0x00 and cfg_unsup2.config.attrid == 0x0012 assert cfg_unsup3.config.direction == 0x00 and cfg_unsup3.config.attrid == 0x0400 # One supported, one unsupported tsn3 = 0x5C req_mock3 = MagicMock() dev._pending[tsn3] = req_mock3 rsp3 = dev.handle_message( 0x104, # profile 0x702, # cluster 3, # source EP 3, # dest EP b"\x18\x5c\x09\x86\x00\x00\x00\x00\x00\x00\x00\x25\x1e\x00\x84\x03\x01\x02\x03\x04\x05\x06", ) assert rsp3 is None cfg_unsup4, cfg_sup2 = req_mock3.result.set_result.call_args[0][0].attribute_configs assert cfg_unsup4.status == zigpy.zcl.foundation.Status.UNSUPPORTED_ATTRIBUTE assert cfg_sup2.status == zigpy.zcl.foundation.Status.SUCCESS assert cfg_sup2.serialize() == cfg_sup1.serialize() async def test_handle_message_deserialize_error(dev): ep = dev.add_endpoint(3) dev.deserialize = MagicMock(side_effect=ValueError) ep.handle_message = MagicMock() dev.handle_message(99, 98, 3, 3, b"abcd") assert ep.handle_message.call_count == 0 def 
test_endpoint_getitem(dev): ep = dev.add_endpoint(3) assert dev[3] is ep with pytest.raises(KeyError): dev[1] async def test_broadcast(app_mock): app_mock.state.node_info.ieee = t.EUI64.convert("08:09:0A:0B:0C:0D:0E:0F") (profile, cluster, src_ep, dst_ep, data) = ( zha.PROFILE_ID, 1, 2, 3, b"\x02\x01\x00", ) await device.broadcast(app_mock, profile, cluster, src_ep, dst_ep, 0, 0, 123, data) assert app_mock.send_packet.call_count == 1 packet = app_mock.send_packet.mock_calls[0].args[0] assert packet.profile_id == profile assert packet.cluster_id == cluster assert packet.src_ep == src_ep assert packet.dst_ep == dst_ep assert packet.data.serialize() == data async def _get_node_descriptor(dev, zdo_success=True, request_success=True): async def mockrequest(nwk, tries=None, delay=None): if not request_success: raise asyncio.TimeoutError status = 0 if zdo_success else 1 return [status, nwk, zdo_t.NodeDescriptor.deserialize(b"abcdefghijklm")[0]] dev.zdo.Node_Desc_req = MagicMock(side_effect=mockrequest) return await dev.get_node_descriptor() async def test_get_node_descriptor(dev): nd = await _get_node_descriptor(dev, zdo_success=True, request_success=True) assert nd is not None assert isinstance(nd, zdo_t.NodeDescriptor) assert dev.zdo.Node_Desc_req.call_count == 1 async def test_get_node_descriptor_no_reply(dev): with pytest.raises(asyncio.TimeoutError): await _get_node_descriptor(dev, zdo_success=True, request_success=False) assert dev.zdo.Node_Desc_req.call_count == 1 async def test_get_node_descriptor_fail(dev): with pytest.raises(zigpy.exceptions.InvalidResponse): await _get_node_descriptor(dev, zdo_success=False, request_success=True) assert dev.zdo.Node_Desc_req.call_count == 1 async def test_add_to_group(dev, monkeypatch): grp_id, grp_name = 0x1234, "test group 0x1234" epmock = MagicMock(spec_set=endpoint.Endpoint) monkeypatch.setattr(endpoint, "Endpoint", MagicMock(return_value=epmock)) epmock.add_to_group = AsyncMock() dev.add_endpoint(3) dev.add_endpoint(4) 
await dev.add_to_group(grp_id, grp_name) assert epmock.add_to_group.call_count == 2 assert epmock.add_to_group.call_args[0][0] == grp_id assert epmock.add_to_group.call_args[0][1] == grp_name async def test_remove_from_group(dev, monkeypatch): grp_id = 0x1234 epmock = MagicMock(spec_set=endpoint.Endpoint) monkeypatch.setattr(endpoint, "Endpoint", MagicMock(return_value=epmock)) epmock.remove_from_group = AsyncMock() dev.add_endpoint(3) dev.add_endpoint(4) await dev.remove_from_group(grp_id) assert epmock.remove_from_group.call_count == 2 assert epmock.remove_from_group.call_args[0][0] == grp_id async def test_schedule_group_membership(dev, caplog): """Test preempting group membership scan.""" p1 = patch.object(dev, "group_membership_scan", new=AsyncMock()) caplog.set_level(logging.DEBUG) with p1 as scan_mock: dev.schedule_group_membership_scan() await asyncio.sleep(0) assert scan_mock.call_count == 1 assert scan_mock.await_count == 1 assert not [r for r in caplog.records if r.name != "asyncio"] scan_mock.reset_mock() dev.schedule_group_membership_scan() dev.schedule_group_membership_scan() await asyncio.sleep(0) assert scan_mock.await_count == 1 assert "Cancelling old group rescan" in caplog.text async def test_group_membership_scan(dev): ep = dev.add_endpoint(1) ep.status = endpoint.Status.ZDO_INIT with patch.object(ep, "group_membership_scan", new=AsyncMock()): await dev.group_membership_scan() assert ep.group_membership_scan.await_count == 1 def test_device_manufacture_id_override(dev): """Test manufacturer id override.""" assert dev.manufacturer_id is None assert dev.manufacturer_id_override is None dev.node_desc = zdo_t.NodeDescriptor(1, 64, 142, 4153, 82, 255, 0, 255, 0) assert dev.manufacturer_id == 4153 dev.manufacturer_id_override = 2345 assert dev.manufacturer_id == 2345 dev.node_desc = None assert dev.manufacturer_id == 2345 def test_device_name(dev): """Test device name property.""" assert dev.nwk == 0xFFFF assert dev.name == "0xFFFF" def 
test_device_last_seen(dev, monkeypatch): """Test the device last_seen property handles updates and broadcasts events.""" monkeypatch.setattr(dev, "listener_event", MagicMock()) assert dev.last_seen is None dev.last_seen = 0 epoch = datetime(1970, 1, 1, 0, 0, 0, 0, tzinfo=timezone.utc) assert dev.last_seen == epoch.timestamp() dev.listener_event.assert_called_once_with("device_last_seen_updated", epoch) dev.listener_event.reset_mock() dev.update_last_seen() dev.listener_event.assert_called_once_with("device_last_seen_updated", ANY) event_time = dev.listener_event.mock_calls[0].args[1] assert (event_time - datetime.now(timezone.utc)).total_seconds() < 0.1 async def test_ignore_unknown_endpoint(dev, caplog): """Test that unknown endpoints are ignored.""" dev.add_endpoint(1) with caplog.at_level(logging.DEBUG): dev.packet_received( t.ZigbeePacket( profile_id=260, cluster_id=1, src_ep=2, dst_ep=3, data=t.SerializableBytes(b"data"), src=t.AddrModeAddress( addr_mode=t.AddrMode.NWK, address=dev.nwk, ), dst=t.AddrModeAddress( addr_mode=t.AddrMode.NWK, address=0x0000, ), ) ) assert "Ignoring message on unknown endpoint" in caplog.text async def test_update_device_firmware_no_ota_cluster(dev): """Test that device firmware updates fails: no ota cluster.""" with pytest.raises(ValueError, match="Device has no OTA cluster"): await dev.update_firmware(sentinel.firmware_image, sentinel.progress_callback) dev.add_endpoint(1) dev.endpoints[1].output_clusters = MagicMock(side_effect=KeyError) with pytest.raises(ValueError, match="Device has no OTA cluster"): await dev.update_firmware(sentinel.firmware_image, sentinel.progress_callback) async def test_update_device_firmware_already_in_progress(dev, caplog): """Test that device firmware updates no ops when update is in progress.""" dev.ota_in_progress = True await dev.update_firmware(sentinel.firmware_image, sentinel.progress_callback) assert "OTA already in progress" in caplog.text async def test_update_device_firmware(monkeypatch, 
dev, caplog): """Test that device firmware updates execute the expected calls.""" tsn = 0x12 ep = dev.add_endpoint(1) cluster = zigpy.zcl.Cluster.from_id(ep, Ota.cluster_id, is_server=False) ep.add_output_cluster(Ota.cluster_id, cluster) dev.get_sequence = MagicMock(return_value=tsn) async def mockrequest(nwk, tries=None, delay=None): return [0, None, [0, 1, 2, 3, 4]] async def mockepinit(self, *args, **kwargs): self.status = endpoint.Status.ZDO_INIT self.add_input_cluster(Basic.cluster_id) async def mock_ep_get_model_info(self): if self.endpoint_id == 1: return "Model2", "Manufacturer2" monkeypatch.setattr(endpoint.Endpoint, "initialize", mockepinit) monkeypatch.setattr(endpoint.Endpoint, "get_model_info", mock_ep_get_model_info) dev.zdo.Active_EP_req = mockrequest await dev.initialize() fw_image = firmware.OTAImage() fw_image.subelements = [firmware.SubElement(tag_id=0x0000, data=b"fw_image")] fw_header = firmware.OTAImageHeader( file_version=0x12345678, image_type=0x90, manufacturer_id=0x1234, upgrade_file_id=firmware.OTAImageHeader.MAGIC_VALUE, header_version=256, header_length=56, field_control=0, stack_version=2, header_string="This is a test header!", image_size=56 + 2 + 4 + 8, ) fw_image.header = fw_header dev.application.ota.get_ota_image = MagicMock(side_effect=ValueError("No image")) def make_packet(cmd_name: str, **kwargs): req_hdr, req_cmd = cluster._create_request( general=False, command_id=cluster.commands_by_name[cmd_name].id, schema=cluster.commands_by_name[cmd_name].schema, disable_default_response=False, direction=foundation.Direction.Client_to_Server, args=(), kwargs=kwargs, ) ota_packet = t.ZigbeePacket( src=t.AddrModeAddress(addr_mode=t.AddrMode.NWK, address=dev.nwk), src_ep=1, dst=t.AddrModeAddress(addr_mode=t.AddrMode.NWK, address=0x0000), dst_ep=1, tsn=req_hdr.tsn, profile_id=260, cluster_id=cluster.cluster_id, data=t.SerializableBytes(req_hdr.serialize() + req_cmd.serialize()), lqi=255, rssi=-30, ) return ota_packet async def 
send_packet(packet: t.ZigbeePacket): if packet.cluster_id == Ota.cluster_id: hdr, cmd = cluster.deserialize(packet.data.serialize()) if isinstance(cmd, Ota.ImageNotifyCommand): dev.application.packet_received( make_packet( "query_next_image", field_control=Ota.QueryNextImageCommand.FieldControl.HardwareVersion, manufacturer_code=fw_image.header.manufacturer_id, image_type=fw_image.header.image_type, current_file_version=fw_image.header.file_version - 10, hardware_version=1, ) ) elif isinstance( cmd, Ota.ClientCommandDefs.query_next_image_response.schema ): assert cmd.status == foundation.Status.SUCCESS assert cmd.manufacturer_code == fw_image.header.manufacturer_id assert cmd.image_type == fw_image.header.image_type assert cmd.file_version == fw_image.header.file_version assert cmd.image_size == fw_image.header.image_size dev.application.packet_received( make_packet( "image_block", field_control=Ota.ImageBlockCommand.FieldControl.RequestNodeAddr, manufacturer_code=fw_image.header.manufacturer_id, image_type=fw_image.header.image_type, file_version=fw_image.header.file_version, file_offset=0, maximum_data_size=40, request_node_addr=dev.ieee, ) ) elif isinstance(cmd, Ota.ClientCommandDefs.image_block_response.schema): if cmd.file_offset == 0: assert cmd.status == foundation.Status.SUCCESS assert cmd.manufacturer_code == fw_image.header.manufacturer_id assert cmd.image_type == fw_image.header.image_type assert cmd.file_version == fw_image.header.file_version assert cmd.file_offset == 0 assert cmd.image_data == fw_image.serialize()[0:40] dev.application.packet_received( make_packet( "image_block", field_control=Ota.ImageBlockCommand.FieldControl.RequestNodeAddr, manufacturer_code=fw_image.header.manufacturer_id, image_type=fw_image.header.image_type, file_version=fw_image.header.file_version, file_offset=40, maximum_data_size=40, request_node_addr=dev.ieee, ) ) elif cmd.file_offset == 40: assert cmd.status == foundation.Status.SUCCESS assert cmd.manufacturer_code == 
fw_image.header.manufacturer_id assert cmd.image_type == fw_image.header.image_type assert cmd.file_version == fw_image.header.file_version assert cmd.file_offset == 40 assert cmd.image_data == fw_image.serialize()[40:70] dev.application.packet_received( make_packet( "upgrade_end", status=foundation.Status.SUCCESS, manufacturer_code=fw_image.header.manufacturer_id, image_type=fw_image.header.image_type, file_version=fw_image.header.file_version, ) ) elif isinstance(cmd, Ota.ClientCommandDefs.upgrade_end_response.schema): assert cmd.manufacturer_code == fw_image.header.manufacturer_id assert cmd.image_type == fw_image.header.image_type assert cmd.file_version == fw_image.header.file_version assert cmd.current_time == 0 assert cmd.upgrade_time == 0 dev.application.send_packet = AsyncMock(side_effect=send_packet) progress_callback = MagicMock() result = await dev.update_firmware(fw_image, progress_callback) assert dev.application.send_packet.await_count == 5 assert progress_callback.call_count == 2 assert progress_callback.call_args_list[0] == call(40, 70, 57.142857142857146) assert progress_callback.call_args_list[1] == call(70, 70, 100.0) assert result == foundation.Status.SUCCESS progress_callback.reset_mock() dev.application.send_packet.reset_mock() result = await dev.update_firmware( fw_image, progress_callback=progress_callback, force=True ) assert dev.application.send_packet.await_count == 5 assert progress_callback.call_count == 2 assert progress_callback.call_args_list[0] == call(40, 70, 57.142857142857146) assert progress_callback.call_args_list[1] == call(70, 70, 100.0) assert fw_image.header.file_version == 0xFFFFFFFF - 1 assert result == foundation.Status.SUCCESS # _image_query_req exception test dev.application.send_packet.reset_mock() progress_callback.reset_mock() image_notify = cluster.image_notify cluster.image_notify = AsyncMock(side_effect=zigpy.exceptions.DeliveryError("Foo")) result = await dev.update_firmware(fw_image, 
progress_callback=progress_callback) assert dev.application.send_packet.await_count == 0 assert progress_callback.call_count == 0 assert "OTA image_notify handler - exception" in caplog.text assert result == foundation.Status.FAILURE cluster.image_notify = image_notify caplog.clear() # _image_query_req exception test dev.application.send_packet.reset_mock() progress_callback.reset_mock() query_next_image_response = cluster.query_next_image_response cluster.query_next_image_response = AsyncMock( side_effect=zigpy.exceptions.DeliveryError("Foo") ) result = await dev.update_firmware(fw_image, progress_callback=progress_callback) assert dev.application.send_packet.await_count == 1 # just image notify assert progress_callback.call_count == 0 assert "OTA query_next_image handler - exception" in caplog.text assert result == foundation.Status.FAILURE cluster.query_next_image_response = query_next_image_response caplog.clear() # _image_block_req exception test dev.application.send_packet.reset_mock() progress_callback.reset_mock() image_block_response = cluster.image_block_response cluster.image_block_response = AsyncMock( side_effect=zigpy.exceptions.DeliveryError("Foo") ) result = await dev.update_firmware(fw_image, progress_callback=progress_callback) assert ( dev.application.send_packet.await_count == 2 ) # just image notify + query next image assert progress_callback.call_count == 0 assert "OTA image_block handler - exception" in caplog.text assert result == foundation.Status.FAILURE cluster.image_block_response = image_block_response caplog.clear() # _upgrade_end exception test dev.application.send_packet.reset_mock() progress_callback.reset_mock() upgrade_end_response = cluster.upgrade_end_response cluster.upgrade_end_response = AsyncMock( side_effect=zigpy.exceptions.DeliveryError("Foo") ) result = await dev.update_firmware(fw_image, progress_callback=progress_callback) assert ( dev.application.send_packet.await_count == 4 ) # just image notify, qne, and 2 img 
blocks assert progress_callback.call_count == 2 assert "OTA upgrade_end handler - exception" in caplog.text assert result == foundation.Status.FAILURE cluster.upgrade_end_response = upgrade_end_response caplog.clear() async def send_packet(packet: t.ZigbeePacket): if packet.cluster_id == Ota.cluster_id: hdr, cmd = cluster.deserialize(packet.data.serialize()) if isinstance(cmd, Ota.ImageNotifyCommand): dev.application.packet_received( make_packet( "query_next_image", field_control=Ota.QueryNextImageCommand.FieldControl.HardwareVersion, manufacturer_code=fw_image.header.manufacturer_id, image_type=fw_image.header.image_type, current_file_version=fw_image.header.file_version - 10, hardware_version=1, ) ) elif isinstance( cmd, Ota.ClientCommandDefs.query_next_image_response.schema ): assert cmd.status == foundation.Status.SUCCESS assert cmd.manufacturer_code == fw_image.header.manufacturer_id assert cmd.image_type == fw_image.header.image_type assert cmd.file_version == fw_image.header.file_version assert cmd.image_size == fw_image.header.image_size dev.application.packet_received( make_packet( "image_block", field_control=Ota.ImageBlockCommand.FieldControl.RequestNodeAddr, manufacturer_code=fw_image.header.manufacturer_id, image_type=fw_image.header.image_type, file_version=fw_image.header.file_version, file_offset=300, maximum_data_size=40, request_node_addr=dev.ieee, ) ) dev.application.send_packet = AsyncMock(side_effect=send_packet) progress_callback.reset_mock() image_block_response = cluster.image_block_response cluster.image_block_response = AsyncMock( side_effect=zigpy.exceptions.DeliveryError("Foo") ) result = await dev.update_firmware(fw_image, progress_callback=progress_callback) assert ( dev.application.send_packet.await_count == 2 ) # just image notify, qne, img block response fails assert progress_callback.call_count == 0 assert "OTA image_block handler[MALFORMED_COMMAND] - exception" in caplog.text assert result == foundation.Status.MALFORMED_COMMAND 
cluster.image_block_response = image_block_response async def test_deserialize_backwards_compat(dev): """Test that deserialization uses the method if it is overloaded.""" packet = t.ZigbeePacket( profile_id=260, cluster_id=Basic.cluster_id, src_ep=1, dst_ep=1, data=t.SerializableBytes( b"\x18\x56\x09\x00\x00\x00\x00\x25\x1e\x00\x84\x03\x01\x02\x03\x04\x05\x06" ), src=t.AddrModeAddress( addr_mode=t.AddrMode.NWK, address=dev.nwk, ), dst=t.AddrModeAddress( addr_mode=t.AddrMode.NWK, address=0x0000, ), ) ep = dev.add_endpoint(1) ep.add_input_cluster(Basic.cluster_id) dev.packet_received(packet) # Replace the method dev.deserialize = MagicMock(side_effect=dev.deserialize) dev.packet_received(packet) assert dev.deserialize.call_count == 1 async def test_request_exception_propagation(dev, event_loop): """Test that exceptions are propagated to the caller.""" tsn = 0x12 ep = dev.add_endpoint(1) ep.add_input_cluster(Basic.cluster_id) ep.deserialize = MagicMock(side_effect=RuntimeError()) dev.get_sequence = MagicMock(return_value=tsn) event_loop.call_soon( dev.packet_received, t.ZigbeePacket( profile_id=260, cluster_id=Basic.cluster_id, src_ep=1, dst_ep=1, data=t.SerializableBytes( foundation.ZCLHeader( frame_control=foundation.FrameControl( frame_type=foundation.FrameType.CLUSTER_COMMAND, is_manufacturer_specific=False, direction=foundation.Direction.Server_to_Client, disable_default_response=True, reserved=0, ), tsn=tsn, command_id=foundation.GeneralCommand.Default_Response, manufacturer=None, ).serialize() + ( foundation.GENERAL_COMMANDS[ foundation.GeneralCommand.Default_Response ] .schema( command_id=Basic.ServerCommandDefs.reset_fact_default.id, status=foundation.Status.SUCCESS, ) .serialize() ) ), src=t.AddrModeAddress( addr_mode=t.AddrMode.NWK, address=dev.nwk, ), dst=t.AddrModeAddress( addr_mode=t.AddrMode.NWK, address=0x0000, ), ), ) with pytest.raises(zigpy.exceptions.ParsingError) as exc: await ep.basic.reset_fact_default() assert type(exc.value.__cause__) is 
RuntimeError zigpy-0.62.3/tests/test_endpoint.py000066400000000000000000000407201456054056700173560ustar00rootroot00000000000000import asyncio import pytest from zigpy import endpoint, group import zigpy.device import zigpy.exceptions import zigpy.types as t import zigpy.zcl as zcl from zigpy.zcl.foundation import GENERAL_COMMANDS, GeneralCommand, Status as ZCLStatus from zigpy.zdo import types from .async_mock import AsyncMock, MagicMock, patch, sentinel @pytest.fixture def ep(): dev = MagicMock() dev.request = AsyncMock() dev.reply = AsyncMock() return endpoint.Endpoint(dev, 1) async def _test_initialize(ep, profile): async def mockrequest(nwk, epid, tries=None, delay=None): sd = types.SimpleDescriptor() sd.endpoint = 1 sd.profile = profile sd.device_type = 0xFF sd.input_clusters = [5] sd.output_clusters = [6] return [0, None, sd] ep._device.zdo.Simple_Desc_req = mockrequest await ep.initialize() assert ep.status > endpoint.Status.NEW assert 5 in ep.in_clusters assert 6 in ep.out_clusters async def test_inactive_initialize(ep): async def mockrequest(nwk, epid, tries=None, delay=None): sd = types.SimpleDescriptor() sd.endpoint = 2 return [131, None, sd] ep._device.zdo.Simple_Desc_req = mockrequest await ep.initialize() assert ep.status == endpoint.Status.ENDPOINT_INACTIVE async def test_initialize_zha(ep): return await _test_initialize(ep, 260) async def test_initialize_zll(ep): return await _test_initialize(ep, 49246) async def test_initialize_other(ep): return await _test_initialize(ep, 0x1234) async def test_initialize_fail(ep): async def mockrequest(nwk, epid, tries=None, delay=None): return [1, None, None] ep._device.zdo.Simple_Desc_req = mockrequest # The request succeeds but the response is invalid with pytest.raises(zigpy.exceptions.InvalidResponse): await ep.initialize() assert ep.status == endpoint.Status.NEW async def test_reinitialize(ep): await _test_initialize(ep, 260) assert ep.profile_id == 260 ep.profile_id = 10 await _test_initialize(ep, 260) 
assert ep.profile_id == 10 def test_add_input_cluster(ep): ep.add_input_cluster(0) assert 0 in ep.in_clusters assert ep.in_clusters[0].is_server is True assert ep.in_clusters[0].is_client is False def test_add_custom_input_cluster(ep): mock_cluster = MagicMock() ep.add_input_cluster(0, mock_cluster) assert 0 in ep.in_clusters assert ep.in_clusters[0] is mock_cluster def test_add_output_cluster(ep): ep.add_output_cluster(0) assert 0 in ep.out_clusters assert ep.out_clusters[0].is_server is False assert ep.out_clusters[0].is_client is True def test_add_custom_output_cluster(ep): mock_cluster = MagicMock() ep.add_output_cluster(0, mock_cluster) assert 0 in ep.out_clusters assert ep.out_clusters[0] is mock_cluster def test_multiple_add_input_cluster(ep): ep.add_input_cluster(0) assert ep.in_clusters[0].cluster_id == 0 ep.in_clusters[0].cluster_id = 1 assert ep.in_clusters[0].cluster_id == 1 ep.add_input_cluster(0) assert ep.in_clusters[0].cluster_id == 1 def test_multiple_add_output_cluster(ep): ep.add_output_cluster(0) assert ep.out_clusters[0].cluster_id == 0 ep.out_clusters[0].cluster_id = 1 assert ep.out_clusters[0].cluster_id == 1 ep.add_output_cluster(0) assert ep.out_clusters[0].cluster_id == 1 def test_handle_message(ep): c = ep.add_input_cluster(0) c.handle_message = MagicMock() ep.handle_message(sentinel.profile, 0, sentinel.hdr, sentinel.data) c.handle_message.assert_called_once_with( sentinel.hdr, sentinel.data, dst_addressing=None ) def test_handle_message_output(ep): c = ep.add_output_cluster(0) c.handle_message = MagicMock() ep.handle_message(sentinel.profile, 0, sentinel.hdr, sentinel.data) c.handle_message.assert_called_once_with( sentinel.hdr, sentinel.data, dst_addressing=None ) def test_handle_request_unknown(ep): hdr = MagicMock() hdr.command_id = sentinel.command_id ep.handle_message(sentinel.profile, 99, hdr, sentinel.args) def test_cluster_attr(ep): with pytest.raises(AttributeError): ep.basic ep.add_input_cluster(0) ep.basic async def 
test_request(ep): ep.profile_id = 260 await ep.request(7, 8, b"") assert ep._device.request.call_count == 1 assert ep._device.request.await_count == 1 async def test_request_change_profileid(ep): ep.profile_id = 49246 await ep.request(7, 9, b"") ep.profile_id = 49246 await ep.request(0x1000, 10, b"") ep.profile_id = 260 await ep.request(0x1000, 11, b"") assert ep._device.request.call_count == 3 assert ep._device.request.await_count == 3 async def test_reply(ep): ep.profile_id = 260 await ep.reply(7, 8, b"") assert ep._device.reply.call_count == 1 async def test_reply_change_profile_id(ep): ep.profile_id = 49246 await ep.reply(0x1000, 8, b"", 0x3F) assert ep._device.reply.call_count == 1 assert ep._device.reply.call_args[0][0] == ep.profile_id await ep.reply(0x1000, 8, b"", 0x40) assert ep._device.reply.call_count == 2 assert ep._device.reply.call_args[0][0] == 0x0104 ep.profile_id = 0xBEEF await ep.reply(0x1000, 8, b"", 0x40) assert ep._device.reply.call_count == 3 assert ep._device.reply.call_args[0][0] == ep.profile_id def _mk_rar(attrid, value, status=0): r = zcl.foundation.ReadAttributeRecord() r.attrid = attrid r.status = status r.value = zcl.foundation.TypeValue() r.value.value = value return r def _get_model_info(ep, attributes={}): clus = ep.add_input_cluster(0) assert 0 in ep.in_clusters assert ep.in_clusters[0] is clus async def mockrequest( foundation, command, schema, args, manufacturer=None, **kwargs ): assert foundation is True assert command == 0 result = [] for attr_id, value in zip(args, attributes[tuple(args)]): if isinstance(value, BaseException): raise value elif value is None: rar = _mk_rar(attr_id, None, status=1) else: raw_attr_value = t.uint8_t(len(value)).serialize() + value rar = _mk_rar(attr_id, t.CharacterString.deserialize(raw_attr_value)[0]) result.append(rar) return [result] clus.request = mockrequest return ep.get_model_info() async def test_get_model_info(ep): mod, man = await _get_model_info( ep, attributes={ (0x0004, 0x0005): 
(b"Mock Manufacturer", b"Mock Model"), }, ) assert man == "Mock Manufacturer" assert mod == "Mock Model" async def test_init_endpoint_info_none(ep): mod, man = await _get_model_info( ep, attributes={ (0x0004, 0x0005): (None, None), (0x0004,): (None,), (0x0005,): (None,), }, ) assert man is None assert mod is None async def test_get_model_info_missing_basic_cluster(ep): assert zcl.clusters.general.Basic.cluster_id not in ep.in_clusters model, manuf = await ep.get_model_info() assert model is None assert manuf is None async def test_init_endpoint_info_null_padded_manuf(ep): mod, man = await _get_model_info( ep, attributes={ (0x0004, 0x0005): ( b"Mock Manufacturer\x00\x04\\\x00\\\x00\x00\x00\x00\x00\x07", b"Mock Model", ), }, ) assert man == "Mock Manufacturer" assert mod == "Mock Model" async def test_init_endpoint_info_null_padded_model(ep): mod, man = await _get_model_info( ep, attributes={ (0x0004, 0x0005): ( b"Mock Manufacturer", b"Mock Model\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", ), }, ) assert man == "Mock Manufacturer" assert mod == "Mock Model" async def test_init_endpoint_info_null_padded_manuf_model(ep): mod, man = await _get_model_info( ep, attributes={ (0x0004, 0x0005): ( b"Mock Manufacturer\x00\x04\\\x00\\\x00\x00\x00\x00\x00\x07", b"Mock Model\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", ), }, ) assert man == "Mock Manufacturer" assert mod == "Mock Model" async def test_get_model_info_delivery_error(ep): with pytest.raises(zigpy.exceptions.ZigbeeException): await _get_model_info( ep, attributes={ (0x0004, 0x0005): ( zigpy.exceptions.ZigbeeException(), zigpy.exceptions.ZigbeeException(), ) }, ) async def test_get_model_info_timeout(ep): with pytest.raises(asyncio.TimeoutError): await _get_model_info( ep, attributes={ (0x0004, 0x0005): (asyncio.TimeoutError(), asyncio.TimeoutError()), (0x0004,): (asyncio.TimeoutError(),), (0x0005,): (asyncio.TimeoutError(),), }, ) async def test_get_model_info_double_read_timeout(ep): mod, man = 
await _get_model_info( ep, attributes={ # The double read fails (0x0004, 0x0005): (asyncio.TimeoutError(), asyncio.TimeoutError()), # But individually the attributes can be read (0x0004,): (b"Mock Manufacturer",), (0x0005,): (b"Mock Model",), }, ) assert man == "Mock Manufacturer" assert mod == "Mock Model" def _group_add_mock(ep, status=ZCLStatus.SUCCESS, no_groups_cluster=False): async def mock_req(*args, **kwargs): return [status, sentinel.group_id] if not no_groups_cluster: ep.add_input_cluster(4) ep.request = MagicMock(side_effect=mock_req) ep.device.application.groups = MagicMock(spec_set=group.Groups) return ep @pytest.mark.parametrize("status", (ZCLStatus.SUCCESS, ZCLStatus.DUPLICATE_EXISTS)) async def test_add_to_group(ep, status): ep = _group_add_mock(ep, status=status) grp_id, grp_name = 0x1234, "Group 0x1234**" res = await ep.add_to_group(grp_id, grp_name) assert res == status assert ep.request.call_count == 1 groups = ep.device.application.groups assert groups.add_group.call_count == 1 assert groups.remove_group.call_count == 0 assert groups.add_group.call_args[0][0] == grp_id assert groups.add_group.call_args[0][1] == grp_name async def test_add_to_group_no_groups(ep): ep = _group_add_mock(ep, no_groups_cluster=True) grp_id, grp_name = 0x1234, "Group 0x1234**" res = await ep.add_to_group(grp_id, grp_name) assert res != ZCLStatus.SUCCESS assert ep.request.call_count == 0 groups = ep.device.application.groups assert groups.add_group.call_count == 0 assert groups.remove_group.call_count == 0 @pytest.mark.parametrize( "status", (s for s in ZCLStatus if s not in (ZCLStatus.SUCCESS, ZCLStatus.DUPLICATE_EXISTS)), ) async def test_add_to_group_fail(ep, status): ep = _group_add_mock(ep, status=status) grp_id, grp_name = 0x1234, "Group 0x1234**" res = await ep.add_to_group(grp_id, grp_name) assert res != ZCLStatus.SUCCESS assert ep.request.call_count == 1 groups = ep.device.application.groups assert groups.add_group.call_count == 0 assert 
groups.remove_group.call_count == 0 def _group_remove_mock(ep, success=True, no_groups_cluster=False, not_member=False): async def mock_req(*args, **kwargs): if success: return [ZCLStatus.SUCCESS, sentinel.group_id] return [ZCLStatus.DUPLICATE_EXISTS, sentinel.group_id] if not no_groups_cluster: ep.add_input_cluster(4) ep.request = MagicMock(side_effect=mock_req) ep.device.application.groups = MagicMock(spec_set=group.Groups) grp = MagicMock(spec_set=group.Group) ep.device.application.groups.__contains__.return_value = not not_member ep.device.application.groups.__getitem__.return_value = grp return ep, grp async def test_remove_from_group(ep): grp_id = 0x1234 ep, grp_mock = _group_remove_mock(ep) res = await ep.remove_from_group(grp_id) assert res == ZCLStatus.SUCCESS assert ep.request.call_count == 1 groups = ep.device.application.groups assert groups.add_group.call_count == 0 assert groups.remove_group.call_count == 0 assert groups.__getitem__.call_args[0][0] == grp_id assert grp_mock.add_member.call_count == 0 assert grp_mock.remove_member.call_count == 1 assert grp_mock.remove_member.call_args[0][0] == ep async def test_remove_from_group_no_groups_cluster(ep): grp_id = 0x1234 ep, grp_mock = _group_remove_mock(ep, no_groups_cluster=True) res = await ep.remove_from_group(grp_id) assert res != ZCLStatus.SUCCESS assert ep.request.call_count == 0 groups = ep.device.application.groups assert groups.add_group.call_count == 0 assert groups.remove_group.call_count == 0 assert grp_mock.add_member.call_count == 0 assert grp_mock.remove_member.call_count == 0 async def test_remove_from_group_fail(ep): grp_id = 0x1234 ep, grp_mock = _group_remove_mock(ep, success=False) res = await ep.remove_from_group(grp_id) assert res != ZCLStatus.SUCCESS assert ep.request.call_count == 1 groups = ep.device.application.groups assert groups.add_group.call_count == 0 assert groups.remove_group.call_count == 0 assert grp_mock.add_member.call_count == 0 assert 
grp_mock.remove_member.call_count == 0 def test_ep_manufacturer(ep): ep.device.manufacturer = sentinel.device_manufacturer assert ep.manufacturer is sentinel.device_manufacturer ep.manufacturer = sentinel.ep_manufacturer assert ep.manufacturer is sentinel.ep_manufacturer def test_ep_model(ep): ep.device.model = sentinel.device_model assert ep.model is sentinel.device_model ep.model = sentinel.ep_model assert ep.model is sentinel.ep_model async def test_group_membership_scan(ep): """Test group membership scan.""" ep.device.application.groups.update_group_membership = MagicMock() await ep.group_membership_scan() assert ep.device.application.groups.update_group_membership.call_count == 0 assert ep.device.request.call_count == 0 ep.add_input_cluster(4) ep.device.request.return_value = [0, [1, 3, 7]] await ep.group_membership_scan() assert ep.device.application.groups.update_group_membership.call_count == 1 assert ep.device.application.groups.update_group_membership.call_args[0][1] == { 1, 3, 7, } assert ep.device.request.call_count == 1 async def test_group_membership_scan_fail(ep): """Test group membership scan failure.""" ep.device.application.groups.update_group_membership = MagicMock() ep.add_input_cluster(4) ep.device.request.side_effect = asyncio.TimeoutError await ep.group_membership_scan() assert ep.device.application.groups.update_group_membership.call_count == 0 assert ep.device.request.call_count == 1 async def test_group_membership_scan_fail_default_response(ep, caplog): """Test group membership scan failure because group commands are unsupported.""" ep.device.application.groups.update_group_membership = MagicMock() ep.add_input_cluster(4) ep.device.request.side_effect = asyncio.TimeoutError with patch.object(ep.groups, "get_membership", new=AsyncMock()) as get_membership: get_membership.return_value = GENERAL_COMMANDS[ GeneralCommand.Default_Response ].schema(command_id=2, status=ZCLStatus.UNSUP_CLUSTER_COMMAND) await ep.group_membership_scan() assert 
"Device does not support group commands" in caplog.text assert ep.device.application.groups.update_group_membership.call_count == 0 def test_endpoint_manufacturer_id(ep): """Test manufacturer id.""" ep.device.manufacturer_id = sentinel.manufacturer_id assert ep.manufacturer_id is sentinel.manufacturer_id def test_endpoint_repr(ep): ep.status = endpoint.Status.ZDO_INIT # All standard ep.add_input_cluster(0x0001) ep.add_input_cluster(0x0002) ep.add_output_cluster(0x0006) ep.add_output_cluster(0x0008) # Spec-violating but still happens (https://github.com/zigpy/zigpy/issues/758) ep.add_input_cluster(0xEF00) assert "ZDO_INIT" in repr(ep) assert "power:0x0001" in repr(ep) assert "device_temperature:0x0002" in repr(ep) assert "on_off:0x0006" in repr(ep) assert "level:0x0008" in repr(ep) assert "0xEF00" in repr(ep) zigpy-0.62.3/tests/test_group.py000066400000000000000000000257211456054056700166760ustar00rootroot00000000000000import pytest import zigpy.device import zigpy.endpoint import zigpy.group import zigpy.types as t import zigpy.zcl from .async_mock import AsyncMock, MagicMock, call, sentinel FIXTURE_GRP_ID = 0x1001 FIXTURE_GRP_NAME = "fixture group" @pytest.fixture def endpoint(app_mock): ieee = t.EUI64(map(t.uint8_t, [0, 1, 2, 3, 4, 5, 6, 7])) dev = zigpy.device.Device(app_mock, ieee, 65535) return zigpy.endpoint.Endpoint(dev, 3) @pytest.fixture def groups(app_mock): groups = zigpy.group.Groups(app_mock) groups.listener_event = MagicMock() groups.add_group(FIXTURE_GRP_ID, FIXTURE_GRP_NAME, suppress_event=True) return groups @pytest.fixture def group(): groups_mock = MagicMock(spec_set=zigpy.group.Groups) groups_mock.application.mrequest = AsyncMock() return zigpy.group.Group(FIXTURE_GRP_ID, FIXTURE_GRP_NAME, groups_mock) @pytest.fixture def group_endpoint(group): group.request = AsyncMock() return zigpy.group.GroupEndpoint(group) def test_add_group(groups, monkeypatch): monkeypatch.setattr( zigpy.group, "Group", MagicMock(spec_set=zigpy.group.Group, 
return_value=sentinel.group), ) grp_id, grp_name = 0x1234, "Group Name for 0x1234 group." assert grp_id not in groups ret = groups.add_group(grp_id, grp_name) assert groups.listener_event.call_count == 1 assert ret is sentinel.group groups.listener_event.reset_mock() ret = groups.add_group(grp_id, grp_name) assert groups.listener_event.call_count == 0 assert ret is sentinel.group def test_add_group_no_evt(groups, monkeypatch): monkeypatch.setattr( zigpy.group, "Group", MagicMock(spec_set=zigpy.group.Group, return_value=sentinel.group), ) grp_id, grp_name = 0x1234, "Group Name for 0x1234 group." assert grp_id not in groups ret = groups.add_group(grp_id, grp_name, suppress_event=True) assert groups.listener_event.call_count == 0 assert ret is sentinel.group groups.listener_event.reset_mock() ret = groups.add_group(grp_id, grp_name) assert groups.listener_event.call_count == 0 assert ret is sentinel.group def test_pop_group_id(groups, endpoint): group = groups[FIXTURE_GRP_ID] group.add_member(endpoint) group.remove_member = MagicMock(side_effect=group.remove_member) groups.listener_event.reset_mock() assert FIXTURE_GRP_ID in groups grp = groups.pop(FIXTURE_GRP_ID) assert isinstance(grp, zigpy.group.Group) assert FIXTURE_GRP_ID not in groups assert groups.listener_event.call_count == 2 assert group.remove_member.call_count == 1 assert group.remove_member.call_args[0][0] is endpoint with pytest.raises(KeyError): groups.pop(FIXTURE_GRP_ID) def test_pop_group(groups, endpoint): assert FIXTURE_GRP_ID in groups group = groups[FIXTURE_GRP_ID] group.add_member(endpoint) group.remove_member = MagicMock(side_effect=group.remove_member) groups.listener_event.reset_mock() grp = groups.pop(group) assert isinstance(grp, zigpy.group.Group) assert FIXTURE_GRP_ID not in groups assert groups.listener_event.call_count == 2 assert group.remove_member.call_count == 1 assert group.remove_member.call_args[0][0] is endpoint with pytest.raises(KeyError): groups.pop(grp) def 
test_group_add_member(group, endpoint): listener = MagicMock() group.add_listener(listener) assert endpoint.unique_id not in group.members assert FIXTURE_GRP_ID not in endpoint.member_of group.add_member(endpoint) assert endpoint.unique_id in group.members assert FIXTURE_GRP_ID in endpoint.member_of assert listener.member_added.call_count == 1 assert listener.member_removed.call_count == 0 listener.reset_mock() group.add_member(endpoint) assert listener.member_added.call_count == 0 assert listener.member_removed.call_count == 0 group.__repr__() assert group.name == FIXTURE_GRP_NAME with pytest.raises(ValueError): group.add_member(endpoint.endpoint_id) def test_group_add_member_no_evt(group, endpoint): listener = MagicMock() group.add_listener(listener) assert endpoint.unique_id not in group group.add_member(endpoint, suppress_event=True) assert endpoint.unique_id in group assert FIXTURE_GRP_ID in endpoint.member_of assert listener.member_added.call_count == 0 assert listener.member_removed.call_count == 0 def test_noname_group(): group = zigpy.group.Group(FIXTURE_GRP_ID) assert group.name.startswith("No name group ") def test_group_remove_member(group, endpoint): listener = MagicMock() group.add_listener(listener) group.add_member(endpoint, suppress_event=True) assert endpoint.unique_id in group assert FIXTURE_GRP_ID in endpoint.member_of group.remove_member(endpoint) assert endpoint.unique_id not in group assert FIXTURE_GRP_ID not in endpoint.member_of assert listener.member_added.call_count == 0 assert listener.member_removed.call_count == 1 def test_group_magic_methods(group, endpoint): group.add_member(endpoint, suppress_event=True) assert endpoint.unique_id in group.members assert endpoint.unique_id in group assert group[endpoint.unique_id] is endpoint def test_groups_properties(groups: zigpy.group.Groups): """Test groups properties.""" assert groups.application is not None def test_group_properties(group: zigpy.group.Group): """Test group properties.""" 
assert group.application is not None assert group.groups is not None assert isinstance(group.endpoint, zigpy.group.GroupEndpoint) def test_group_cluster_from_cluster_id(): """Group cluster by cluster id.""" cls = zigpy.group.GroupCluster.from_id(MagicMock(), 6) assert isinstance(cls, zigpy.zcl.Cluster) with pytest.raises(KeyError): zigpy.group.GroupCluster.from_id(MagicMock(), 0xFFFF) def test_group_cluster_from_cluster_name(): """Group cluster by cluster name.""" cls = zigpy.group.GroupCluster.from_attr(MagicMock(), "on_off") assert isinstance(cls, zigpy.zcl.Cluster) with pytest.raises(AttributeError): zigpy.group.GroupCluster.from_attr(MagicMock(), "no_such_cluster") async def test_group_ep_request(group_endpoint): on_off = zigpy.group.GroupCluster.from_attr(group_endpoint, "on_off") await on_off.on() assert group_endpoint.device.request.mock_calls == [ call( 260, # profile 0x0006, # cluster 1, # sequence b"\x01\x01\x01", # data ) ] def test_group_ep_reply(group_endpoint): group_endpoint.request = MagicMock() group_endpoint.reply( sentinel.cluster, sentinel.seq, sentinel.data, sentinel.extra_arg, extra_kwarg=sentinel.extra_kwarg, ) assert group_endpoint.request.call_count == 1 assert group_endpoint.request.call_args[0][0] is sentinel.cluster assert group_endpoint.request.call_args[0][1] is sentinel.seq assert group_endpoint.request.call_args[0][2] is sentinel.data assert group_endpoint.request.call_args[0][3] is sentinel.extra_arg assert group_endpoint.request.call_args[1]["extra_kwarg"] is sentinel.extra_kwarg def test_group_ep_by_cluster_id(group_endpoint, monkeypatch): clusters = {} group_endpoint._clusters = MagicMock(return_value=clusters) group_endpoint._clusters.__getitem__.side_effect = clusters.__getitem__ group_endpoint._clusters.__setitem__.side_effect = clusters.__setitem__ group_cluster_mock = MagicMock() group_cluster_mock.from_id.return_value = sentinel.group_cluster monkeypatch.setattr(zigpy.group, "GroupCluster", group_cluster_mock) assert 
len(clusters) == 0 cluster = group_endpoint[6] assert cluster is sentinel.group_cluster assert group_cluster_mock.from_id.call_count == 1 assert len(clusters) == 1 cluster = group_endpoint[6] assert cluster is sentinel.group_cluster assert group_cluster_mock.from_id.call_count == 1 def test_group_ep_by_cluster_attr(group_endpoint, monkeypatch): cluster_by_attr = {} group_endpoint._cluster_by_attr = MagicMock(return_value=cluster_by_attr) group_endpoint._cluster_by_attr.__getitem__.side_effect = ( cluster_by_attr.__getitem__ ) group_endpoint._cluster_by_attr.__setitem__.side_effect = ( cluster_by_attr.__setitem__ ) group_cluster_mock = MagicMock() group_cluster_mock.from_attr.return_value = sentinel.group_cluster monkeypatch.setattr(zigpy.group, "GroupCluster", group_cluster_mock) assert len(cluster_by_attr) == 0 cluster = group_endpoint.on_off assert cluster is sentinel.group_cluster assert group_cluster_mock.from_attr.call_count == 1 assert len(cluster_by_attr) == 1 cluster = group_endpoint.on_off assert cluster is sentinel.group_cluster assert group_cluster_mock.from_attr.call_count == 1 async def test_group_request(group): group.application.send_packet = AsyncMock() data = b"\x01\x02\x03\x04\x05" res = await group.request( sentinel.profile, sentinel.cluster, sentinel.sequence, data, ) assert group.application.send_packet.call_count == 1 packet = group.application.send_packet.mock_calls[0].args[0] assert packet.dst == t.AddrModeAddress( addr_mode=t.AddrMode.Group, address=group.group_id ) assert packet.profile_id is sentinel.profile assert packet.cluster_id is sentinel.cluster assert packet.tsn is sentinel.sequence assert packet.data.serialize() == data assert res.status is zigpy.zcl.foundation.Status.SUCCESS assert res.command_id == data[2] def test_update_group_membership_remove_member(groups, endpoint): """New device is not member of the old groups.""" groups[FIXTURE_GRP_ID].add_member(endpoint) assert endpoint.unique_id in groups[FIXTURE_GRP_ID] 
groups.update_group_membership(endpoint, set()) assert endpoint.unique_id not in groups[FIXTURE_GRP_ID] def test_update_group_membership_remove_add(groups, endpoint): """New device is not member of the old group, but member of new one.""" groups[FIXTURE_GRP_ID].add_member(endpoint) assert endpoint.unique_id in groups[FIXTURE_GRP_ID] new_group_id = 0x1234 assert new_group_id not in groups groups.update_group_membership(endpoint, {new_group_id}) assert endpoint.unique_id not in groups[FIXTURE_GRP_ID] assert new_group_id in groups assert endpoint.unique_id in groups[new_group_id] def test_update_group_membership_add_existing(groups, endpoint): """New device is member of new and existing groups.""" groups[FIXTURE_GRP_ID].add_member(endpoint) assert endpoint.unique_id in groups[FIXTURE_GRP_ID] new_group_id = 0x1234 groups.add_group(new_group_id) assert new_group_id in groups groups.update_group_membership(endpoint, {new_group_id, FIXTURE_GRP_ID}) assert endpoint.unique_id in groups[FIXTURE_GRP_ID] assert new_group_id in groups assert endpoint.unique_id in groups[new_group_id] zigpy-0.62.3/tests/test_listeners.py000066400000000000000000000132611456054056700175460ustar00rootroot00000000000000import asyncio import logging from unittest import mock import pytest from zigpy import listeners from zigpy.zcl import foundation import zigpy.zcl.clusters.general import zigpy.zdo.types as zdo_t def make_hdr(cmd, **kwargs): return foundation.ZCLHeader.cluster(tsn=0x12, command_id=cmd.command.id, **kwargs) query_next_image = zigpy.zcl.clusters.general.Ota.commands_by_name[ "query_next_image" ].schema on = zigpy.zcl.clusters.general.OnOff.commands_by_name["on"].schema off = zigpy.zcl.clusters.general.OnOff.commands_by_name["off"].schema toggle = zigpy.zcl.clusters.general.OnOff.commands_by_name["toggle"].schema async def test_future_listener(): listener = listeners.FutureListener( matchers=[ query_next_image(manufacturer_code=0x1234), on(), lambda hdr, cmd: hdr.command_id == 0x02, ], 
future=asyncio.get_running_loop().create_future(), ) assert not listener.resolve(make_hdr(off()), off()) assert not listener.resolve( make_hdr(query_next_image()), query_next_image( field_control=0, manufacturer_code=0x5678, # wrong `manufacturer_code` image_type=0x0000, current_file_version=0x00000000, ), ) # Only `on()` matches assert listener.resolve(make_hdr(on()), on()) assert listener.future.result() == (make_hdr(on()), on()) # Subsequent matches will not work assert not listener.resolve(make_hdr(on()), on()) # Reset the future object.__setattr__(listener, "future", asyncio.get_running_loop().create_future()) valid_query = query_next_image( field_control=0, manufacturer_code=0x1234, # correct `manufacturer_code` image_type=0x0000, current_file_version=0x00000000, ) assert listener.resolve(make_hdr(valid_query), valid_query) assert listener.future.result() == (make_hdr(valid_query), valid_query) # Reset the future object.__setattr__(listener, "future", asyncio.get_running_loop().create_future()) # Function matcher works assert listener.resolve(make_hdr(toggle()), toggle()) assert listener.future.result() == (make_hdr(toggle()), toggle()) async def test_future_listener_cancellation(): listener = listeners.FutureListener( matchers=[], future=asyncio.get_running_loop().create_future(), ) assert listener.cancel() assert listener.cancel() assert listener.cancel() with pytest.raises(asyncio.CancelledError): await listener.future async def test_callback_listener(): listener = listeners.CallbackListener( matchers=[ query_next_image(manufacturer_code=0x1234), on(), ], callback=mock.Mock(), ) assert not listener.resolve(make_hdr(off()), off()) assert not listener.resolve( make_hdr(query_next_image()), query_next_image( field_control=0, manufacturer_code=0x5678, # wrong `manufacturer_code` image_type=0x0000, current_file_version=0x00000000, ), ) # Only `on()` matches assert listener.resolve(make_hdr(on()), on()) assert listener.callback.mock_calls == 
[mock.call(make_hdr(on()), on())] # Subsequent matches still work assert not listener.cancel() # cancellation is not supported assert listener.resolve(make_hdr(on()), on()) assert listener.callback.mock_calls == [ mock.call(make_hdr(on()), on()), mock.call(make_hdr(on()), on()), ] async def test_callback_listener_error(caplog): listener = listeners.CallbackListener( matchers=[ on(), ], callback=mock.Mock(side_effect=RuntimeError("Uh oh")), ) with caplog.at_level(logging.WARNING): assert listener.resolve(make_hdr(on()), on()) assert "Caught an exception while executing callback" in caplog.text assert "RuntimeError: Uh oh" in caplog.text async def test_listener_callback_matches(): listener = listeners.CallbackListener( matchers=[lambda hdr, command: True], callback=mock.Mock(), ) assert listener.resolve(make_hdr(off()), off()) assert listener.callback.mock_calls == [mock.call(make_hdr(off()), off())] async def test_listener_callback_no_matches(): listener = listeners.CallbackListener( matchers=[lambda hdr, command: False], callback=mock.Mock(), ) assert not listener.resolve(make_hdr(off()), off()) assert listener.callback.mock_calls == [] async def test_listener_callback_invalid_matcher(caplog): listener = listeners.CallbackListener( matchers=[object()], callback=mock.Mock(), ) with caplog.at_level(logging.WARNING): assert not listener.resolve(make_hdr(off()), off()) assert listener.callback.mock_calls == [] assert f"Matcher {listener.matchers[0]!r} and command" in caplog.text async def test_listener_callback_invalid_call(caplog): listener = listeners.CallbackListener( matchers=[on()], callback=mock.Mock(), ) with caplog.at_level(logging.WARNING): assert not listener.resolve(make_hdr(on()), b"data") assert listener.callback.mock_calls == [] assert f"Matcher {listener.matchers[0]!r} and command" in caplog.text async def test_listener_callback_zdo(caplog): listener = listeners.CallbackListener( matchers=[ query_next_image(manufacturer_code=0x1234), ], 
callback=mock.Mock(), ) zdo_hdr = zdo_t.ZDOHeader(command_id=zdo_t.ZDOCmd.NWK_addr_req, tsn=0x01) zdo_cmd = [0x0000] with caplog.at_level(logging.WARNING): assert not listener.resolve(zdo_hdr, zdo_cmd) assert caplog.text == "" zigpy-0.62.3/tests/test_ota.py000066400000000000000000000174141456054056700163250ustar00rootroot00000000000000import datetime import pytest import zigpy.application from zigpy.config import ( CONF_OTA, CONF_OTA_ALLOW_FILE_PROVIDERS, CONF_OTA_ALLOW_FILE_PROVIDERS_STRING, CONF_OTA_DIR, ) import zigpy.ota import zigpy.ota.image import zigpy.ota.provider import zigpy.ota.validators from .async_mock import AsyncMock, MagicMock, patch, sentinel from .conftest import make_app MANUFACTURER_ID = sentinel.manufacturer_id IMAGE_TYPE = sentinel.image_type @pytest.fixture def image_with_version(): def img(version=100): img = zigpy.ota.image.OTAImage() img.header = zigpy.ota.image.OTAImageHeader() img.header.manufacturer_id = MANUFACTURER_ID img.header.image_type = IMAGE_TYPE img.header.file_version = version img.subelements = [ zigpy.ota.image.SubElement.deserialize(b"\x00\x00\x04\x00\x00\x00abcdef")[0] ] return img return img @pytest.fixture def image(image_with_version): return image_with_version() @pytest.fixture def key(): return zigpy.ota.image.ImageKey(MANUFACTURER_ID, IMAGE_TYPE) @pytest.fixture def ota(): app = MagicMock(spec_set=zigpy.application.ControllerApplication) tradfri = MagicMock(spec_set=zigpy.ota.provider.Trådfri) check_invalid = MagicMock( spec_set=zigpy.ota.validators.check_invalid, return_value=False, ) with patch("zigpy.ota.provider.Trådfri", tradfri): with patch("zigpy.ota.check_invalid", check_invalid): yield zigpy.ota.OTA(app) @pytest.mark.parametrize( ("config", "result"), [ ({}, False), ( { CONF_OTA_ALLOW_FILE_PROVIDERS: ( CONF_OTA_ALLOW_FILE_PROVIDERS_STRING + " " ), CONF_OTA_DIR: "/dev/null", }, False, ), ( { CONF_OTA_ALLOW_FILE_PROVIDERS: CONF_OTA_ALLOW_FILE_PROVIDERS_STRING, CONF_OTA_DIR: "/dev/null", }, True, ), ], ) 
async def test_ota_file_provider_gate(config: dict, result: bool): with patch("zigpy.ota.provider.FileStore", new=AsyncMock()) as mock_file_store: app = make_app({CONF_OTA: config}) app.start_network = AsyncMock(wraps=app.start_network) app.form_network = AsyncMock() app.permit = AsyncMock() await app.startup(auto_form=False) assert len(mock_file_store.mock_calls) == int(result) async def test_ota_initialize(ota): ota.async_event = AsyncMock() await ota.initialize() assert ota.async_event.call_count == 1 assert ota.async_event.call_args[0][0] == "initialize_provider" assert ota.not_initialized is False async def test_get_image_empty(ota, image, key): ota.async_event = AsyncMock(return_value=[None, None]) assert len(ota._image_cache) == 0 res = await ota.get_ota_image(MANUFACTURER_ID, IMAGE_TYPE) assert len(ota._image_cache) == 0 assert res is None assert ota.async_event.call_count == 1 assert ota.async_event.call_args[0][0] == "get_image" assert ota.async_event.call_args[0][1] == key async def test_get_image_new(ota, image, key, image_with_version, monkeypatch): newer = image_with_version(image.header.file_version + 1) ota.async_event = AsyncMock(return_value=[None, image, newer]) assert len(ota._image_cache) == 0 res = await ota.get_ota_image(MANUFACTURER_ID, IMAGE_TYPE) # got new image in the cache assert len(ota._image_cache) == 1 assert res.image.header == newer.header assert res.image.subelements == newer.subelements assert ota.async_event.call_count == 1 assert ota.async_event.call_args[0][0] == "get_image" assert ota.async_event.call_args[0][1] == key ota.async_event.reset_mock() assert len(ota._image_cache) == 1 res = await ota.get_ota_image(MANUFACTURER_ID, IMAGE_TYPE) # should get just the cached image assert len(ota._image_cache) == 1 assert res.image.header == newer.header assert res.image.subelements == newer.subelements assert ota.async_event.call_count == 0 # on cache expiration, ping listeners ota.async_event.reset_mock() assert 
len(ota._image_cache) == 1 monkeypatch.setattr( zigpy.ota, "TIMEDELTA_0", zigpy.ota.CachedImage.DEFAULT_EXPIRATION + datetime.timedelta(seconds=1), ) res = await ota.get_ota_image(MANUFACTURER_ID, IMAGE_TYPE) assert len(ota._image_cache) == 1 assert res.image.header == newer.header assert res.image.subelements == newer.subelements assert ota.async_event.call_count == 1 async def test_get_image_invalid(ota, image, image_with_version): corrupted = image_with_version(image.header.file_version) zigpy.ota.check_invalid.side_effect = [True] ota.async_event = AsyncMock(return_value=[None, corrupted]) assert len(ota._image_cache) == 0 res = await ota.get_ota_image(MANUFACTURER_ID, IMAGE_TYPE) assert len(ota._image_cache) == 0 assert res is None @pytest.mark.parametrize("v1", [0, 1]) @pytest.mark.parametrize("v2", [0, 1]) async def test_get_image_invalid_then_valid_versions(v1, v2, ota, image_with_version): image = image_with_version(100 + v1) image.header.header_string = b"\x12" * 32 corrupted = image_with_version(100 + v2) corrupted.header.header_string = b"\x11" * 32 ota.async_event = AsyncMock(return_value=[corrupted, image]) zigpy.ota.check_invalid.side_effect = [True, False] res = await ota.get_ota_image(MANUFACTURER_ID, IMAGE_TYPE) # The valid image is always picked, even if the versions match assert res.image.header.header_string == image.header.header_string def test_cached_image_expiration(image, monkeypatch): cached = zigpy.ota.CachedImage.new(image) assert cached.expired is False monkeypatch.setattr( zigpy.ota, "TIMEDELTA_0", zigpy.ota.CachedImage.DEFAULT_EXPIRATION + datetime.timedelta(seconds=1), ) assert cached.expired is True def test_cached_image_no_expiration(image, monkeypatch): cached = zigpy.ota.CachedImage() monkeypatch.setattr( zigpy.ota, "TIMEDELTA_0", zigpy.ota.CachedImage.DEFAULT_EXPIRATION + datetime.timedelta(seconds=1), ) assert cached.expired is False def test_cached_image_expiration_delay(): d = b"\x1e\xf1\xee\x0b\x00\x018\x00" d += 
b"\x00\x00" d += ( b"|\x11\x01!rE!\x12\x02\x00EBL tradfri_light_basic\x00\x00\x00" b"\x00\x00\x00\x00\x00\x00\x38\x00\x00\x00" ) img = zigpy.ota.image.OTAImage.deserialize(d)[0] cached = zigpy.ota.CachedImage.new(img) orig_expiration = cached.expires_on cached.get_image_block(0, 40) assert cached.expires_on == orig_expiration new_expiration = ( cached.expires_on - zigpy.ota.CachedImage.DEFAULT_EXPIRATION + zigpy.ota.DELAY_EXPIRATION - datetime.timedelta(seconds=10) ) cached.expires_on = new_expiration cached.get_image_block(0, 40) assert cached.expires_on > new_expiration def test_cached_image_serialization_cache(image): image = MagicMock(image) image.serialize.side_effect = [b"data"] cached = zigpy.ota.CachedImage.new(image) assert cached.cached_data is None assert image.serialize.call_count == 0 assert cached.get_image_block(0, 1) == b"d" assert cached.get_image_block(1, 1) == b"a" assert cached.get_image_block(2, 1) == b"t" assert image.serialize.call_count == 1 async def test_get_image_salus(ota, image, image_with_version): SALUS_ID = 4216 newer = image_with_version(image.header.file_version + 1) ota.async_event = AsyncMock(return_value=[None, image, newer]) assert len(ota._image_cache) == 0 await ota.get_ota_image(SALUS_ID, "model123") # got new image in the cache assert len(ota._image_cache) == 1 zigpy-0.62.3/tests/test_ota_image.py000066400000000000000000000330731456054056700174660ustar00rootroot00000000000000import hashlib from unittest import mock import pytest from zigpy.ota import CachedImage import zigpy.ota.image as firmware import zigpy.types as t MANUFACTURER_ID = mock.sentinel.manufacturer_id IMAGE_TYPE = mock.sentinel.image_type @pytest.fixture def image(): img = firmware.OTAImage() img.header = firmware.OTAImageHeader( upgrade_file_id=firmware.OTAImageHeader.MAGIC_VALUE, header_version=256, header_length=56, field_control=0, manufacturer_id=9876, image_type=123, file_version=12345, stack_version=2, header_string="This is a test header!", 
image_size=56 + 2 + 4 + 4, ) img.subelements = [firmware.SubElement(tag_id=0x0000, data=b"data")] return img @pytest.fixture def key(): return firmware.ImageKey(MANUFACTURER_ID, IMAGE_TYPE) def test_firmware_key(): key = firmware.ImageKey(MANUFACTURER_ID, IMAGE_TYPE) assert key.manufacturer_id is MANUFACTURER_ID assert key.image_type is IMAGE_TYPE def test_hw_version(): hw = firmware.HWVersion(0x0A01) assert hw.version == 10 assert hw.revision == 1 assert "version=10" in repr(hw) assert "revision=1" in repr(hw) def _test_ota_img_header(field_control, hdr_suffix=b"", extra=b""): d = b"\x1e\xf1\xee\x0b\x00\x018\x00" d += field_control d += ( b"|\x11\x01!rE!\x12\x02\x00EBL tradfri_light_basic\x00\x00\x00" b"\x00\x00\x00\x00\x00\x00~\x91\x02\x00" ) d += hdr_suffix hdr, rest = firmware.OTAImageHeader.deserialize(d + extra) assert hdr.header_version == 0x0100 assert hdr.header_length == 0x0038 assert hdr.manufacturer_id == 4476 assert hdr.image_type == 0x2101 assert hdr.file_version == 0x12214572 assert hdr.stack_version == 0x0002 assert hdr.image_size == 0x0002917E assert hdr.serialize() == d return hdr, rest def test_ota_image_header(): hdr = firmware.OTAImageHeader() assert hdr.security_credential_version_present is None assert hdr.device_specific_file is None assert hdr.hardware_versions_present is None extra = b"abcdefghklmnpqr" hdr, rest = _test_ota_img_header(b"\x00\x00", extra=extra) assert rest == extra assert hdr.security_credential_version_present is False assert hdr.device_specific_file is False assert hdr.hardware_versions_present is False def test_ota_image_header_security(): extra = b"abcdefghklmnpqr" creds = t.uint8_t(0xAC) hdr, rest = _test_ota_img_header(b"\x01\x00", creds.serialize(), extra) assert rest == extra assert hdr.security_credential_version_present is True assert hdr.security_credential_version == creds assert hdr.device_specific_file is False assert hdr.hardware_versions_present is False def test_ota_image_header_hardware_versions(): extra = 
b"abcdefghklmnpqr" hw_min = firmware.HWVersion(0xBEEF) hw_max = firmware.HWVersion(0xABCD) hdr, rest = _test_ota_img_header( b"\x04\x00", hw_min.serialize() + hw_max.serialize(), extra ) assert rest == extra assert hdr.security_credential_version_present is False assert hdr.device_specific_file is False assert hdr.hardware_versions_present is True assert hdr.minimum_hardware_version == hw_min assert hdr.maximum_hardware_version == hw_max def test_ota_image_destination(): extra = b"abcdefghklmnpqr" dst = t.EUI64.deserialize(b"12345678")[0] hdr, rest = _test_ota_img_header(b"\x02\x00", dst.serialize(), extra) assert rest == extra assert hdr.security_credential_version_present is False assert hdr.device_specific_file is True assert hdr.upgrade_file_destination == dst assert hdr.hardware_versions_present is False def test_ota_img_wrong_header(): d = b"\x1e\xf0\xee\x0b\x00\x018\x00\x00\x00" d += ( b"|\x11\x01!rE!\x12\x02\x00EBL tradfri_light_basic\x00\x00\x00" b"\x00\x00\x00\x00\x00\x00~\x91\x02\x00" ) with pytest.raises(ValueError): firmware.OTAImageHeader.deserialize(d) with pytest.raises(ValueError): firmware.OTAImageHeader.deserialize(d + b"123abc") def test_header_string(): size = 32 header_string = "This is a header String" data = header_string.encode("utf8").ljust(size, b"\x00") extra = b"cdef123" hdr_str, rest = firmware.HeaderString.deserialize(data + extra) assert rest == extra assert hdr_str == header_string hdr_str, rest = firmware.HeaderString.deserialize(data) assert rest == b"" assert hdr_str == header_string assert firmware.HeaderString(header_string).serialize() == data def test_header_string_too_short(): header_string = "This is a header String" data = header_string.encode("utf8") with pytest.raises(ValueError): firmware.HeaderString.deserialize(data) def test_subelement(): payload = b"\x00payload\xff" data = b"\x01\x00" + t.uint32_t(len(payload)).serialize() + payload extra = b"extra" e, rest = firmware.SubElement.deserialize(data + extra) assert rest 
== extra assert e.tag_id == firmware.ElementTagId.ECDSA_SIGNATURE_CRYPTO_SUITE_1 assert e.data == payload assert len(e.data) == len(payload) assert e.serialize() == data def test_subelement_too_short(): for i in range(1, 5): with pytest.raises(ValueError): firmware.SubElement.deserialize(b"".ljust(i, b"\x00")) e, rest = firmware.SubElement.deserialize(b"\x00\x00\x00\x00\x00\x00") assert e.data == b"" assert rest == b"" with pytest.raises(ValueError): firmware.SubElement.deserialize(b"\x00\x02\x02\x00\x00\x00a") @pytest.fixture def raw_header(): def data(elements_size=0): d = b"\x1e\xf1\xee\x0b\x00\x018\x00\x00\x00" d += b"|\x11\x01!rE!\x12\x02\x00EBL tradfri_light_basic\x00\x00\x00" d += b"\x00\x00\x00\x00\x00\x00" d += t.uint32_t(elements_size + 56).serialize() return d return data @pytest.fixture def raw_sub_element(): def data(tag_id, payload=b""): r = t.uint16_t(tag_id).serialize() r += t.uint32_t(len(payload)).serialize() return r + payload return data def test_ota_image(raw_header, raw_sub_element): el1_payload = b"abcd" el2_payload = b"4321" el1 = raw_sub_element(0, el1_payload) el2 = raw_sub_element(1, el2_payload) extra = b"edbc321" img, rest = firmware.OTAImage.deserialize( raw_header(len(el1 + el2)) + el1 + el2 + extra ) assert rest == extra assert len(img.subelements) == 2 assert img.subelements[0].tag_id == 0 assert img.subelements[0].data == el1_payload assert img.subelements[1].tag_id == 1 assert img.subelements[1].data == el2_payload assert img.serialize() == raw_header(len(el1 + el2)) + el1 + el2 with pytest.raises(ValueError): firmware.OTAImage.deserialize(raw_header(len(el1 + el2)) + el1 + el2[:-1]) def test_ota_img_should_upgrade(): manufacturer_id = 0x2345 image_type = 0x4567 version = 0xABBA hdr = firmware.OTAImageHeader() hdr.manufacturer_id = manufacturer_id hdr.image_type = image_type hdr.file_version = version img = CachedImage(firmware.OTAImage(hdr)) assert img.should_update(manufacturer_id, image_type, version) is False assert 
img.should_update(manufacturer_id, image_type, version - 1) is True assert img.should_update(manufacturer_id, image_type - 1, version - 1) is False assert img.should_update(manufacturer_id, image_type + 1, version - 1) is False assert img.should_update(manufacturer_id - 1, image_type, version - 1) is False assert img.should_update(manufacturer_id + 1, image_type, version - 1) is False def test_ota_img_should_upgrade_hw_ver(): manufacturer_id = 0x2345 image_type = 0x4567 version = 0xABBA hdr = firmware.OTAImageHeader() hdr.field_control = 0x0004 hdr.manufacturer_id = manufacturer_id hdr.image_type = image_type hdr.file_version = version hdr.minimum_hardware_version = 2 hdr.maximum_hardware_version = 4 img = CachedImage(firmware.OTAImage(hdr)) assert img.should_update(manufacturer_id, image_type, version - 1) is True for hw_ver in range(2, 4): assert ( img.should_update(manufacturer_id, image_type, version - 1, hw_ver) is True ) assert img.should_update(manufacturer_id, image_type, version - 1, 1) is False assert img.should_update(manufacturer_id, image_type, version - 1, 5) is False def test_get_image_block(raw_header, raw_sub_element): el1_payload = b"abcd" el2_payload = b"4321" el1 = raw_sub_element(0, el1_payload) el2 = raw_sub_element(1, el2_payload) raw_data = raw_header(len(el1 + el2)) + el1 + el2 img = CachedImage(firmware.OTAImage.deserialize(raw_data)[0]) offset, size = 28, 20 block = img.get_image_block(offset, size) assert block == raw_data[offset : offset + min(size, img.MAXIMUM_DATA_SIZE)] offset, size = 30, 50 block = img.get_image_block(offset, size) assert block == raw_data[offset : offset + min(size, img.MAXIMUM_DATA_SIZE)] def test_get_image_block_offset_too_large(raw_header, raw_sub_element): el1_payload = b"abcd" el2_payload = b"4321" el1 = raw_sub_element(0, el1_payload) el2 = raw_sub_element(1, el2_payload) raw_data = raw_header(len(el1 + el2)) + el1 + el2 img = CachedImage(firmware.OTAImage.deserialize(raw_data)[0]) offset, size = 
len(raw_data) + 1, 44 with pytest.raises(ValueError): img.get_image_block(offset, size) def test_cached_image_wrapping(image): cached_img = CachedImage(image) assert cached_img.header is image.header def test_cached_image_serialize(image): cached_img = CachedImage(image) cached_image_data = cached_img.serialize() assert cached_image_data == image.serialize() def wrap_ikea(data): header = bytearray(100) header[0:4] = b"NGIS" header[16:20] = len(header).to_bytes(4, "little") header[20:24] = len(data).to_bytes(4, "little") return header + data + b"F" * 512 def test_parse_ota_normal(image): assert firmware.parse_ota_image(image.serialize()) == (image, b"") def test_parse_ota_ikea(image): data = wrap_ikea(image.serialize()) assert firmware.parse_ota_image(data) == (image, b"") def test_parse_ota_ikea_trailing(image): data = wrap_ikea(image.serialize() + b"trailing") parsed, remaining = firmware.parse_ota_image(data) assert not remaining assert parsed.header.image_size == len(image.serialize() + b"trailing") assert parsed.subelements[0].data == b"data" + b"trailing" parsed2, remaining2 = firmware.OTAImage.deserialize(parsed.serialize()) assert not remaining2 @pytest.mark.parametrize( "data", [ b"NGIS" + b"truncated", b"NGIS" + b"long enough to container header but not actual image", ], ) def test_parse_ota_ikea_truncated(data): with pytest.raises(ValueError): firmware.parse_ota_image(data) def create_hue_ota(data): data = b"\x2A\x00\x01" + data header, _ = firmware.OTAImageHeader.deserialize( bytes.fromhex( "1ef1ee0b0001380000000b100301d5670042020000000000000000000000000000000000000000" "0000000000000000000000000038f00300" ) ) header.image_size = len(header.serialize()) + len(data) return header.serialize() + data def test_parse_ota_hue(): data = create_hue_ota(b"test") + b"rest" img, rest = firmware.parse_ota_image(data) assert isinstance(img, firmware.HueSBLOTAImage) assert rest == b"rest" assert img.data == b"\x2A\x00\x01" + b"test" assert img.serialize() + b"rest" == 
data def test_parse_ota_hue_invalid(): data = create_hue_ota(b"test") firmware.parse_ota_image(data) with pytest.raises(ValueError): firmware.parse_ota_image(data[:-1]) header, rest = firmware.OTAImageHeader.deserialize(data) assert data == header.serialize() + rest with pytest.raises(ValueError): # Three byte sequence must be the first thing after the header firmware.parse_ota_image(header.serialize() + b"\xFF" + rest[1:]) with pytest.raises(ValueError): # Only Hue is known to use these images firmware.parse_ota_image(header.replace(manufacturer_id=12).serialize() + rest) def test_legrand_container_unwrapping(image): # Unwrapped size prefix and 1 + 16 byte suffix data = ( t.uint32_t(len(image.serialize())).serialize() + image.serialize() + b"\x01" + b"abcdabcdabcdabcd" ) with pytest.raises(ValueError): firmware.parse_ota_image(data[:-1]) with pytest.raises(ValueError): firmware.parse_ota_image(b"\xFF" + data[1:]) img, rest = firmware.parse_ota_image(data) assert not rest assert img == image def test_thirdreality_container(image): image_bytes = image.serialize() # There's little useful information in the header subcontainer = ( t.uint32_t(16).serialize() # Total length of image, excluding SHA512 prefix + t.uint32_t(len(image_bytes) + 152 - 64).serialize() + t.uint32_t(152).serialize() # Unknown four byte prefix/suffix and what looks like a second SHA512 hash + b"?" 
* (64 + 4) + t.uint32_t(0).serialize() + t.uint32_t(0).serialize() + image_bytes ) data = hashlib.sha512(subcontainer).digest() + subcontainer assert data.index(image_bytes) == 152 img, rest = firmware.parse_ota_image(data) assert not rest assert img == image with pytest.raises(ValueError): firmware.parse_ota_image(data[:-1]) with pytest.raises(ValueError): firmware.parse_ota_image(b"\xFF" + data[1:]) zigpy-0.62.3/tests/test_ota_provider.py000066400000000000000000000470571456054056700202450ustar00rootroot00000000000000import hashlib import os.path from unittest import mock import uuid import pytest from zigpy.config import CONF_OTA_DIR, CONF_OTA_IKEA import zigpy.ota import zigpy.ota.image import zigpy.ota.provider as ota_p from .async_mock import AsyncMock, patch from .test_ota_image import image # noqa: F401 MANUFACTURER_ID = 4476 IMAGE_TYPE = mock.sentinel.image_type @pytest.fixture def file_image_name(tmp_path, image): # noqa: F811 def ota_img_filename(name="ota-image"): image_file = tmp_path / (name + "-" + str(uuid.uuid4())) image_file.write_bytes(image.serialize()) return str(image_file) return ota_img_filename @pytest.fixture def file_image(file_image_name): img = ota_p.FileImage() img.file_name = file_image_name() img.manufacturer_id = MANUFACTURER_ID img.image_type = IMAGE_TYPE return img @pytest.fixture def file_prov(): p = ota_p.FileStore() p.enable() return p @pytest.fixture def file_image_with_version(file_image_name): def img(version=100, image_type=IMAGE_TYPE): img = ota_p.FileImage() img.file_name = file_image_name() img.header.file_version = version img.header.manufacturer_id = MANUFACTURER_ID img.header.image_type = image_type return img return img @pytest.fixture def ikea_image_with_version(): def img(image_type=IMAGE_TYPE): img = zigpy.ota.provider.IKEAImage( image_type=image_type, binary_url=mock.sentinel.url, sha3_256_sum=mock.sentinel.sha3_256_sum, ) return img return img @pytest.fixture def ikea_image(ikea_image_with_version): return 
ikea_image_with_version() @pytest.fixture def basic_prov(): class Prov(ota_p.Basic): async def initialize_provider(self, ota_config): return None async def refresh_firmware_list(self): return None p = Prov() p.enable() return p @pytest.fixture def ikea_prov(): p = ota_p.Trådfri() p.enable() return p @pytest.fixture def key(): return zigpy.ota.image.ImageKey(MANUFACTURER_ID, IMAGE_TYPE) def test_expiration(ikea_prov): # if we never refreshed firmware list then we should be expired assert ikea_prov.expired async def test_initialize_provider(basic_prov): await basic_prov.initialize_provider(mock.sentinel.ota_dir) async def test_basic_get_image(basic_prov, key): image = mock.MagicMock() # noqa: F811 image.fetch_image = AsyncMock(return_value=mock.sentinel.image) basic_prov._cache = mock.MagicMock() basic_prov._cache.__getitem__.return_value = image basic_prov.refresh_firmware_list = AsyncMock() # check when disabled basic_prov.disable() r = await basic_prov.get_image(key) assert r is None assert basic_prov.refresh_firmware_list.call_count == 0 assert basic_prov._cache.__getitem__.call_count == 0 assert image.fetch_image.call_count == 0 # check with locked image basic_prov.enable() await basic_prov._locks[key].acquire() r = await basic_prov.get_image(key) assert r is None assert basic_prov.refresh_firmware_list.call_count == 0 assert basic_prov._cache.__getitem__.call_count == 0 assert image.fetch_image.call_count == 0 # unlocked image basic_prov._locks.pop(key) r = await basic_prov.get_image(key) assert r is mock.sentinel.image assert basic_prov.refresh_firmware_list.call_count == 1 assert basic_prov._cache.__getitem__.call_count == 1 assert basic_prov._cache.__getitem__.call_args[0][0] == key assert image.fetch_image.call_count == 1 def test_basic_enable_provider(key, basic_prov): assert basic_prov.is_enabled is True basic_prov.disable() assert basic_prov.is_enabled is False basic_prov.enable() assert basic_prov.is_enabled is True async def 
test_basic_get_image_filtered(basic_prov, key): image = mock.MagicMock() # noqa: F811 image.fetch_image = AsyncMock(return_value=mock.sentinel.image) basic_prov._cache = mock.MagicMock() basic_prov._cache.__getitem__.return_value = image basic_prov.refresh_firmware_list = AsyncMock() basic_prov.filter_get_image = AsyncMock(return_value=True) r = await basic_prov.get_image(key) assert r is None assert basic_prov.filter_get_image.call_count == 1 assert basic_prov.filter_get_image.call_args[0][0] == key assert basic_prov.refresh_firmware_list.call_count == 0 assert basic_prov._cache.__getitem__.call_count == 0 assert image.fetch_image.call_count == 0 async def test_ikea_init_ota_dir(ikea_prov): ikea_prov.enable = mock.MagicMock() ikea_prov.refresh_firmware_list = AsyncMock() r = await ikea_prov.initialize_provider({CONF_OTA_IKEA: True}) assert r is None assert ikea_prov.enable.call_count == 1 assert ikea_prov.refresh_firmware_list.call_count == 1 async def test_ikea_get_image_no_cache(ikea_prov, ikea_image): ikea_image.fetch_image = AsyncMock(return_value=mock.sentinel.image) ikea_prov._cache = mock.MagicMock() ikea_prov._cache.__getitem__.side_effect = KeyError() ikea_prov.refresh_firmware_list = AsyncMock() non_ikea = zigpy.ota.image.ImageKey( ota_p.Trådfri.MANUFACTURER_ID + 1, IMAGE_TYPE, ) # Non IKEA manufacturer_id, don't bother doing anything at all r = await ikea_prov.get_image(non_ikea) assert r is None assert ikea_prov._cache.__getitem__.call_count == 0 assert ikea_prov.refresh_firmware_list.call_count == 0 assert non_ikea not in ikea_prov._cache # IKEA manufacturer_id, but not in cache assert ikea_image.key not in ikea_prov._cache r = await ikea_prov.get_image(ikea_image.key) assert r is None assert ikea_prov.refresh_firmware_list.call_count == 1 assert ikea_prov._cache.__getitem__.call_count == 1 assert ikea_image.fetch_image.call_count == 0 async def test_ikea_get_image(ikea_prov, key, ikea_image): ikea_image.fetch_image = 
AsyncMock(return_value=mock.sentinel.image) ikea_prov._cache = mock.MagicMock() ikea_prov._cache.__getitem__.return_value = ikea_image ikea_prov.refresh_firmware_list = AsyncMock() r = await ikea_prov.get_image(key) assert r is mock.sentinel.image assert ikea_prov._cache.__getitem__.call_count == 1 assert ikea_prov._cache.__getitem__.call_args[0][0] == ikea_image.key assert ikea_image.fetch_image.call_count == 1 @patch("aiohttp.ClientSession.get") async def test_ikea_refresh_list(mock_get, ikea_prov): mock_get.return_value.__aenter__.return_value.json = AsyncMock( side_effect=[ [ { "fw_image_type": 4557, "fw_type": 2, "fw_sha3_256": "896edfb0a9d8314fb49d44fb11dc91fb5bb55e2ee1f793d53189cb13f884e13c", "fw_binary_url": "https://fw.ota.homesmart.ikea.com/files/rodret-dimmer-soc_release_prod_v16777287_9812b73c-b02e-4678-b737-d21251a34fd2.ota", }, { "fw_update_prio": 5, "fw_filesize": 242071587, "fw_type": 3, "fw_hotfix_version": 1, "fw_major_version": 2, "fw_binary_checksum": "8c17b203bede63ea53e36d345b628cc7f2faecc18d4406458a12f8f25e54718a24495d30a03fe3244799bfaa50de72d99e6c0d2f7553a8465e37c10c22ba75fc", "fw_minor_version": 453, "fw_sha3_256": "657ed8fd0f6e5e6700acdc6afd64829cebacb1dd03b3f5453258b4bd77b674ed", "fw_binary_url": "https://fw.ota.homesmart.ikea.com/files/DIRIGERA_release_prod_v2.453.1_348f0dce-3c34-49a2-b64c-a1caa202104c.raucb", }, { "fw_image_type": 4552, "fw_type": 2, "fw_sha3_256": "1b5fbea79c5b41864352a938a90ad25d9a0118054bf1cdc0314ef9636a60143a", "fw_binary_url": "https://fw.ota.homesmart.ikea.com/files/tradfri-motion-sensor2_release_prod_v604241925_8afa2f7c-19c3-4ddf-a96c-233714179022.ota", }, ] ] ) mock_get.return_value.__aenter__.return_value.status = 200 mock_get.return_value.__aenter__.return_value.reason = "OK" await ikea_prov.refresh_firmware_list() assert mock_get.call_count == 1 assert len(ikea_prov._cache) == 2 image1 = ikea_prov._cache[ zigpy.ota.image.ImageKey(ota_p.Trådfri.MANUFACTURER_ID, 4557) ] image2 = ikea_prov._cache[ 
zigpy.ota.image.ImageKey(ota_p.Trådfri.MANUFACTURER_ID, 4552) ] assert image1 == ota_p.IKEAImage( image_type=4557, binary_url="https://fw.ota.homesmart.ikea.com/files/rodret-dimmer-soc_release_prod_v16777287_9812b73c-b02e-4678-b737-d21251a34fd2.ota", sha3_256_sum="896edfb0a9d8314fb49d44fb11dc91fb5bb55e2ee1f793d53189cb13f884e13c", ) assert image1.version == 16777287 assert image2 == ota_p.IKEAImage( image_type=4552, binary_url="https://fw.ota.homesmart.ikea.com/files/tradfri-motion-sensor2_release_prod_v604241925_8afa2f7c-19c3-4ddf-a96c-233714179022.ota", sha3_256_sum="1b5fbea79c5b41864352a938a90ad25d9a0118054bf1cdc0314ef9636a60143a", ) assert image2.version == 604241925 assert not ikea_prov.expired def test_ikea_bad_version(): image = ota_p.IKEAImage( image_type=4552, binary_url="https://fw.ota.homesmart.ikea.com/files/DIRIGERA_release_prod_v2.453.1_348f0dce-3c34-49a2-b64c-a1caa202104c.raucb", sha3_256_sum="1b5fbea79c5b41864352a938a90ad25d9a0118054bf1cdc0314ef9636a60143a", ) with pytest.raises(ValueError): image.version @patch("aiohttp.ClientSession.get") async def test_ikea_refresh_list_locked(mock_get, ikea_prov): await ikea_prov._locks[ota_p.LOCK_REFRESH].acquire() mock_get.return_value.__aenter__.return_value.json = AsyncMock(side_effect=[[]]) mock_get.return_value.__aenter__.return_value.status = 434 mock_get.return_value.__aenter__.return_value.reason = "UNK" await ikea_prov.refresh_firmware_list() assert mock_get.call_count == 0 @patch("aiohttp.ClientSession.get") async def test_ikea_refresh_list_failed(mock_get, ikea_prov): mock_get.return_value.__aenter__.return_value.json = AsyncMock(side_effect=[[]]) mock_get.return_value.__aenter__.return_value.status = 434 mock_get.return_value.__aenter__.return_value.reason = "UNK" with patch.object(ikea_prov, "update_expiration") as update_exp: await ikea_prov.refresh_firmware_list() assert mock_get.call_count == 1 assert update_exp.call_count == 0 @patch("aiohttp.ClientSession.get") async def 
test_ikea_fetch_image(mock_get, ikea_image_with_version): data = bytes.fromhex( "1ef1ee0b0001380000007c11012178563412020054657374204f544120496d61" "676500000000000000000000000000000000000042000000" ) sub_el = b"\x00\x00\x04\x00\x00\x00abcd" container = bytearray(b"\x00This is extra data\x00\x55\xaa" * 100) container[0:4] = b"NGIS" container[16:20] = (512).to_bytes(4, "little") # offset container[20:24] = len(data + sub_el).to_bytes(4, "little") # size container[512 : 512 + len(data) + len(sub_el)] = data + sub_el img = ikea_image_with_version(image_type=0x2101) img.url = mock.sentinel.url img.sha3_256_sum = hashlib.sha3_256(container).hexdigest() mock_get.return_value.__aenter__.return_value.read = AsyncMock( side_effect=[container] ) r = await img.fetch_image() assert isinstance(r, zigpy.ota.image.OTAImage) assert mock_get.call_count == 1 assert mock_get.call_args[0][0] == mock.sentinel.url assert r.serialize() == data + sub_el def test_file_image_key(key): fimg = ota_p.FileImage() fimg.header.manufacturer_id = MANUFACTURER_ID fimg.header.image_type = IMAGE_TYPE fimg.header.file_version = mock.sentinel.version assert fimg.key == key assert fimg.version == mock.sentinel.version def test_filestore_scan(file_image_name): file_name = file_image_name() r = ota_p.FileImage.scan_image(file_name) assert isinstance(r, ota_p.FileImage) assert r.file_name == file_name def test_filestore_scan_exc(file_image_name): ota_file = file_image_name() with patch("builtins.open", mock.mock_open()) as mock_file: mock_file.side_effect = OSError() r = ota_p.FileImage.scan_image(ota_file) assert r is None assert mock_file.call_count == 1 assert mock_file.call_args[0][0] == ota_file with patch("builtins.open", mock.mock_open()) as mock_file: mock_file.side_effect = ValueError() r = ota_p.FileImage.scan_image(ota_file) assert r is None assert mock_file.call_count == 1 assert mock_file.call_args[0][0] == ota_file def test_filestore_scan_uncaught_exc(file_image_name): ota_file = 
file_image_name() with pytest.raises(RuntimeError): with patch("builtins.open", mock.mock_open()) as mock_file: mock_file.side_effect = RuntimeError() ota_p.FileImage.scan_image(ota_file) assert mock_file.call_count == 1 assert mock_file.call_args[0][0] == ota_file async def test_filestore_fetch_image(file_image): r = await ota_p.FileImage.fetch_image(file_image) assert isinstance(r, zigpy.ota.image.OTAImage) async def test_filestore_fetch_image_exc(file_image): with mock.patch("builtins.open", mock.mock_open()) as mock_file: mock_file.side_effect = OSError() r = await ota_p.FileImage.fetch_image(file_image) assert r is None assert mock_file.call_count == 1 assert mock_file.call_args[0][0] == file_image.file_name with mock.patch("builtins.open", mock.mock_open()) as mock_file: mock_file.side_effect = ValueError() r = await ota_p.FileImage.fetch_image(file_image) assert r is None assert mock_file.call_count == 1 assert mock_file.call_args[0][0] == file_image.file_name async def test_filestore_fetch_uncaught_exc(file_image): with pytest.raises(RuntimeError): with mock.patch("builtins.open", mock.mock_open()) as mock_file: mock_file.side_effect = RuntimeError() await ota_p.FileImage.fetch_image(file_image) assert mock_file.call_count == 1 assert mock_file.call_args[0][0] == file_image.file_name def test_filestore_validate_ota_dir(tmp_path): file_prov = ota_p.FileStore() assert file_prov.validate_ota_dir(None) is None assert file_prov.validate_ota_dir(str(tmp_path)) == str(tmp_path) # non existing dir non_existing = tmp_path / "non_existing" assert file_prov.validate_ota_dir(str(non_existing)) is None # file instead of dir file_path = tmp_path / "file" file_path.touch() assert file_prov.validate_ota_dir(str(file_path)) is None async def test_filestore_init_provider_success(file_prov): file_prov.enable = mock.MagicMock() file_prov.refresh_firmware_list = AsyncMock() file_prov.validate_ota_dir = mock.MagicMock(return_value=mock.sentinel.ota_dir) r = await 
file_prov.initialize_provider({CONF_OTA_DIR: mock.sentinel.ota_dir}) assert r is None assert file_prov.validate_ota_dir.call_count == 1 assert file_prov.validate_ota_dir.call_args[0][0] == mock.sentinel.ota_dir assert file_prov.enable.call_count == 1 assert file_prov.refresh_firmware_list.call_count == 1 async def test_filestore_init_provider_failure(file_prov): file_prov.enable = mock.MagicMock() file_prov.refresh_firmware_list = AsyncMock() file_prov.validate_ota_dir = mock.MagicMock(return_value=None) r = await file_prov.initialize_provider({CONF_OTA_DIR: mock.sentinel.ota_dir}) assert r is None assert file_prov.validate_ota_dir.call_count == 1 assert file_prov.validate_ota_dir.call_args[0][0] == mock.sentinel.ota_dir assert file_prov.enable.call_count == 0 assert file_prov.refresh_firmware_list.call_count == 0 async def test_filestore_refresh_firmware_list( file_prov, file_image_with_version, monkeypatch ): image_1 = file_image_with_version(image_type=mock.sentinel.image_1) image_2 = file_image_with_version(image_type=mock.sentinel.image_2) _ = file_image_with_version(image_type=mock.sentinel.image_3) images = (image_1, None, image_2) ota_dir = os.path.dirname(image_1.file_name) file_image_mock = mock.MagicMock() file_image_mock.scan_image.side_effect = images monkeypatch.setattr(ota_p, "FileImage", file_image_mock) file_prov.update_expiration = mock.MagicMock() r = await file_prov.refresh_firmware_list() assert r is None assert file_image_mock.scan_image.call_count == 0 assert file_prov.update_expiration.call_count == 0 assert len(file_prov._cache) == 0 # check with an ota_dir this time file_prov._ota_dir = ota_dir for file in ota_p.SKIP_OTA_FILES: with open(os.path.join(ota_dir, file), mode="w+"): pass r = await file_prov.refresh_firmware_list() assert r is None assert file_image_mock.scan_image.call_count == len(images) assert file_prov.update_expiration.call_count == 1 assert len(file_prov._cache) == len([img for img in images if img]) async def 
test_filestore_refresh_firmware_list_2( file_prov, file_image_with_version, monkeypatch ): """Test two files with same key and the same version.""" ver = 100 image_1 = file_image_with_version(version=ver) image_2 = file_image_with_version(version=ver) ota_dir = os.path.dirname(image_1.file_name) file_image_mock = mock.MagicMock() file_image_mock.scan_image.side_effect = [image_1, image_2] monkeypatch.setattr(ota_p, "FileImage", file_image_mock) file_prov.update_expiration = mock.MagicMock() file_prov._ota_dir = ota_dir r = await file_prov.refresh_firmware_list() assert r is None assert file_image_mock.scan_image.call_count == 2 assert file_prov.update_expiration.call_count == 1 assert len(file_prov._cache) == 1 assert file_prov._cache[image_1.key].version == ver async def test_filestore_refresh_firmware_list_3( file_prov, file_image_with_version, monkeypatch ): """Test two files with the same key, older, then newer versions.""" ver = 100 image_1 = file_image_with_version(version=(ver - 1)) image_2 = file_image_with_version(version=ver) ota_dir = os.path.dirname(image_1.file_name) file_image_mock = mock.MagicMock() file_image_mock.scan_image.side_effect = [image_1, image_2] monkeypatch.setattr(ota_p, "FileImage", file_image_mock) file_prov.update_expiration = mock.MagicMock() file_prov._ota_dir = ota_dir r = await file_prov.refresh_firmware_list() assert r is None assert file_image_mock.scan_image.call_count == 2 assert file_prov.update_expiration.call_count == 1 assert len(file_prov._cache) == 1 assert file_prov._cache[image_1.key].version == ver async def test_filestore_refresh_firmware_list_4( file_prov, file_image_with_version, monkeypatch ): """Test two files with the same key, newer, then older versions.""" ver = 100 image_1 = file_image_with_version(version=ver) image_2 = file_image_with_version(version=(ver - 1)) ota_dir = os.path.dirname(image_1.file_name) file_image_mock = mock.MagicMock() file_image_mock.scan_image.side_effect = [image_1, image_2] 
monkeypatch.setattr(ota_p, "FileImage", file_image_mock) file_prov.update_expiration = mock.MagicMock() file_prov._ota_dir = ota_dir r = await file_prov.refresh_firmware_list() assert r is None assert file_image_mock.scan_image.call_count == 2 assert file_prov.update_expiration.call_count == 1 assert len(file_prov._cache) == 1 assert file_prov._cache[image_1.key].version == ver zigpy-0.62.3/tests/test_ota_validators.py000066400000000000000000000174241456054056700205560ustar00rootroot00000000000000from unittest import mock import zlib import pytest from zigpy.ota import validators from zigpy.ota.image import ElementTagId, OTAImage, SubElement from zigpy.ota.validators import ValidationError, ValidationResult def create_ebl_image(tags): # All images start with a 140-byte "0x0000" header tags = [(b"\x00\x00", b"jklm" * 35)] + tags assert all(len(tag) == 2 for tag, value in tags) image = b"".join(tag + len(value).to_bytes(2, "big") + value for tag, value in tags) # And end with a checksum image += b"\xFC\x04\x00\x04" + zlib.crc32(image + b"\xFC\x04\x00\x04").to_bytes( 4, "little" ) if len(image) % 64 != 0: image += b"\xFF" * (64 - len(image) % 64) assert list(validators.parse_silabs_ebl(image)) return image def create_gbl_image(tags): # All images start with an 8-byte header tags = [(b"\xEB\x17\xA6\x03", b"\x00\x00\x00\x03\x01\x01\x00\x00")] + tags assert all(len(tag) == 4 for tag, value in tags) image = b"".join( tag + len(value).to_bytes(4, "little") + value for tag, value in tags ) # And end with a checksum image += ( b"\xFC\x04\x04\xFC" + b"\x04\x00\x00\x00" + zlib.crc32(image + b"\xFC\x04\x04\xFC" + b"\x04\x00\x00\x00").to_bytes( 4, "little" ) ) assert list(validators.parse_silabs_gbl(image)) return image VALID_EBL_IMAGE = create_ebl_image([(b"ab", b"foo")]) VALID_GBL_IMAGE = create_gbl_image([(b"test", b"foo")]) def create_subelement(tag_id, value): return SubElement.deserialize( tag_id.serialize() + len(value).to_bytes(4, "little") + value )[0] def 
test_parse_silabs_ebl(): list(validators.parse_silabs_ebl(VALID_EBL_IMAGE)) image = create_ebl_image([(b"AA", b"test"), (b"BB", b"foo" * 20)]) header, tag1, tag2, checksum = validators.parse_silabs_ebl(image) assert len(image) % 64 == 0 assert header[0] == b"\x00\x00" and len(header[1]) == 140 assert tag1 == (b"AA", b"test") assert tag2 == (b"BB", b"foo" * 20) assert checksum[0] == b"\xFC\x04" and len(checksum[1]) == 4 # Padding needs to be a multiple of 64 bytes with pytest.raises(ValidationError): list(validators.parse_silabs_ebl(image[:-1])) with pytest.raises(ValidationError): list(validators.parse_silabs_ebl(image + b"\xFF")) # Nothing can come after the padding assert list(validators.parse_silabs_ebl(image[:-1] + b"\xFF")) with pytest.raises(ValidationError): list(validators.parse_silabs_ebl(image[:-1] + b"\xAB")) # Truncated images are detected with pytest.raises(ValidationError): list(validators.parse_silabs_ebl(image[: image.index(b"test")] + b"\xFF" * 44)) # As are corrupted images of the correct length but with bad tag lengths with pytest.raises(ValidationError): index = image.index(b"test") bad_image = image[: index - 2] + b"\xFF\xFF" + image[index:] list(validators.parse_silabs_ebl(bad_image)) # Truncated but at a 64-byte boundary, missing CRC footer with pytest.raises(ValidationError): bad_image = create_ebl_image([(b"AA", b"test" * 11)]) bad_image = bad_image[: bad_image.rindex(b"test") + 4] list(validators.parse_silabs_ebl(bad_image)) # Corrupted images are detected corrupted_image = image.replace(b"foo", b"goo", 1) assert image != corrupted_image with pytest.raises(ValidationError): list(validators.parse_silabs_ebl(corrupted_image)) def test_parse_silabs_gbl(): list(validators.parse_silabs_gbl(VALID_GBL_IMAGE)) image = create_gbl_image([(b"AAAA", b"test"), (b"BBBB", b"foo" * 20)]) header, tag1, tag2, checksum = validators.parse_silabs_gbl(image) assert header[0] == b"\xEB\x17\xA6\x03" and len(header[1]) == 8 assert tag1 == (b"AAAA", b"test") assert 
tag2 == (b"BBBB", b"foo" * 20) assert checksum[0] == b"\xFC\x04\x04\xFC" and len(checksum[1]) == 4 # Arbitrary padding is allowed parsed_image = [header, tag1, tag2, checksum] assert list(validators.parse_silabs_gbl(image + b"\x00")) == parsed_image assert list(validators.parse_silabs_gbl(image + b"\xAB\xCD\xEF")) == parsed_image # Normal truncated images are detected with pytest.raises(ValidationError): list(validators.parse_silabs_gbl(image[-10:])) # Structurally sound but truncated images are detected with pytest.raises(ValidationError): offset = image.index(b"test") bad_image = image[: offset - 8] list(validators.parse_silabs_gbl(bad_image)) # Corrupted images are detected with pytest.raises(ValidationError): corrupted_image = image.replace(b"foo", b"goo", 1) assert image != corrupted_image list(validators.parse_silabs_gbl(corrupted_image)) def test_validate_firmware(): assert validators.validate_firmware(VALID_EBL_IMAGE) == ValidationResult.VALID with pytest.raises(ValidationError): validators.validate_firmware(VALID_EBL_IMAGE[:-1]) with pytest.raises(ValidationError): validators.validate_firmware(VALID_EBL_IMAGE + b"\xFF") assert validators.validate_firmware(VALID_GBL_IMAGE) == ValidationResult.VALID with pytest.raises(ValidationError): validators.validate_firmware(VALID_GBL_IMAGE[:-1]) assert validators.validate_firmware(b"UNKNOWN") == ValidationResult.UNKNOWN def test_validate_ota_image_simple_valid(): image = OTAImage() image.subelements = [ create_subelement(ElementTagId.UPGRADE_IMAGE, VALID_EBL_IMAGE), ] assert validators.validate_ota_image(image) == ValidationResult.VALID def test_validate_ota_image_complex_valid(): image = OTAImage() image.subelements = [ create_subelement(ElementTagId.ECDSA_SIGNATURE_CRYPTO_SUITE_1, b"asd"), create_subelement(ElementTagId.UPGRADE_IMAGE, VALID_EBL_IMAGE), create_subelement(ElementTagId.UPGRADE_IMAGE, VALID_GBL_IMAGE), create_subelement(ElementTagId.ECDSA_SIGNING_CERTIFICATE_CRYPTO_SUITE_1, b"ab"), ] assert 
validators.validate_ota_image(image) == ValidationResult.VALID def test_validate_ota_image_invalid(): image = OTAImage() image.subelements = [ create_subelement(ElementTagId.UPGRADE_IMAGE, VALID_EBL_IMAGE[:-1]), ] with pytest.raises(ValidationError): validators.validate_ota_image(image) def test_validate_ota_image_mixed_invalid(): image = OTAImage() image.subelements = [ create_subelement(ElementTagId.UPGRADE_IMAGE, b"unknown"), create_subelement(ElementTagId.UPGRADE_IMAGE, VALID_EBL_IMAGE[:-1]), ] with pytest.raises(ValidationError): validators.validate_ota_image(image) def test_validate_ota_image_mixed_valid(): image = OTAImage() image.subelements = [ create_subelement(ElementTagId.UPGRADE_IMAGE, b"unknown1"), create_subelement(ElementTagId.UPGRADE_IMAGE, VALID_EBL_IMAGE), ] assert validators.validate_ota_image(image) == ValidationResult.UNKNOWN def test_validate_ota_image_empty(): image = OTAImage() image.subelements = [] assert validators.validate_ota_image(image) == ValidationResult.UNKNOWN def test_check_invalid_unknown(): image = mock.Mock() assert validators.validate_ota_image(image) == ValidationResult.UNKNOWN def test_check_invalid(): image = OTAImage() with mock.patch("zigpy.ota.validators.validate_ota_image") as m: m.side_effect = [ValidationResult.VALID] assert not validators.check_invalid(image) with mock.patch("zigpy.ota.validators.validate_ota_image") as m: m.side_effect = [ValidationResult.UNKNOWN] assert not validators.check_invalid(image) with mock.patch("zigpy.ota.validators.validate_ota_image") as m: m.side_effect = [ValidationError("error")] assert validators.check_invalid(image) zigpy-0.62.3/tests/test_quirks.py000066400000000000000000001055451456054056700170630ustar00rootroot00000000000000import asyncio import itertools from typing import Final import pytest from zigpy.const import ( SIG_ENDPOINTS, SIG_EP_INPUT, SIG_EP_OUTPUT, SIG_EP_PROFILE, SIG_EP_TYPE, SIG_MANUFACTURER, SIG_MODEL, SIG_MODELS_INFO, SIG_SKIP_CONFIG, ) import zigpy.device 
import zigpy.endpoint import zigpy.quirks from zigpy.quirks.registry import DeviceRegistry import zigpy.types as t import zigpy.zcl as zcl from .async_mock import AsyncMock, MagicMock, patch, sentinel ALLOWED_SIGNATURE = { SIG_EP_PROFILE, SIG_EP_TYPE, SIG_MANUFACTURER, SIG_MODEL, SIG_EP_INPUT, SIG_EP_OUTPUT, } ALLOWED_REPLACEMENT = {SIG_ENDPOINTS} def test_registry(): class TestDevice(zigpy.quirks.CustomDevice): signature = {SIG_MODEL: "model"} assert TestDevice in zigpy.quirks._DEVICE_REGISTRY assert zigpy.quirks._DEVICE_REGISTRY.remove(TestDevice) is None # :-/ assert TestDevice not in zigpy.quirks._DEVICE_REGISTRY @pytest.fixture def real_device(app_mock): ieee = sentinel.ieee nwk = 0x2233 real_device = zigpy.device.Device(app_mock, ieee, nwk) real_device.add_endpoint(1) real_device[1].profile_id = 255 real_device[1].device_type = 255 real_device.model = "model" real_device.manufacturer = "manufacturer" real_device[1].add_input_cluster(3) real_device[1].add_output_cluster(6) return real_device @pytest.fixture def real_device_2(app_mock): ieee = sentinel.ieee_2 nwk = 0x3344 real_device = zigpy.device.Device(app_mock, ieee, nwk) real_device.add_endpoint(1) real_device[1].profile_id = 255 real_device[1].device_type = 255 real_device.model = "model" real_device.manufacturer = "A different manufacturer" real_device[1].add_input_cluster(3) real_device[1].add_output_cluster(6) return real_device def _dev_reg(device): registry = DeviceRegistry() registry.add_to_registry(device) return registry def test_get_device_new_sig(real_device): class TestDevice: signature = {} def __init__(*args, **kwargs): pass def get_signature(self): pass registry = _dev_reg(TestDevice) assert registry.get_device(real_device) is real_device TestDevice.signature[SIG_ENDPOINTS] = {1: {SIG_EP_PROFILE: 1}} registry = _dev_reg(TestDevice) assert registry.get_device(real_device) is real_device TestDevice.signature[SIG_ENDPOINTS][1][SIG_EP_PROFILE] = 255 
TestDevice.signature[SIG_ENDPOINTS][1][SIG_EP_TYPE] = 1 registry = _dev_reg(TestDevice) assert registry.get_device(real_device) is real_device TestDevice.signature[SIG_ENDPOINTS][1][SIG_EP_TYPE] = 255 TestDevice.signature[SIG_ENDPOINTS][1][SIG_EP_INPUT] = [1] registry = _dev_reg(TestDevice) assert registry.get_device(real_device) is real_device TestDevice.signature[SIG_ENDPOINTS][1][SIG_EP_INPUT] = [3] TestDevice.signature[SIG_ENDPOINTS][1][SIG_EP_OUTPUT] = [1] registry = _dev_reg(TestDevice) assert registry.get_device(real_device) is real_device TestDevice.signature[SIG_ENDPOINTS][1][SIG_EP_OUTPUT] = [6] TestDevice.signature[SIG_MODEL] = "x" registry = _dev_reg(TestDevice) assert registry.get_device(real_device) is real_device TestDevice.signature[SIG_MODEL] = "model" TestDevice.signature[SIG_MANUFACTURER] = "x" registry = _dev_reg(TestDevice) assert registry.get_device(real_device) is real_device TestDevice.signature[SIG_MANUFACTURER] = "manufacturer" registry = _dev_reg(TestDevice) assert isinstance(registry.get_device(real_device), TestDevice) TestDevice.signature[SIG_ENDPOINTS][2] = {SIG_EP_PROFILE: 2} registry = _dev_reg(TestDevice) assert registry.get_device(real_device) is real_device assert zigpy.quirks.get_device(real_device, registry) is real_device def test_model_manuf_device_sig(real_device): class TestDevice: signature = {} def __init__(*args, **kwargs): pass def get_signature(self): pass registry = DeviceRegistry() registry.add_to_registry(TestDevice) assert registry.get_device(real_device) is real_device TestDevice.signature[SIG_ENDPOINTS] = { 1: { SIG_EP_PROFILE: 255, SIG_EP_TYPE: 255, SIG_EP_INPUT: [3], SIG_EP_OUTPUT: [6], } } TestDevice.signature[SIG_MODEL] = "x" assert registry.get_device(real_device) is real_device TestDevice.signature[SIG_MODEL] = "model" TestDevice.signature[SIG_MANUFACTURER] = "x" assert registry.get_device(real_device) is real_device TestDevice.signature[SIG_MANUFACTURER] = "manufacturer" assert 
isinstance(registry.get_device(real_device), TestDevice) def test_custom_devices(): def _check_range(cluster): for range in zcl.Cluster._registry_range: if range[0] <= cluster <= range[1]: return True return False # Validate that all CustomDevices look sane reg = zigpy.quirks._DEVICE_REGISTRY.registry candidates = list( itertools.chain(*itertools.chain(*[m.values() for m in reg.values()])) ) for device in candidates: # enforce new style of signature assert SIG_ENDPOINTS in device.signature numeric = [eid for eid in device.signature if isinstance(eid, int)] assert not numeric # Check that the signature data is OK signature = device.signature[SIG_ENDPOINTS] for profile_id, profile_data in signature.items(): assert isinstance(profile_id, int) assert set(profile_data.keys()) - ALLOWED_SIGNATURE == set() # Check that the replacement data is OK assert set(device.replacement.keys()) - ALLOWED_REPLACEMENT == set() for epid, epdata in device.replacement.get(SIG_ENDPOINTS, {}).items(): assert (epid in signature) or ( "profile" in epdata and SIG_EP_TYPE in epdata ) if "profile" in epdata: profile = epdata["profile"] assert isinstance(profile, int) and 0 <= profile <= 0xFFFF if SIG_EP_TYPE in epdata: device_type = epdata[SIG_EP_TYPE] assert isinstance(device_type, int) and 0 <= device_type <= 0xFFFF all_clusters = epdata.get(SIG_EP_INPUT, []) + epdata.get(SIG_EP_OUTPUT, []) for cluster in all_clusters: assert ( (isinstance(cluster, int) and cluster in zcl.Cluster._registry) or (isinstance(cluster, int) and _check_range(cluster)) or issubclass(cluster, zcl.Cluster) ) def test_custom_device(app_mock): class Device(zigpy.quirks.CustomDevice): signature = {} class MyEndpoint: def __init__(self, device, endpoint_id, *args, **kwargs): assert args == (sentinel.custom_endpoint_arg, replaces) class MyCluster(zigpy.quirks.CustomCluster): cluster_id = 0x8888 replacement = { SIG_ENDPOINTS: { 1: { SIG_EP_PROFILE: sentinel.profile_id, SIG_EP_INPUT: [0x0000, MyCluster], SIG_EP_OUTPUT: 
[0x0001, MyCluster], }, 2: (MyEndpoint, sentinel.custom_endpoint_arg), }, SIG_MODEL: "Mock Model", SIG_MANUFACTURER: "Mock Manufacturer", } class Device2(zigpy.quirks.CustomDevice): signature = {} class MyEndpoint: def __init__(self, device, endpoint_id, *args, **kwargs): assert args == (sentinel.custom_endpoint_arg, replaces) class MyCluster(zigpy.quirks.CustomCluster): cluster_id = 0x8888 replacement = { SIG_ENDPOINTS: { 1: { SIG_EP_PROFILE: sentinel.profile_id, SIG_EP_INPUT: [0x0000, MyCluster], SIG_EP_OUTPUT: [0x0001, MyCluster], }, 2: (MyEndpoint, sentinel.custom_endpoint_arg), }, SIG_MODEL: "Mock Model", SIG_MANUFACTURER: "Mock Manufacturer", SIG_SKIP_CONFIG: True, } assert 0x8888 not in zcl.Cluster._registry replaces = MagicMock() replaces[1].device_type = sentinel.device_type test_device = Device(app_mock, None, 0x4455, replaces) test_device2 = Device2(app_mock, None, 0x4455, replaces) assert test_device2.skip_configuration is True assert test_device.manufacturer == "Mock Manufacturer" assert test_device.model == "Mock Model" assert test_device.skip_configuration is False assert test_device[1].profile_id == sentinel.profile_id assert test_device[1].device_type == sentinel.device_type assert 0x0000 in test_device[1].in_clusters assert 0x8888 in test_device[1].in_clusters assert isinstance(test_device[1].in_clusters[0x8888], Device.MyCluster) assert 0x0001 in test_device[1].out_clusters assert 0x8888 in test_device[1].out_clusters assert isinstance(test_device[1].out_clusters[0x8888], Device.MyCluster) assert isinstance(test_device[2], Device.MyEndpoint) test_device.add_endpoint(3) assert isinstance(test_device[3], zigpy.endpoint.Endpoint) assert zigpy.quirks._DEVICE_REGISTRY.remove(Device) is None # :-/ assert Device not in zigpy.quirks._DEVICE_REGISTRY def test_custom_cluster_idx(): class TestClusterIdx(zigpy.quirks.CustomCluster): cluster_id = 0x1234 class AttributeDefs(zcl.foundation.BaseAttributeDefs): first_attribute: Final = 
zcl.foundation.ZCLAttributeDef( id=0x0000, type=t.uint8_t ) second_attribute: Final = zcl.foundation.ZCLAttributeDef( id=0x00FF, type=t.enum8 ) class ServerCommandDefs(zcl.foundation.BaseCommandDefs): server_cmd_0: Final = zcl.foundation.ZCLCommandDef( id=0x00, schema={"param1": t.uint8_t, "param2": t.uint8_t}, direction=False, ) server_cmd_2: Final = zcl.foundation.ZCLCommandDef( id=0x01, schema={"param1": t.uint8_t, "param2": t.uint8_t}, direction=False, ) class ClientCommandDefs(zcl.foundation.BaseCommandDefs): client_cmd_0: Final = zcl.foundation.ZCLCommandDef( id=0x00, schema={"param1": t.uint8_t}, direction=True ) client_cmd_1: Final = zcl.foundation.ZCLCommandDef( id=0x01, schema={"param1": t.uint8_t}, direction=True ) assert hasattr(TestClusterIdx, "attributes_by_name") attr_idx_len = len(TestClusterIdx.attributes_by_name) attrs_len = len(TestClusterIdx.attributes) assert attr_idx_len == attrs_len for attr_name, attr in TestClusterIdx.attributes_by_name.items(): assert TestClusterIdx.attributes[attr.id].name == attr_name async def test_read_attributes_uncached(): class TestCluster(zigpy.quirks.CustomCluster): cluster_id = 0x1234 _CONSTANT_ATTRIBUTES = {0x0001: 5} class AttributeDefs(zcl.foundation.BaseAttributeDefs): first_attribute: Final = zcl.foundation.ZCLAttributeDef( id=0x0000, type=t.uint8_t ) second_attribute: Final = zcl.foundation.ZCLAttributeDef( id=0x0001, type=t.uint8_t ) third_attribute: Final = zcl.foundation.ZCLAttributeDef( id=0x0002, type=t.uint8_t ) fouth_attribute: Final = zcl.foundation.ZCLAttributeDef( id=0x0003, type=t.enum8 ) class ServerCommandDefs(zcl.foundation.BaseCommandDefs): server_cmd_0: Final = zcl.foundation.ZCLCommandDef( id=0x00, schema={"param1": t.uint8_t, "param2": t.uint8_t}, direction=False, ) server_cmd_2: Final = zcl.foundation.ZCLCommandDef( id=0x01, schema={"param1": t.uint8_t, "param2": t.uint8_t}, direction=False, ) class ClientCommandDefs(zcl.foundation.BaseCommandDefs): client_cmd_0: Final = 
zcl.foundation.ZCLCommandDef( id=0x00, schema={"param1": t.uint8_t}, direction=True ) client_cmd_1: Final = zcl.foundation.ZCLCommandDef( id=0x01, schema={"param1": t.uint8_t}, direction=True ) class TestCluster2(zigpy.quirks.CustomCluster): cluster_id = 0x1235 class AttributeDefs(zcl.foundation.BaseAttributeDefs): first_attribute: Final = zcl.foundation.ZCLAttributeDef( id=0x0000, type=t.uint8_t ) epmock = MagicMock() epmock._device.get_sequence.return_value = 123 epmock.device.get_sequence.return_value = 123 cluster = TestCluster(epmock, True) cluster2 = TestCluster2(epmock, True) async def mockrequest( foundation, command, schema, args, manufacturer=None, **kwargs ): assert foundation is True assert command == 0x00 rar0 = _mk_rar(0x0000, 99) rar99 = _mk_rar(0x0002, None, 1) rar199 = _mk_rar(0x0003, 199) return [[rar0, rar99, rar199]] # Unknown attribute read passes through with pytest.raises(KeyError): cluster.get("unknown_attribute", 123) assert "unknown_attribute" not in cluster._attr_cache # Constant attribute can be read with `get` assert cluster.get("second_attribute") == 5 assert "second_attribute" not in cluster._attr_cache # test no constants cluster.request = mockrequest success, failure = await cluster.read_attributes([0, 2, 3]) assert success[0x0000] == 99 assert failure[0x0002] == 1 assert success[0x0003] == 199 assert cluster.get(0x0003) == 199 # test mixed response with constant success, failure = await cluster.read_attributes([0, 1, 2, 3]) assert success[0x0000] == 99 assert success[0x0001] == 5 assert failure[0x0002] == 1 assert success[0x0003] == 199 # test just constant attr success, failure = await cluster.read_attributes([1]) assert success[1] == 5 # test just constant attr cluster2.request = mockrequest success, failure = await cluster2.read_attributes([0, 2, 3]) assert success[0x0000] == 99 assert failure[0x0002] == 1 assert success[0x0003] == 199 async def test_read_attributes_default_response(): class 
TestCluster(zigpy.quirks.CustomCluster): cluster_id = 0x1234 _CONSTANT_ATTRIBUTES = {0x0001: 5} class AttributeDefs(zcl.foundation.BaseAttributeDefs): first_attribute: Final = zcl.foundation.ZCLAttributeDef( id=0x0000, type=t.uint8_t ) second_attribute: Final = zcl.foundation.ZCLAttributeDef( id=0x0001, type=t.uint8_t ) third_attribute: Final = zcl.foundation.ZCLAttributeDef( id=0x0002, type=t.uint8_t ) fouth_attribute: Final = zcl.foundation.ZCLAttributeDef( id=0x0003, type=t.enum8 ) class ServerCommandDefs(zcl.foundation.BaseCommandDefs): server_cmd_0: Final = zcl.foundation.ZCLCommandDef( id=0x00, schema={"param1": t.uint8_t, "param2": t.uint8_t}, direction=False, ) server_cmd_2: Final = zcl.foundation.ZCLCommandDef( id=0x01, schema={"param1": t.uint8_t, "param2": t.uint8_t}, direction=False, ) class ClientCommandDefs(zcl.foundation.BaseCommandDefs): client_cmd_0: Final = zcl.foundation.ZCLCommandDef( id=0x00, schema={"param1": t.uint8_t}, direction=True ) client_cmd_1: Final = zcl.foundation.ZCLCommandDef( id=0x01, schema={"param1": t.uint8_t}, direction=True ) epmock = MagicMock() epmock._device.get_sequence.return_value = 123 epmock.device.get_sequence.return_value = 123 cluster = TestCluster(epmock, True) async def mockrequest( foundation, command, schema, args, manufacturer=None, **kwargs ): assert foundation is True assert command == 0 return [0xC1] cluster.request = mockrequest # test constants with errors success, failure = await cluster.read_attributes([0, 1, 2, 3], allow_cache=False) assert success == {1: 5} assert failure == {0: 0xC1, 2: 0xC1, 3: 0xC1} def _mk_rar(attrid, value, status=0): r = zcl.foundation.ReadAttributeRecord() r.attrid = attrid r.status = status r.value = zcl.foundation.TypeValue() r.value.value = value return r class ManufacturerSpecificCluster(zigpy.quirks.CustomCluster): cluster_id = 0x2222 ep_attribute = "just_a_cluster" class AttributeDefs(zcl.foundation.BaseAttributeDefs): attr0: Final = 
zcl.foundation.ZCLAttributeDef(id=0x0000, type=t.uint8_t) attr1: Final = zcl.foundation.ZCLAttributeDef( id=0x0001, type=t.uint16_t, is_manufacturer_specific=True ) class ServerCommandDefs(zcl.foundation.BaseCommandDefs): server_cmd0: Final = zcl.foundation.ZCLCommandDef( id=0x00, schema={}, direction=False ) server_cmd1: Final = zcl.foundation.ZCLCommandDef( id=0x01, schema={}, direction=False, is_manufacturer_specific=True ) class ClientCommandDefs(zcl.foundation.BaseCommandDefs): client_cmd0: Final = zcl.foundation.ZCLCommandDef( id=0x00, schema={}, direction=False ) client_cmd1: Final = zcl.foundation.ZCLCommandDef( id=0x01, schema={}, direction=False, is_manufacturer_specific=True ) @pytest.fixture def manuf_cluster(): """Return a manufacturer specific cluster fixture.""" ep = MagicMock() ep.manufacturer_id = sentinel.manufacturer_id return ManufacturerSpecificCluster.from_id(ep, 0x2222) @pytest.fixture def manuf_cluster2(): """Return a manufacturer specific cluster fixture.""" class ManufCluster2(ManufacturerSpecificCluster): ep_attribute = "just_a_manufacturer_specific_cluster" cluster_id = 0xFC00 ep = MagicMock() ep.manufacturer_id = sentinel.manufacturer_id2 cluster = ManufCluster2(ep) cluster.cluster_id = 0xFC00 return cluster @pytest.mark.parametrize( "cmd_name, manufacturer", ( ("client_cmd0", None), ("client_cmd1", sentinel.manufacturer_id), ), ) async def test_client_cmd_vendor_specific_by_name( manuf_cluster, manuf_cluster2, cmd_name, manufacturer ): """Test manufacturer specific client commands.""" with patch.object(manuf_cluster, "reply", AsyncMock()) as cmd_mock: await getattr(manuf_cluster, cmd_name)() await asyncio.sleep(0.01) assert cmd_mock.call_count == 1 assert cmd_mock.call_args[1][SIG_MANUFACTURER] is manufacturer with patch.object(manuf_cluster2, "reply", AsyncMock()) as cmd_mock: await getattr(manuf_cluster2, cmd_name)() await asyncio.sleep(0.01) assert cmd_mock.call_count == 1 assert cmd_mock.call_args[1][SIG_MANUFACTURER] is 
sentinel.manufacturer_id2 @pytest.mark.parametrize( "cmd_name, manufacturer", ( ("server_cmd0", None), ("server_cmd1", sentinel.manufacturer_id), ), ) async def test_srv_cmd_vendor_specific_by_name( manuf_cluster, manuf_cluster2, cmd_name, manufacturer ): """Test manufacturer specific server commands.""" with patch.object(manuf_cluster, "request", AsyncMock()) as cmd_mock: await getattr(manuf_cluster, cmd_name)() await asyncio.sleep(0.01) assert cmd_mock.call_count == 1 assert cmd_mock.call_args[1]["manufacturer"] is manufacturer with patch.object(manuf_cluster2, "request", AsyncMock()) as cmd_mock: await getattr(manuf_cluster2, cmd_name)() await asyncio.sleep(0.01) assert cmd_mock.call_count == 1 assert cmd_mock.call_args[1]["manufacturer"] is sentinel.manufacturer_id2 @pytest.mark.parametrize( "attr_name, manufacturer", ( ("attr0", None), ("attr1", sentinel.manufacturer_id), ), ) async def test_read_attr_manufacture_specific( manuf_cluster, manuf_cluster2, attr_name, manufacturer ): """Test manufacturer specific read_attributes command.""" with patch.object(zcl.Cluster, "_read_attributes", AsyncMock()) as cmd_mock: await manuf_cluster.read_attributes([attr_name]) assert cmd_mock.call_count == 1 assert cmd_mock.call_args[1]["manufacturer"] is manufacturer cmd_mock.reset_mock() await manuf_cluster.read_attributes( [attr_name], manufacturer=sentinel.another_id ) assert cmd_mock.call_count == 1 assert cmd_mock.call_args[1]["manufacturer"] is sentinel.another_id with patch.object(zcl.Cluster, "_read_attributes", AsyncMock()) as cmd_mock: await manuf_cluster2.read_attributes([attr_name]) assert cmd_mock.call_count == 1 assert cmd_mock.call_args[1]["manufacturer"] is sentinel.manufacturer_id2 cmd_mock.reset_mock() await manuf_cluster2.read_attributes( [attr_name], manufacturer=sentinel.another_id ) assert cmd_mock.call_count == 1 assert cmd_mock.call_args[1]["manufacturer"] is sentinel.another_id @pytest.mark.parametrize( "attr_name, manufacturer", ( ("attr0", None), 
("attr1", sentinel.manufacturer_id), ), ) async def test_write_attr_manufacture_specific( manuf_cluster, manuf_cluster2, attr_name, manufacturer ): """Test manufacturer specific write_attributes command.""" with patch.object(zcl.Cluster, "_write_attributes", AsyncMock()) as cmd_mock: await manuf_cluster.write_attributes({attr_name: 0x12}) assert cmd_mock.call_count == 1 assert cmd_mock.call_args[1]["manufacturer"] is manufacturer cmd_mock.reset_mock() await manuf_cluster.write_attributes( {attr_name: 0x12}, manufacturer=sentinel.another_id ) assert cmd_mock.call_count == 1 assert cmd_mock.call_args[1]["manufacturer"] is sentinel.another_id with patch.object(zcl.Cluster, "_write_attributes", AsyncMock()) as cmd_mock: await manuf_cluster2.write_attributes({attr_name: 0x12}) assert cmd_mock.call_count == 1 assert cmd_mock.call_args[1]["manufacturer"] is sentinel.manufacturer_id2 cmd_mock.reset_mock() await manuf_cluster2.write_attributes( {attr_name: 0x12}, manufacturer=sentinel.another_id ) assert cmd_mock.call_count == 1 assert cmd_mock.call_args[1]["manufacturer"] is sentinel.another_id @pytest.mark.parametrize( "attr_name, manufacturer", ( ("attr0", None), ("attr1", sentinel.manufacturer_id), ), ) async def test_write_attr_undivided_manufacture_specific( manuf_cluster, manuf_cluster2, attr_name, manufacturer ): """Test manufacturer specific write_attributes_undivided command.""" with patch.object( zcl.Cluster, "_write_attributes_undivided", AsyncMock() ) as cmd_mock: await manuf_cluster.write_attributes_undivided({attr_name: 0x12}) assert cmd_mock.call_count == 1 assert cmd_mock.call_args[1]["manufacturer"] is manufacturer cmd_mock.reset_mock() await manuf_cluster.write_attributes_undivided( {attr_name: 0x12}, manufacturer=sentinel.another_id ) assert cmd_mock.call_count == 1 assert cmd_mock.call_args[1]["manufacturer"] is sentinel.another_id with patch.object( zcl.Cluster, "_write_attributes_undivided", AsyncMock() ) as cmd_mock: await 
manuf_cluster2.write_attributes_undivided({attr_name: 0x12}) assert cmd_mock.call_count == 1 assert cmd_mock.call_args[1]["manufacturer"] is sentinel.manufacturer_id2 cmd_mock.reset_mock() await manuf_cluster2.write_attributes_undivided( {attr_name: 0x12}, manufacturer=sentinel.another_id ) assert cmd_mock.call_count == 1 assert cmd_mock.call_args[1]["manufacturer"] is sentinel.another_id @pytest.mark.parametrize( "attr_name, manufacturer", ( ("attr0", None), ("attr1", sentinel.manufacturer_id), ), ) async def test_configure_reporting_manufacture_specific( manuf_cluster, manuf_cluster2, attr_name, manufacturer ): """Test manufacturer specific configure_reporting command.""" with patch.object(zcl.Cluster, "_configure_reporting", AsyncMock()) as cmd_mock: await manuf_cluster.configure_reporting( attr_name, min_interval=1, max_interval=1, reportable_change=1 ) assert cmd_mock.call_count == 1 assert cmd_mock.call_args[1]["manufacturer"] is manufacturer cmd_mock.reset_mock() await manuf_cluster.configure_reporting( attr_name, min_interval=1, max_interval=1, reportable_change=1, manufacturer=sentinel.another_id, ) assert cmd_mock.call_count == 1 assert cmd_mock.call_args[1]["manufacturer"] is sentinel.another_id with patch.object(zcl.Cluster, "_configure_reporting", AsyncMock()) as cmd_mock: await manuf_cluster2.configure_reporting( attr_name, min_interval=1, max_interval=1, reportable_change=1 ) assert cmd_mock.call_count == 1 assert cmd_mock.call_args[1]["manufacturer"] is sentinel.manufacturer_id2 cmd_mock.reset_mock() await manuf_cluster2.configure_reporting( attr_name, min_interval=1, max_interval=1, reportable_change=1, manufacturer=sentinel.another_id, ) assert cmd_mock.call_count == 1 assert cmd_mock.call_args[1]["manufacturer"] is sentinel.another_id def test_different_manuf_same_model(real_device, real_device_2): """Test quirk matching for same model, but different manufacturers.""" class TestDevice_1(zigpy.quirks.CustomDevice): signature = { SIG_MODELS_INFO: 
(("manufacturer", "model"),), SIG_ENDPOINTS: { 1: { SIG_EP_PROFILE: 255, SIG_EP_TYPE: 255, SIG_EP_INPUT: [3], SIG_EP_OUTPUT: [6], } }, } def get_signature(self): pass class TestDevice_2(zigpy.quirks.CustomDevice): signature = { SIG_MODELS_INFO: (("A different manufacturer", "model"),), SIG_ENDPOINTS: { 1: { SIG_EP_PROFILE: 255, SIG_EP_TYPE: 255, SIG_EP_INPUT: [3], SIG_EP_OUTPUT: [6], } }, } def get_signature(self): pass registry = DeviceRegistry() registry.add_to_registry(TestDevice_1) assert isinstance(registry.get_device(real_device), TestDevice_1) assert registry.get_device(real_device_2) is real_device_2 registry.add_to_registry(TestDevice_2) assert isinstance(registry.get_device(real_device_2), TestDevice_2) assert not zigpy.quirks.get_quirk_list("manufacturer", "no such model") assert not zigpy.quirks.get_quirk_list("manufacturer", "no such model", registry) assert not zigpy.quirks.get_quirk_list("A different manufacturer", "no such model") assert not zigpy.quirks.get_quirk_list( "A different manufacturer", "no such model", registry ) assert not zigpy.quirks.get_quirk_list("no such manufacturer", "model") assert not zigpy.quirks.get_quirk_list("no such manufacturer", "model", registry) manuf1_list = zigpy.quirks.get_quirk_list("manufacturer", "model", registry) assert len(manuf1_list) == 1 assert manuf1_list[0] is TestDevice_1 manuf2_list = zigpy.quirks.get_quirk_list( "A different manufacturer", "model", registry ) assert len(manuf2_list) == 1 assert manuf2_list[0] is TestDevice_2 def test_quirk_match_order(real_device, real_device_2): """Test quirk matching order to allow user overrides via custom quirks.""" class BuiltInQuirk(zigpy.quirks.CustomDevice): signature = { SIG_MODELS_INFO: (("manufacturer", "model"),), SIG_ENDPOINTS: { 1: { SIG_EP_PROFILE: 255, SIG_EP_TYPE: 255, SIG_EP_INPUT: [3], SIG_EP_OUTPUT: [6], } }, } def get_signature(self): pass class CustomQuirk(BuiltInQuirk): pass registry = DeviceRegistry() registry.add_to_registry(BuiltInQuirk) # 
With only a single matching quirk there is no choice but to use the first one assert type(registry.get_device(real_device)) is BuiltInQuirk registry.add_to_registry(CustomQuirk) # A quirk registered later that also matches the device will be preferred assert type(registry.get_device(real_device)) is CustomQuirk def test_quirk_wildcard_manufacturer(real_device, real_device_2): """Test quirk matching with a wildcard (None) manufacturer.""" class BaseDev(zigpy.quirks.CustomDevice): def get_signature(self): pass class ModelsQuirk(BaseDev): signature = { SIG_MODELS_INFO: (("manufacturer", "model"),), SIG_ENDPOINTS: { 1: { SIG_EP_PROFILE: 255, SIG_EP_TYPE: 255, SIG_EP_INPUT: [3], SIG_EP_OUTPUT: [6], } }, } class ModelsQuirkNoMatch(BaseDev): # same model and manufacture, different endpoint signature signature = { SIG_MODELS_INFO: (("manufacturer", "model"),), SIG_ENDPOINTS: { 1: { SIG_EP_PROFILE: 260, SIG_EP_TYPE: 255, SIG_EP_INPUT: [3], SIG_EP_OUTPUT: [6], } }, } class ModelOnlyQuirk(BaseDev): # Wildcard Manufacturer signature = { SIG_MODEL: "model", SIG_ENDPOINTS: { 1: { SIG_EP_PROFILE: 255, SIG_EP_TYPE: 255, SIG_EP_INPUT: [3], SIG_EP_OUTPUT: [6], } }, } class ModelOnlyQuirkNoMatch(BaseDev): # Wildcard Manufacturer, none matching endpoint signature signature = { SIG_MODEL: "model", SIG_ENDPOINTS: { 1: { SIG_EP_PROFILE: 260, SIG_EP_TYPE: 255, SIG_EP_INPUT: [3], SIG_EP_OUTPUT: [6], } }, } registry = DeviceRegistry() for quirk in ModelsQuirk, ModelsQuirkNoMatch, ModelOnlyQuirk, ModelOnlyQuirkNoMatch: registry.add_to_registry(quirk) quirked = registry.get_device(real_device) assert isinstance(quirked, ModelsQuirk) quirked = registry.get_device(real_device_2) assert isinstance(quirked, ModelOnlyQuirk) real_device.manufacturer = ( "We are expected to match a manufacturer wildcard quirk now" ) quirked = registry.get_device(real_device) assert isinstance(quirked, ModelOnlyQuirk) real_device.model = "And now we should not match any quirk" quirked = 
registry.get_device(real_device) assert quirked is real_device async def test_manuf_id_disable(real_device): class TestCluster(ManufacturerSpecificCluster): cluster_id = 0xFF00 real_device.manufacturer_id_override = 0x1234 ep = real_device.endpoints[1] ep.add_input_cluster(TestCluster.cluster_id, TestCluster(ep)) assert isinstance(ep.just_a_cluster, TestCluster) assert ep.manufacturer_id == 0x1234 # The default behavior for a manufacturer-specific cluster, command, or attribute is # to include the manufacturer ID in the request with patch.object(ep, "request", AsyncMock()) as request_mock: request_mock.return_value = (zcl.foundation.Status.SUCCESS, "done") await ep.just_a_cluster.command( ep.just_a_cluster.commands_by_name["server_cmd0"].id, ) await ep.just_a_cluster.read_attributes(["attr0"]) await ep.just_a_cluster.write_attributes({"attr0": 1}) assert len(request_mock.mock_calls) == 3 for mock_call in request_mock.mock_calls: data = mock_call.args[2] hdr, _ = zcl.foundation.ZCLHeader.deserialize(data) assert hdr.manufacturer == 0x1234 # But it can be disabled by passing NO_MANUFACTURER_ID with patch.object(ep, "request", AsyncMock()) as request_mock: request_mock.return_value = (zcl.foundation.Status.SUCCESS, "done") await ep.just_a_cluster.command( ep.just_a_cluster.commands_by_name["server_cmd0"].id, manufacturer=zcl.foundation.ZCLHeader.NO_MANUFACTURER_ID, ) await ep.just_a_cluster.read_attributes( ["attr0"], manufacturer=zcl.foundation.ZCLHeader.NO_MANUFACTURER_ID ) await ep.just_a_cluster.write_attributes( {"attr0": 1}, manufacturer=zcl.foundation.ZCLHeader.NO_MANUFACTURER_ID ) assert len(request_mock.mock_calls) == 3 for mock_call in request_mock.mock_calls: data = mock_call.args[2] hdr, _ = zcl.foundation.ZCLHeader.deserialize(data) assert hdr.manufacturer is None async def test_request_with_kwargs(real_device): class CustomLevel(zigpy.quirks.CustomCluster, zcl.clusters.general.LevelControl): pass class TestQuirk(zigpy.quirks.CustomDevice): signature = { 
SIG_MODELS_INFO: (("manufacturer", "model"),), SIG_ENDPOINTS: { 1: { SIG_EP_PROFILE: 255, SIG_EP_TYPE: 255, SIG_EP_INPUT: [3], SIG_EP_OUTPUT: [6], } }, } replacement = { SIG_ENDPOINTS: { 1: { SIG_EP_PROFILE: 255, SIG_EP_TYPE: 255, SIG_EP_INPUT: [3, CustomLevel], SIG_EP_OUTPUT: [6], } }, } registry = DeviceRegistry() registry.add_to_registry(TestQuirk) quirked = registry.get_device(real_device) assert isinstance(quirked, TestQuirk) ep = quirked.endpoints[1] with patch.object(ep, "request", AsyncMock()) as request_mock: ep.device.get_sequence = MagicMock(return_value=1) await ep.level.move_to_level(0x00, 123) await ep.level.move_to_level(0x00, transition_time=123) await ep.level.move_to_level(level=0x00, transition_time=123) assert len(request_mock.mock_calls) == 3 assert all(c == request_mock.mock_calls[0] for c in request_mock.mock_calls) zigpy-0.62.3/tests/test_quirks_registry.py000066400000000000000000000133571456054056700210120ustar00rootroot00000000000000from unittest import mock import pytest from zigpy.const import SIG_MODELS_INFO from zigpy.quirks.registry import DeviceRegistry class FakeDevice: def __init__(self): self.signature = {} @pytest.fixture def fake_dev(): return FakeDevice() def test_add_to_registry_new_sig(fake_dev): fake_dev.signature = { 1: {}, 2: {}, 3: { "manufacturer": mock.sentinel.legacy_manufacturer, "model": mock.sentinel.legacy_model, }, "endpoints": { 1: { "manufacturer": mock.sentinel.manufacturer, "model": mock.sentinel.model, } }, "manufacturer": mock.sentinel.dev_manufacturer, "model": mock.sentinel.dev_model, } reg = DeviceRegistry() quirk_list = mock.MagicMock() model_dict = mock.MagicMock(spec_set=dict) model_dict.__getitem__.return_value = quirk_list manuf_dict = mock.MagicMock() manuf_dict.__getitem__.return_value = model_dict reg._registry = manuf_dict reg.add_to_registry(fake_dev) assert manuf_dict.__getitem__.call_count == 2 assert manuf_dict.__getitem__.call_args[0][0] is mock.sentinel.dev_manufacturer assert 
model_dict.__getitem__.call_count == 2 assert model_dict.__getitem__.call_args[0][0] is mock.sentinel.dev_model assert quirk_list.insert.call_count == 1 assert quirk_list.insert.call_args[0][1] is fake_dev quirk_list.reset_mock() model_dict.reset_mock() manuf_dict.reset_mock() def test_add_to_registry_models_info(fake_dev): fake_dev.signature = { 1: {}, 2: {}, 3: { "manufacturer": mock.sentinel.legacy_manufacturer, "model": mock.sentinel.legacy_model, }, "endpoints": { 1: { "manufacturer": mock.sentinel.manufacturer, "model": mock.sentinel.model, } }, SIG_MODELS_INFO: [ (mock.sentinel.manuf_1, mock.sentinel.model_1), (mock.sentinel.manuf_2, mock.sentinel.model_2), ], } reg = DeviceRegistry() quirk_list = mock.MagicMock() model_dict = mock.MagicMock(spec_set=dict) model_dict.__getitem__.return_value = quirk_list manuf_dict = mock.MagicMock() manuf_dict.__getitem__.return_value = model_dict reg._registry = manuf_dict reg.add_to_registry(fake_dev) assert manuf_dict.__getitem__.call_count == 4 assert manuf_dict.__getitem__.call_args_list[0][0][0] is mock.sentinel.manuf_1 assert manuf_dict.__getitem__.call_args_list[2][0][0] is mock.sentinel.manuf_2 assert model_dict.__getitem__.call_count == 4 assert model_dict.__getitem__.call_args_list[0][0][0] is mock.sentinel.model_1 assert model_dict.__getitem__.call_args_list[2][0][0] is mock.sentinel.model_2 assert quirk_list.insert.call_count == 2 assert quirk_list.insert.call_args_list[0][0][1] is fake_dev assert quirk_list.insert.call_args_list[1][0][1] is fake_dev quirk_list.reset_mock() model_dict.reset_mock() manuf_dict.reset_mock() def test_remove_new_sig(fake_dev): fake_dev.signature = { 1: {}, 2: {}, 3: { "manufacturer": mock.sentinel.legacy_manufacturer, "model": mock.sentinel.legacy_model, }, "endpoints": { 1: { "manufacturer": mock.sentinel.manufacturer, "model": mock.sentinel.model, } }, "manufacturer": mock.sentinel.dev_manufacturer, "model": mock.sentinel.dev_model, } reg = DeviceRegistry() quirk_list = 
mock.MagicMock() model_dict = mock.MagicMock(spec_set=dict) model_dict.__getitem__.return_value = quirk_list manuf_dict = mock.MagicMock() manuf_dict.__getitem__.return_value = model_dict reg._registry = manuf_dict reg.remove(fake_dev) assert manuf_dict.__getitem__.call_count == 1 assert manuf_dict.__getitem__.call_args[0][0] is mock.sentinel.dev_manufacturer assert model_dict.__getitem__.call_count == 1 assert model_dict.__getitem__.call_args[0][0] is mock.sentinel.dev_model assert quirk_list.insert.call_count == 0 assert quirk_list.remove.call_count == 1 assert quirk_list.remove.call_args[0][0] is fake_dev def test_remove_models_info(fake_dev): fake_dev.signature = { 1: {}, 2: {}, 3: { "manufacturer": mock.sentinel.legacy_manufacturer, "model": mock.sentinel.legacy_model, }, "endpoints": { 1: { "manufacturer": mock.sentinel.manufacturer, "model": mock.sentinel.model, } }, SIG_MODELS_INFO: [ (mock.sentinel.manuf_1, mock.sentinel.model_1), (mock.sentinel.manuf_2, mock.sentinel.model_2), ], } reg = DeviceRegistry() quirk_list = mock.MagicMock() model_dict = mock.MagicMock(spec_set=dict) model_dict.__getitem__.return_value = quirk_list manuf_dict = mock.MagicMock() manuf_dict.__getitem__.return_value = model_dict reg._registry = manuf_dict reg.remove(fake_dev) assert manuf_dict.__getitem__.call_count == 2 assert manuf_dict.__getitem__.call_args_list[0][0][0] is mock.sentinel.manuf_1 assert manuf_dict.__getitem__.call_args_list[1][0][0] is mock.sentinel.manuf_2 assert model_dict.__getitem__.call_count == 2 assert model_dict.__getitem__.call_args_list[0][0][0] is mock.sentinel.model_1 assert model_dict.__getitem__.call_args_list[1][0][0] is mock.sentinel.model_2 assert quirk_list.insert.call_count == 0 assert quirk_list.remove.call_count == 2 assert quirk_list.remove.call_args_list[0][0][0] is fake_dev assert quirk_list.remove.call_args_list[1][0][0] is fake_dev 
zigpy-0.62.3/tests/test_serial.py000066400000000000000000000030411456054056700170100ustar00rootroot00000000000000import unittest.mock import zigpy.serial @unittest.mock.patch( "zigpy.serial.pyserial_asyncio.create_serial_connection", unittest.mock.AsyncMock( return_value=(unittest.mock.AsyncMock(), unittest.mock.AsyncMock()) ), ) async def test_serial_normal(event_loop): protocol_factory = unittest.mock.Mock() await zigpy.serial.create_serial_connection( event_loop, protocol_factory, "/dev/ttyUSB1" ) mock_calls = zigpy.serial.pyserial_asyncio.create_serial_connection.mock_calls assert len(mock_calls) == 1 assert mock_calls[0].kwargs["url"] == "/dev/ttyUSB1" async def test_serial_socket(event_loop): protocol_factory = unittest.mock.Mock() with unittest.mock.patch.object( event_loop, "create_connection", unittest.mock.AsyncMock( return_value=(unittest.mock.AsyncMock(), unittest.mock.AsyncMock()) ), ): await zigpy.serial.create_serial_connection( event_loop, protocol_factory, "socket://1.2.3.4:5678" ) await zigpy.serial.create_serial_connection( event_loop, protocol_factory, "socket://1.2.3.4" ) assert len(event_loop.create_connection.mock_calls) == 2 assert event_loop.create_connection.mock_calls[0].kwargs["host"] == "1.2.3.4" assert event_loop.create_connection.mock_calls[0].kwargs["port"] == 5678 assert event_loop.create_connection.mock_calls[1].kwargs["host"] == "1.2.3.4" assert event_loop.create_connection.mock_calls[1].kwargs["port"] == 6638 zigpy-0.62.3/tests/test_struct.py000066400000000000000000000612111456054056700170600ustar00rootroot00000000000000from __future__ import annotations import enum from unittest import mock import pytest import zigpy.types as t from zigpy.zcl.foundation import Status import zigpy.zdo.types as zdo_t @pytest.fixture def expose_global(): """ `typing.get_type_hints` does not work for types defined within functions """ objects = [] def inner(obj): assert obj.__name__ not in globals() globals()[obj.__name__] = obj objects.append(obj) 
return obj yield inner for obj in objects: del globals()[obj.__name__] def test_enum_fields(): class EnumNamed(t.enum8): NAME1 = 0x01 NAME2 = 0x10 assert EnumNamed("0x01") == EnumNamed.NAME1 assert EnumNamed("1") == EnumNamed.NAME1 assert EnumNamed("0x10") == EnumNamed.NAME2 assert EnumNamed("16") == EnumNamed.NAME2 assert EnumNamed("NAME1") == EnumNamed.NAME1 assert EnumNamed("NAME2") == EnumNamed.NAME2 assert EnumNamed("EnumNamed.NAME1") == EnumNamed.NAME1 assert EnumNamed("EnumNamed.NAME2") == EnumNamed.NAME2 def test_struct_fields(): class TestStruct(t.Struct): a: t.uint8_t b: t.uint16_t assert TestStruct.fields.a.name == "a" assert TestStruct.fields.a.type == t.uint8_t assert TestStruct.fields.b.name == "b" assert TestStruct.fields.b.type == t.uint16_t def test_struct_subclass_creation(): # In-class constants are allowed class TestStruct3(t.Struct): CONSTANT1: int = 123 CONSTANT2 = 1234 _private1: int = 456 _private2 = 4567 _PRIVATE_CONST = mock.sentinel.priv_const class Test: pass assert not TestStruct3.fields assert TestStruct3.CONSTANT1 == 123 assert TestStruct3.CONSTANT2 == 1234 assert TestStruct3._private1 == 456 assert TestStruct3._private2 == 4567 assert TestStruct3._PRIVATE_CONST is mock.sentinel.priv_const assert TestStruct3()._PRIVATE_CONST is mock.sentinel.priv_const assert TestStruct3.Test # type: ignore[truthy-function] assert TestStruct3().Test assert "Test" not in TestStruct3().as_dict() # Still valid class TestStruct4(t.Struct): pass # Annotations with values are not fields class TestStruct5(t.Struct): a: t.uint8_t = 2 # not a field b: t.uint16_t # is a field inst6 = TestStruct5(123) assert "a" not in inst6.as_dict() assert "b" in inst6.as_dict() # unless they are a StructField class TestStruct6(t.Struct): a: t.uint8_t = t.StructField() assert "a" in TestStruct6(2).as_dict() def test_struct_construction(): class TestStruct(t.Struct): a: t.uint8_t b: t.LVBytes s1 = TestStruct(a=1) s1.b = b"foo" s2 = TestStruct(a=1, b=b"foo") assert s1 == s2 
assert s1.a == s2.a assert s1.replace(b=b"foo") == s2.replace(b=b"foo") assert s1.serialize() == s2.serialize() == b"\x01\x03foo" assert TestStruct(s1) == s1 # You cannot use the copy constructor with other keyword arguments with pytest.raises(ValueError): TestStruct(s1, b=b"foo") # Types are coerced on construction so you cannot pass bad values with pytest.raises(ValueError): TestStruct(a=object()) # You can still assign bad values but serialization will fail s1.serialize() s1.b = object() with pytest.raises(ValueError): s1.serialize() def test_nested_structs(expose_global): class OuterStruct(t.Struct): class InnerStruct(t.Struct): b: t.uint8_t c: t.uint8_t a: t.uint8_t inner: None = t.StructField(type=InnerStruct) d: t.uint8_t assert len(OuterStruct.fields) == 3 assert OuterStruct.fields.a.type is t.uint8_t assert OuterStruct.fields.inner.type is OuterStruct.InnerStruct assert len(OuterStruct.fields.inner.type.fields) == 2 assert OuterStruct.fields.d.type is t.uint8_t s, remaining = OuterStruct.deserialize(b"\x00\x01\x02\x03" + b"asd") assert remaining == b"asd" assert s.a == 0 assert s.inner.b == 1 assert s.inner.c == 2 assert s.d == 3 def test_nested_structs2(expose_global): class OuterStruct(t.Struct): class InnerStruct(t.Struct): b: t.uint8_t c: t.uint8_t a: t.uint8_t inner: None = t.StructField(type=InnerStruct) d: t.uint8_t assert len(OuterStruct.fields) == 3 assert OuterStruct.fields[0].type is t.uint8_t assert OuterStruct.fields[1].type is OuterStruct.InnerStruct assert len(OuterStruct.fields[1].type.fields) == 2 assert OuterStruct.fields[2].type is t.uint8_t s, remaining = OuterStruct.deserialize(b"\x00\x01\x02\x03" + b"asd") assert remaining == b"asd" assert s.a == 0 assert s.inner.b == 1 assert s.inner.c == 2 assert s.d == 3 def test_struct_init(): class TestStruct(t.Struct): a: t.uint8_t b: t.uint16_t c: t.CharacterString ts = TestStruct(a=1, b=0x0100, c="TestStruct") assert repr(ts) assert isinstance(ts.a, t.uint8_t) assert isinstance(ts.b, 
t.uint16_t) assert isinstance(ts.c, t.CharacterString) assert ts.a == 1 assert ts.b == 0x100 assert ts.c == "TestStruct" ts2, remaining = TestStruct.deserialize(b"\x01\x00\x01\x0aTestStruct") assert not remaining assert ts == ts2 assert ts.serialize() == ts2.serialize() ts3 = ts2.replace(b=0x0100) assert ts3 == ts2 assert ts3.serialize() == ts2.serialize() ts4 = ts2.replace(b=0x0101) assert ts4 != ts2 assert ts4.serialize() != ts2.serialize() def test_struct_string_is_none(): class TestStruct(t.Struct): a: t.CharacterString # str(None) == "None", which is bad with pytest.raises(ValueError): TestStruct(a=None).serialize() def test_struct_field_dependencies(): class TestStruct(t.Struct): foo: t.uint8_t status: Status bar: t.uint8_t = t.StructField(requires=lambda s: s.status == Status.SUCCESS) baz: t.uint8_t # Status is FAILURE so bar is not defined TestStruct(foo=1, status=Status.FAILURE, baz=2) ts1, remaining = TestStruct.deserialize( b"\x01" + Status.SUCCESS.serialize() + b"\x02\x03" ) assert not remaining assert ts1 == TestStruct(foo=1, status=Status.SUCCESS, bar=2, baz=3) ts2, remaining = TestStruct.deserialize( b"\x01" + Status.FAILURE.serialize() + b"\x02\x03" ) assert remaining == b"\x03" assert ts2 == TestStruct(foo=1, status=Status.FAILURE, bar=None, baz=2) def test_struct_field_invalid_dependencies(): class TestStruct(t.Struct): status: t.uint8_t value: t.uint8_t = t.StructField(requires=lambda s: s.status == 0x00) # Value will be ignored during serialization even though it has been assigned ts1 = TestStruct(status=0x01, value=0x02) assert ts1.serialize() == b"\x01" assert len(ts1.assigned_fields()) == 1 # Value wasn't provided but it is required ts2 = TestStruct(status=0x00, value=None) assert len(ts1.assigned_fields()) == 1 with pytest.raises(ValueError): ts2.serialize() # Value is not optional but doesn't need to be passed due to dependencies ts3 = TestStruct(status=0x01) assert ts3.serialize() == b"\x01" assert len(ts3.assigned_fields()) == 1 def 
test_struct_multiple_requires(expose_global): @expose_global class StrictStatus(t.enum8): SUCCESS = 0x00 FAILURE = 0x01 # Missing members cause a parsing failure _missing_ = enum.Enum._missing_ class TestStruct(t.Struct): foo: t.uint8_t status1: StrictStatus value1: t.uint8_t = t.StructField( requires=lambda s: s.status1 == StrictStatus.SUCCESS ) status2: StrictStatus value2: t.uint8_t = t.StructField( requires=lambda s: s.status2 == StrictStatus.SUCCESS ) # status1: success, status2: success ts0, remaining = TestStruct.deserialize( b"\x00" + StrictStatus.SUCCESS.serialize() + b"\x01" + StrictStatus.SUCCESS.serialize() + b"\x02" ) assert not remaining assert ts0 == TestStruct( foo=0, status1=StrictStatus.SUCCESS, value1=1, status2=StrictStatus.SUCCESS, value2=2, ) # status1: failure, status2: success ts1, remaining = TestStruct.deserialize( b"\x00" + StrictStatus.FAILURE.serialize() + StrictStatus.SUCCESS.serialize() + b"\x02" ) assert not remaining assert ts1 == TestStruct( foo=0, status1=StrictStatus.FAILURE, status2=StrictStatus.SUCCESS, value2=2 ) # status1: success, status2: failure, trailing ts2, remaining = TestStruct.deserialize( b"\x00" + StrictStatus.SUCCESS.serialize() + b"\x01" + StrictStatus.FAILURE.serialize() + b"\x02" ) assert remaining == b"\x02" assert ts2 == TestStruct( foo=0, status1=StrictStatus.SUCCESS, value1=1, status2=StrictStatus.FAILURE ) # status1: failure, status2: failure ts3, remaining = TestStruct.deserialize( b"\x00" + StrictStatus.FAILURE.serialize() + StrictStatus.FAILURE.serialize() ) assert not remaining assert ts3 == TestStruct( foo=0, status1=StrictStatus.FAILURE, status2=StrictStatus.FAILURE ) with pytest.raises(ValueError): # status1: failure TestStruct.deserialize(b"\x00" + StrictStatus.FAILURE.serialize()) with pytest.raises(ValueError): # status1: failure, invalid trailing TestStruct.deserialize(b"\x00" + StrictStatus.FAILURE.serialize() + b"\xff") def test_struct_equality(): class TestStruct1(t.Struct): foo: t.uint8_t 
class TestStruct2(t.Struct): foo: t.uint8_t assert TestStruct1() != TestStruct2() assert TestStruct1(foo=1) != TestStruct2(foo=1) assert TestStruct1() == TestStruct1() assert TestStruct1(foo=1) == TestStruct1(foo=1) @pytest.mark.parametrize( "data", [ b"\x00", b"\x00\x00", b"\x01", b"\x01\x00", b"\x01\x02\x03", b"", b"\x00\x00\x00\x00", ], ) def test_struct_subclass_extension(data): class TestStruct(t.Struct): foo: t.uint8_t class TestStructSubclass(TestStruct): bar: t.uint8_t = t.StructField(requires=lambda s: s.foo == 0x01) class TestCombinedStruct(t.Struct): foo: t.uint8_t bar: t.uint8_t = t.StructField(requires=lambda s: s.foo == 0x01) assert len(TestStructSubclass.fields) == 2 assert len(TestCombinedStruct.fields) == 2 error1 = None error2 = None try: ts1, remaining1 = TestStructSubclass.deserialize(data) except Exception as e: error1 = e try: ts2, remaining2 = TestCombinedStruct.deserialize(data) except Exception as e: error2 = e assert (error1 and error2) or (not error1 and not error2) if error1 or error2: assert repr(error1) == repr(error2) else: assert ts1.as_dict() == ts2.as_dict() assert remaining1 == remaining2 def test_optional_struct_special_case(): class TestStruct(t.Struct): foo: t.uint8_t OptionalTestStruct = t.Optional(TestStruct) assert OptionalTestStruct.deserialize(b"") == (None, b"") assert OptionalTestStruct.deserialize(b"\x00") == ( OptionalTestStruct(foo=0x00), b"", ) def test_conflicting_types(): class GoodStruct(t.Struct): foo: t.uint8_t = t.StructField(type=t.uint8_t) with pytest.raises(TypeError): class BadStruct(t.Struct): foo: t.uint8_t = t.StructField(type=t.uint16_t) def test_requires_uses_instance_of_struct(): class TestStruct(t.Struct): foo: t.uint8_t # the first parameter is really an instance of TestStruct bar: t.uint8_t = t.StructField(requires=lambda s: s.test) @property def test(self): assert isinstance(self, TestStruct) return self.foo == 0x01 assert TestStruct.deserialize(b"\x00\x00") == (TestStruct(foo=0x00), b"\x00") 
assert TestStruct.deserialize(b"\x01\x00") == (TestStruct(foo=0x01, bar=0x00), b"") def test_uppercase_field(): class Neighbor(t.Struct): """Neighbor Descriptor""" PanId: t.EUI64 IEEEAddr: t.EUI64 NWKAddr: t.NWK NeighborType: t.uint8_t PermitJoining: t.uint8_t Depth: t.uint8_t LQI: t.uint8_t # this should not be a constant assert len(Neighbor.fields) == 7 assert Neighbor.fields[6].name == "LQI" assert Neighbor.fields[6].type == t.uint8_t def test_non_annotated_field(): with pytest.raises(TypeError): class TestStruct1(t.Struct): field1: t.uint8_t # Python does not provide any simple way to get the order of both defined # class attributes and annotations. This is bad. field2 = t.StructField(type=t.uint16_t) field3: t.uint32_t class TestStruct2(t.Struct): field1: t.uint8_t field2: None = t.StructField(type=t.uint16_t) field3: t.uint32_t assert len(TestStruct2.fields) == 3 assert TestStruct2.fields[0] == t.StructField(name="field1", type=t.uint8_t) assert TestStruct2.fields[1] == t.StructField(name="field2", type=t.uint16_t) assert TestStruct2.fields[2] == t.StructField(name="field3", type=t.uint32_t) def test_allowed_non_fields(): class Other: def bar(self): return "bar" def foo2_(_): return "foo2" class TestStruct(t.Struct): @property def prop(self): return "prop" @prop.setter def prop(self, value): return foo1 = lambda _: "foo1" # noqa: E731 foo2 = foo2_ bar = Other.bar field: t.uint8_t CONSTANT1: t.uint8_t = "CONSTANT1" CONSTANT2 = "CONSTANT2" assert len(TestStruct.fields) == 1 assert TestStruct.CONSTANT1 == "CONSTANT1" assert TestStruct.CONSTANT2 == "CONSTANT2" assert TestStruct().prop == "prop" assert TestStruct().foo1() == "foo1" assert TestStruct().foo2() == "foo2" assert TestStruct().bar() == "bar" instance = TestStruct() instance.prop = None assert instance.prop == "prop" def test_as_dict_empty_fields(): class TestStruct(t.Struct): foo: t.uint8_t bar: t.uint8_t = t.StructField(requires=lambda s: s.foo == 0x01) assert TestStruct(foo=1, bar=2).as_dict() == 
{"foo": 1, "bar": 2} assert TestStruct(foo=0, bar=2).as_dict() == {"foo": 0, "bar": 2} assert TestStruct(foo=0).as_dict() == {"foo": 0, "bar": None} # Same thing as above but assigned as attributes ts1 = TestStruct() ts1.foo = 1 ts1.bar = 2 assert ts1.as_dict() == {"foo": 1, "bar": 2} ts2 = TestStruct() ts2.foo = 0 ts2.bar = 2 assert ts2.as_dict() == {"foo": 0, "bar": 2} ts3 = TestStruct() ts3.foo = 0 assert ts3.as_dict() == {"foo": 0, "bar": None} def test_no_types(): with pytest.raises(TypeError): class TestBadStruct(t.Struct): field: None = t.StructField() def test_repr(): class TestStruct(t.Struct): foo: t.uint8_t assert repr(TestStruct(foo=1)) == "TestStruct(foo=1)" assert repr(TestStruct(foo=None)) == "TestStruct()" # Invalid values still work ts = TestStruct() ts.foo = 1j assert repr(ts) == "TestStruct(foo=1j)" def test_repr_properties(): class TestStruct(t.Struct): foo: t.uint8_t bar: t.uint8_t @property def baz(self): if self.bar is None: return None return t.Bool((self.bar & 0xF0) >> 4) assert repr(TestStruct(foo=1)) == "TestStruct(foo=1)" assert ( repr(TestStruct(foo=1, bar=16)) == "TestStruct(foo=1, bar=16, *baz=)" ) assert repr(TestStruct()) == "TestStruct()" def test_bitstruct_simple(): class BitStruct1(t.Struct): foo: t.uint4_t bar: t.uint4_t s = BitStruct1(foo=0b1100, bar=0b1010) assert s.serialize() == bytes([0b1010_1100]) s2, remaining = BitStruct1.deserialize(b"\x01\x02") assert remaining == b"\x02" assert s2.foo == 0b0001 assert s2.bar == 0b0000 def test_bitstruct_nesting(expose_global): @expose_global class InnerBitStruct(t.Struct): baz1: t.uint1_t baz2: t.uint3_t baz3: t.uint1_t baz4: t.uint3_t class OuterStruct(t.Struct): foo: t.LVBytes bar: InnerBitStruct asd: t.uint8_t inner = InnerBitStruct(baz1=0b1, baz2=0b010, baz3=0b0, baz4=0b111) assert inner.serialize() == bytes([0b111_0_010_1]) assert InnerBitStruct.deserialize(inner.serialize() + b"asd") == (inner, b"asd") s = OuterStruct(foo=b"asd", bar=inner, asd=0xFF) assert s.serialize() == 
b"\x03asd" + bytes([0b111_0_010_1]) + b"\xFF" s2, remaining = OuterStruct.deserialize(s.serialize() + b"test") assert remaining == b"test" assert s == s2 def test_bitstruct_misaligned(): class TestStruct(t.Struct): foo: t.uint1_t bar: t.uint8_t # Even though this field is byte-serializable, it is misaligned baz: t.uint7_t s = TestStruct(foo=0b1, bar=0b10101010, baz=0b1110111) assert s.serialize() == bytes([0b1110111_1, 0b0101010_1]) s2, remaining = TestStruct.deserialize(s.serialize() + b"asd") assert s == s2 with pytest.raises(ValueError): TestStruct.deserialize(b"\xFF") def test_non_byte_sized_struct(): class TestStruct(t.Struct): foo: t.uint1_t bar: t.uint8_t s = TestStruct(foo=1, bar=2) with pytest.raises(ValueError): s.serialize() with pytest.raises(ValueError): TestStruct.deserialize(b"\x00\x00\x00\x00") def test_non_aligned_struct_non_integer_types(): class TestStruct(t.Struct): foo: t.uint1_t bar: t.data8 foo: t.uint7_t s = TestStruct(foo=1, bar=[2]) with pytest.raises(ValueError): s.serialize() with pytest.raises(ValueError): TestStruct.deserialize(b"\x00\x00\x00\x00") def test_bitstruct_complex(): data = ( b"\x11\x00\xff\xee\xdd\xcc\xbb\xaa\x08\x07\x06" b"\x05\x04\x03\x02\x01\x00\x00\x24\x02\x00\x7c" ) neighbor, rest = zdo_t.Neighbor.deserialize(data + b"asd") assert rest == b"asd" neighbor2 = zdo_t.Neighbor( extended_pan_id=t.ExtendedPanId.convert("aa:bb:cc:dd:ee:ff:00:11"), ieee=t.EUI64.convert("01:02:03:04:05:06:07:08"), nwk=0x0000, device_type=zdo_t.Neighbor.DeviceType.Coordinator, rx_on_when_idle=zdo_t.Neighbor.RxOnWhenIdle.On, relationship=zdo_t.Neighbor.RelationShip.Sibling, reserved1=0b0, permit_joining=zdo_t.Neighbor.PermitJoins.Unknown, reserved2=0b000000, depth=0, lqi=124, ) assert neighbor == neighbor2 assert neighbor2.serialize() == data def test_int_struct(): class NonIntegralStruct(t.Struct): foo: t.uint8_t with pytest.raises(TypeError): int(NonIntegralStruct(123)) class IntegralStruct(t.Struct, t.uint32_t): foo: t.uint8_t bar: t.uint16_t 
baz: t.uint7_t asd: t.uint1_t class IntegralStruct2(IntegralStruct): pass assert ( IntegralStruct(0b0_1110001_1100110011001100_10101010) == IntegralStruct( foo=0b10101010, bar=0b1100110011001100, baz=0b1110001, asd=0b0, ) == 0b0_1110001_1100110011001100_10101010 ) assert ( IntegralStruct2(0b0_1110001_1100110011001100_10101010) == IntegralStruct2( foo=0b10101010, bar=0b1100110011001100, baz=0b1110001, asd=0b0, ) == 0b0_1110001_1100110011001100_10101010 ) with pytest.raises(ValueError): # One extra bit IntegralStruct(0b1_0_1110001_1100110011001100_10101010) assert issubclass(IntegralStruct, t.uint32_t) assert issubclass(IntegralStruct, int) assert isinstance(IntegralStruct(), t.uint32_t) assert isinstance(IntegralStruct(), int) def test_struct_optional(): class TestStruct(t.Struct): foo: t.uint8_t bar: t.uint16_t baz: t.uint8_t = t.StructField(requires=lambda s: s.bar == 2, optional=True) s1 = TestStruct(foo=1, bar=2, baz=3) assert s1.serialize() == b"\x01\x02\x00\x03" assert TestStruct.deserialize(s1.serialize() + b"asd") == (s1, b"asd") assert s1.replace(baz=None).serialize() == b"\x01\x02\x00" assert s1.replace(bar=4).serialize() == b"\x01\x04\x00" assert TestStruct.deserialize(b"\x01\x03\x00\x04") == ( TestStruct(foo=1, bar=3), b"\x04", ) def test_struct_field_repr(): class TestStruct(t.Struct): foo: t.uint8_t = t.StructField(repr=lambda v: v + 1) bar: t.uint16_t = t.StructField(repr=lambda v: "bar") baz: t.CharacterString = t.StructField(repr=lambda v: "baz") s1 = TestStruct(foo=1, bar=2, baz="asd") assert repr(s1) == "TestStruct(foo=2, bar=bar, baz=baz)" def test_skip_missing(): class TestStruct(t.Struct): foo: t.uint8_t bar: t.uint16_t assert TestStruct(foo=1).as_dict() == {"foo": 1, "bar": None} assert TestStruct(foo=1).as_dict(skip_missing=True) == {"foo": 1} assert TestStruct(foo=1).as_tuple() == (1, None) assert TestStruct(foo=1).as_tuple(skip_missing=True) == (1,) def test_from_dict(expose_global): @expose_global class InnerStruct(t.Struct): field1: 
t.uint8_t field2: t.CharacterString class TestStruct(t.Struct): foo: t.uint8_t bar: InnerStruct baz: t.CharacterString s = TestStruct(foo=1, bar=InnerStruct(field1=2, field2="field2"), baz="field3") assert s == TestStruct.from_dict(s.as_dict(recursive=True)) def test_matching(expose_global): @expose_global class InnerStruct(t.Struct): field1: t.uint8_t field2: t.CharacterString class TestStruct(t.Struct): foo: t.uint8_t bar: InnerStruct baz: t.CharacterString assert TestStruct().matches(TestStruct()) assert not TestStruct().matches(InnerStruct()) assert TestStruct(foo=1).matches(TestStruct(foo=1)) assert not TestStruct(foo=1).matches(TestStruct(foo=2)) assert TestStruct(foo=1).matches(TestStruct()) s = TestStruct(foo=1, bar=InnerStruct(field1=2, field2="asd"), baz="foo") assert s.matches(s) assert s.matches(TestStruct()) assert s.matches(TestStruct(bar=InnerStruct())) assert s.matches(TestStruct(bar=InnerStruct(field1=2, field2="asd"))) assert not s.matches(TestStruct(bar=InnerStruct(field1=3))) def test_dynamic_type(): class TestStruct(t.Struct): foo: t.uint8_t baz: None = t.StructField( dynamic_type=lambda s: t.LVBytes if s.foo == 0x00 else t.uint8_t ) assert TestStruct.deserialize(b"\x00\x04test") == ( TestStruct(foo=0x00, baz=b"test"), b"", ) assert TestStruct.deserialize(b"\x01\x04test") == ( TestStruct(foo=0x01, baz=0x04), b"test", ) assert TestStruct(foo=0x00, baz=b"test").serialize() == b"\x00\x04test" assert TestStruct(foo=0x01, baz=0x04).serialize() == b"\x01\x04" def test_int_comparison(expose_global): @expose_global class FirmwarePlatform(t.enum8): Conbee = 0x05 Conbee_II = 0x07 Conbee_III = 0x09 class FirmwareVersion(t.Struct, t.uint32_t): reserved: t.uint8_t platform: FirmwarePlatform minor: t.uint8_t major: t.uint8_t fw_ver = FirmwareVersion(0x264F0900) assert fw_ver == FirmwareVersion( reserved=0, platform=FirmwarePlatform.Conbee_III, minor=79, major=38 ) assert fw_ver == 0x264F0900 assert int(fw_ver) == 0x264F0900 assert "0x264F0900" in str(fw_ver) 
assert int(fw_ver) <= fw_ver assert fw_ver <= int(fw_ver) assert int(fw_ver) - 1 < fw_ver assert fw_ver < int(fw_ver) + 1 assert int(fw_ver) >= fw_ver assert fw_ver >= int(fw_ver) assert int(fw_ver) + 1 > fw_ver assert fw_ver > int(fw_ver) - 1 def test_int_comparison_non_int(expose_global): @expose_global class FirmwarePlatform(t.enum8): Conbee = 0x05 Conbee_II = 0x07 Conbee_III = 0x09 # This isn't an integer class FirmwareVersion(t.Struct): reserved: t.uint8_t platform: FirmwarePlatform minor: t.uint8_t major: t.uint8_t fw_ver = FirmwareVersion( reserved=0, platform=FirmwarePlatform.Conbee_III, minor=79, major=38 ) with pytest.raises(TypeError): fw_ver < 0 with pytest.raises(TypeError): fw_ver <= 0 with pytest.raises(TypeError): fw_ver > 0 with pytest.raises(TypeError): fw_ver >= 0 zigpy-0.62.3/tests/test_topology.py000066400000000000000000000312001456054056700174030ustar00rootroot00000000000000from __future__ import annotations import asyncio import contextlib from unittest import mock import pytest import zigpy.config as conf import zigpy.device import zigpy.endpoint import zigpy.profiles import zigpy.topology import zigpy.types as t import zigpy.zdo.types as zdo_t from tests.conftest import App, make_ieee, make_neighbor, make_route @pytest.fixture(autouse=True) def remove_request_delay(): with mock.patch("zigpy.topology.REQUEST_DELAY", new=(0, 0)): yield @pytest.fixture def topology(make_initialized_device): app = App( conf.ZIGPY_SCHEMA( { conf.CONF_DEVICE: {conf.CONF_DEVICE_PATH: "/dev/null"}, conf.CONF_TOPO_SKIP_COORDINATOR: True, } ) ) coordinator = make_initialized_device(app) coordinator.nwk = 0x0000 app.state.node_info.nwk = coordinator.nwk app.state.node_info.ieee = coordinator.ieee app.state.node_info.logical_type = zdo_t.LogicalType.Coordinator return zigpy.topology.Topology(app) @contextlib.contextmanager def patch_device_tables( device: zigpy.device.Device, neighbors: list | BaseException | zdo_t.Status, routes: list | BaseException | zdo_t.Status, 
): def mgmt_lqi_req(StartIndex: t.uint8_t): status = zdo_t.Status.SUCCESS entries = 0 start_index = 0 table: list[zdo_t.Neighbor] = [] if isinstance(neighbors, zdo_t.Status): status = neighbors elif isinstance(neighbors, BaseException): raise neighbors else: entries = len(neighbors) start_index = StartIndex table = neighbors[StartIndex : StartIndex + 3] return list( { "Status": status, "Neighbors": zdo_t.Neighbors( Entries=entries, StartIndex=start_index, NeighborTableList=table, ), }.values() ) def mgmt_rtg_req(StartIndex: t.uint8_t): status = zdo_t.Status.SUCCESS entries = 0 start_index = 0 table: list[zdo_t.Route] = [] if isinstance(routes, zdo_t.Status): status = routes elif isinstance(routes, BaseException): raise routes else: entries = len(routes) start_index = StartIndex table = routes[StartIndex : StartIndex + 3] return list( { "Status": status, "Routes": zdo_t.Routes( Entries=entries, StartIndex=start_index, RoutingTableList=table, ), }.values() ) lqi_req_patch = mock.patch.object( device.zdo, "Mgmt_Lqi_req", mock.AsyncMock(side_effect=mgmt_lqi_req, spec_set=device.zdo.Mgmt_Lqi_req), ) rtg_req_patch = mock.patch.object( device.zdo, "Mgmt_Rtg_req", mock.AsyncMock(side_effect=mgmt_rtg_req, spec_set=device.zdo.Mgmt_Rtg_req), ) with lqi_req_patch, rtg_req_patch: yield async def test_scan_no_devices(topology) -> None: await topology.scan() assert not topology.neighbors assert not topology.routes @pytest.mark.parametrize( "neighbors, routes", [ ([], asyncio.TimeoutError()), ([], []), (asyncio.TimeoutError(), asyncio.TimeoutError()), ], ) async def test_scan_failures( topology, make_initialized_device, neighbors, routes ) -> None: dev = make_initialized_device(topology._app) with patch_device_tables(dev, neighbors=neighbors, routes=routes): await topology.scan() assert len(dev.zdo.Mgmt_Lqi_req.mock_calls) == 1 if not neighbors else 3 assert len(dev.zdo.Mgmt_Rtg_req.mock_calls) == 1 if not routes else 3 assert not topology.neighbors[dev.ieee] assert not 
topology.routes[dev.ieee] async def test_neighbors_not_supported(topology, make_initialized_device) -> None: dev = make_initialized_device(topology._app) with patch_device_tables(dev, neighbors=zdo_t.Status.NOT_SUPPORTED, routes=[]): await topology.scan() assert len(dev.zdo.Mgmt_Lqi_req.mock_calls) == 1 assert len(dev.zdo.Mgmt_Rtg_req.mock_calls) == 1 await topology.scan() assert len(dev.zdo.Mgmt_Lqi_req.mock_calls) == 1 assert len(dev.zdo.Mgmt_Rtg_req.mock_calls) == 2 async def test_routes_not_supported(topology, make_initialized_device) -> None: dev = make_initialized_device(topology._app) with patch_device_tables(dev, neighbors=[], routes=zdo_t.Status.NOT_SUPPORTED): await topology.scan() assert len(dev.zdo.Mgmt_Lqi_req.mock_calls) == 1 assert len(dev.zdo.Mgmt_Rtg_req.mock_calls) == 1 await topology.scan() assert len(dev.zdo.Mgmt_Lqi_req.mock_calls) == 2 assert len(dev.zdo.Mgmt_Rtg_req.mock_calls) == 1 async def test_routes_and_neighbors_not_supported( topology, make_initialized_device ) -> None: dev = make_initialized_device(topology._app) with patch_device_tables( dev, neighbors=zdo_t.Status.NOT_SUPPORTED, routes=zdo_t.Status.NOT_SUPPORTED ): await topology.scan() assert len(dev.zdo.Mgmt_Lqi_req.mock_calls) == 1 assert len(dev.zdo.Mgmt_Rtg_req.mock_calls) == 1 await topology.scan() assert len(dev.zdo.Mgmt_Lqi_req.mock_calls) == 1 assert len(dev.zdo.Mgmt_Rtg_req.mock_calls) == 1 async def test_scan_end_device(topology, make_initialized_device) -> None: dev = make_initialized_device(topology._app) dev.node_desc.logical_type = zdo_t.LogicalType.EndDevice with patch_device_tables(dev, neighbors=[], routes=[]): await topology.scan() # The device will not be scanned because it is not a router assert len(dev.zdo.Mgmt_Lqi_req.mock_calls) == 0 assert len(dev.zdo.Mgmt_Rtg_req.mock_calls) == 0 async def test_scan_explicit_device(topology, make_initialized_device) -> None: dev1 = make_initialized_device(topology._app) dev2 = make_initialized_device(topology._app) with 
patch_device_tables(dev1, neighbors=[], routes=[]): with patch_device_tables(dev2, neighbors=[], routes=[]): await topology.scan(devices=[dev2]) # Only the second device was scanned assert len(dev1.zdo.Mgmt_Lqi_req.mock_calls) == 0 assert len(dev1.zdo.Mgmt_Rtg_req.mock_calls) == 0 assert len(dev2.zdo.Mgmt_Lqi_req.mock_calls) == 1 assert len(dev2.zdo.Mgmt_Rtg_req.mock_calls) == 1 async def test_scan_router_many(topology, make_initialized_device) -> None: dev = make_initialized_device(topology._app) with patch_device_tables( dev, neighbors=[ make_neighbor(ieee=make_ieee(2 + i), nwk=0x1234 + i) for i in range(100) ], routes=[ make_route(dest_nwk=0x1234 + i, next_hop=0x1234 + i) for i in range(100) ], ): await topology.scan() # We only permit three scans per request assert len(dev.zdo.Mgmt_Lqi_req.mock_calls) == 34 assert len(dev.zdo.Mgmt_Rtg_req.mock_calls) == 34 assert topology.neighbors[dev.ieee] == [ make_neighbor(ieee=make_ieee(2 + i), nwk=0x1234 + i) for i in range(100) ] assert topology.routes[dev.ieee] == [ make_route(dest_nwk=0x1234 + i, next_hop=0x1234 + i) for i in range(100) ] async def test_scan_skip_coordinator(topology, make_initialized_device) -> None: coordinator = topology._app._device assert coordinator.nwk == 0x0000 with patch_device_tables(coordinator, neighbors=[], routes=[]): await topology.scan() assert len(coordinator.zdo.Mgmt_Lqi_req.mock_calls) == 0 assert len(coordinator.zdo.Mgmt_Rtg_req.mock_calls) == 0 assert not topology.neighbors[coordinator.ieee] assert not topology.routes[coordinator.ieee] async def test_scan_coordinator(topology) -> None: app = topology._app app.config[conf.CONF_TOPO_SKIP_COORDINATOR] = False coordinator = app._device coordinator.node_desc.logical_type = zdo_t.LogicalType.Coordinator assert coordinator.nwk == 0x0000 with patch_device_tables( coordinator, neighbors=[ make_neighbor(ieee=make_ieee(2), nwk=0x1234), ], routes=[ make_route(dest_nwk=0x1234, next_hop=0x1234), ], ): await topology.scan() assert 
len(coordinator.zdo.Mgmt_Lqi_req.mock_calls) == 1 assert len(coordinator.zdo.Mgmt_Rtg_req.mock_calls) == 1 assert topology.neighbors[coordinator.ieee] == [ make_neighbor(ieee=make_ieee(2), nwk=0x1234) ] assert topology.routes[coordinator.ieee] == [ make_route(dest_nwk=0x1234, next_hop=0x1234) ] @mock.patch("zigpy.application.ControllerApplication._discover_unknown_device") async def test_discover_new_devices( discover_unknown_device, topology, make_initialized_device ) -> None: dev1 = make_initialized_device(topology._app) dev2 = make_initialized_device(topology._app) await topology._find_unknown_devices( neighbors={ dev1.ieee: [ # Existing devices make_neighbor(ieee=dev1.ieee, nwk=dev1.nwk), make_neighbor(ieee=dev2.ieee, nwk=dev2.nwk), # Unknown device make_neighbor( ieee=t.EUI64.convert("aa:bb:cc:dd:11:22:33:44"), nwk=0xFF00 ), ], dev2.ieee: [], }, routes={ dev1.ieee: [ # Existing devices make_route(dest_nwk=dev1.nwk, next_hop=dev1.nwk), make_route(dest_nwk=dev2.nwk, next_hop=dev2.nwk), # Via existing devices make_route(dest_nwk=0xFF01, next_hop=dev2.nwk), make_route(dest_nwk=dev2.nwk, next_hop=0xFF02), # Inactive route make_route( dest_nwk=0xFF03, next_hop=0xFF04, status=zdo_t.RouteStatus.Inactive ), ], dev2.ieee: [], }, ) assert len(discover_unknown_device.mock_calls) == 3 assert mock.call(0xFF00) in discover_unknown_device.mock_calls assert mock.call(0xFF01) in discover_unknown_device.mock_calls assert mock.call(0xFF02) in discover_unknown_device.mock_calls @mock.patch("zigpy.topology.Topology._scan") async def test_scan_start_concurrent(mock_scan, topology): concurrency = 0 max_concurrency = 0 async def _scan(_): nonlocal concurrency nonlocal max_concurrency concurrency += 1 max_concurrency = max(concurrency, max_concurrency) try: await asyncio.sleep(0.01) finally: concurrency -= 1 max_concurrency = max(concurrency, max_concurrency) mock_scan.side_effect = _scan topology.start_periodic_scans(0.1) topology.start_periodic_scans(0.1) 
topology.start_periodic_scans(0.1) topology.start_periodic_scans(0.1) topology.start_periodic_scans(0.1) scan1 = asyncio.create_task(topology.scan()) scan2 = asyncio.create_task(topology.scan()) await asyncio.sleep(0.01) with pytest.raises(asyncio.CancelledError): await scan1 await scan2 # Wait for a "scan" to finish await asyncio.sleep(0.15) await topology._scan_task topology.stop_periodic_scans() # Only a single one was actually running assert max_concurrency == 1 topology.stop_periodic_scans() await asyncio.sleep(0) # All of the tasks have been stopped assert topology._scan_task.done() assert topology._scan_loop_task.done() @mock.patch("zigpy.topology.Topology.scan", side_effect=RuntimeError()) async def test_periodic_scan_failure(mock_scan, topology): topology.start_periodic_scans(0.01) await asyncio.sleep(0.1) topology.stop_periodic_scans() async def test_periodic_scan_priority(topology): async def _scan(_): await asyncio.sleep(0.5) with mock.patch.object(topology, "_scan", side_effect=_scan) as mock_scan: scan_task = asyncio.create_task(topology.scan()) await asyncio.sleep(0.1) # Start a periodic scan. 
It won't have time to run yet, the old scan is running topology.start_periodic_scans(0.05) # Wait for the original scan to finish await scan_task # Start another scan, interrupting the periodic scan await asyncio.sleep(0.15) await topology.scan() # Now we can cancel the periodic scan topology.stop_periodic_scans() await asyncio.sleep(0) # Our two manual scans succeeded and the periodic one was attempted assert len(mock_scan.mock_calls) == 3 zigpy-0.62.3/tests/test_types.py000066400000000000000000000536561456054056700167160ustar00rootroot00000000000000import itertools import math import struct import pytest import zigpy.types as t def test_abstract_ints(): assert issubclass(t.uint8_t, t.uint_t) assert not issubclass(t.uint8_t, t.int_t) assert t.int_t._signed is True assert t.uint_t._signed is False assert t.int_t._byteorder == "little" assert t.int_t_be._byteorder == "big" with pytest.raises(TypeError): t.int_t(0) with pytest.raises(TypeError): t.FixedIntType(0) def test_int_out_of_bounds(): assert t.uint8_t._size == 1 assert t.uint8_t._bits == 8 t.uint8_t(0) with pytest.raises(ValueError): # Normally this would throw an OverflowError. We re-raise it as a ValueError. 
t.uint8_t(-1) with pytest.raises(ValueError): t.uint8_t(0xFF + 1) def test_int_too_short(): with pytest.raises(ValueError): t.uint8_t.deserialize(b"") with pytest.raises(ValueError): t.uint16_t.deserialize(b"\x00") def test_fractional_ints_corner(): assert t.uint1_t._size is None assert t.uint1_t._bits == 1 assert t.uint1_t.min_value == 0 assert t.uint1_t.max_value == 1 assert t.uint1_t(0) == 0 assert t.uint1_t(1) == 1 with pytest.raises(ValueError): t.uint1_t(-1) with pytest.raises(ValueError): t.uint1_t(2) n = t.uint1_t(0b1) with pytest.raises(TypeError): n.serialize() assert t.uint1_t(0).bits() == [0] assert t.uint1_t(1).bits() == [1] assert t.uint1_t.from_bits([1, 1]) == (1, [1]) assert t.uint1_t.from_bits([0, 1]) == (1, [0]) def test_fractional_ints_larger(): assert t.uint7_t._size is None assert t.uint7_t._bits == 7 assert t.uint7_t.min_value == 0 assert t.uint7_t.max_value == 2**7 - 1 assert t.uint7_t(0) == 0 assert t.uint7_t(1) == 1 assert t.uint7_t(0b1111111) == 0b1111111 with pytest.raises(ValueError): t.uint7_t(-1) with pytest.raises(ValueError): t.uint7_t(0b1111111 + 1) n = t.uint7_t(0b1111111) with pytest.raises(TypeError): n.serialize() assert t.uint7_t(0).bits() == [0, 0, 0, 0, 0, 0, 0] assert t.uint7_t(1).bits() == [0, 0, 0, 0, 0, 0, 1] assert t.uint7_t(0b1011111).bits() == [1, 0, 1, 1, 1, 1, 1] assert t.uint7_t.from_bits([1, 0, 1, 1, 1, 1, 0, 1, 1, 1]) == (0b1110111, [1, 0, 1]) with pytest.raises(ValueError): assert t.uint7_t.from_bits([1] * 6) def test_ints_signed(): class int7s(t.int_t, bits=7): pass assert int7s._size is None assert int7s._bits == 7 assert int7s(0) == 0 assert int7s(1) == 1 assert int7s(-1) == -1 assert int7s(2**6 - 1) == 2**6 - 1 assert int7s(-(2**6)) == -(2**6) with pytest.raises(ValueError): int7s(2**6) with pytest.raises(ValueError): int7s(-(2**6) - 1) n = int7s(2**6 - 1) with pytest.raises(TypeError): n.serialize() assert int7s(0).bits() == [0, 0, 0, 0, 0, 0, 0] assert int7s(1).bits() == [0, 0, 0, 0, 0, 0, 1] assert 
int7s(-1).bits() == [1, 1, 1, 1, 1, 1, 1] assert int7s(2**6 - 1).bits() == [0, 1, 1, 1, 1, 1, 1] assert int7s.from_bits([1, 0, 1, 0, 1, 1, 0, 1, 1, 1]) == (0b0110111, [1, 0, 1]) with pytest.raises(TypeError): int7s.deserialize(b"\xFF") t.int8s.deserialize(b"\xFF") n = t.int8s(-126) bits = [1, 0] + t.Bits.deserialize(n.serialize())[0] assert t.int8s.from_bits(bits) == (n, [1, 0]) def test_bigendian_ints(): assert t.uint32_t_be(0x12345678).serialize() == b"\x12\x34\x56\x78" assert t.uint32_t_be.deserialize(b"\x12\x34\x56\x78") == (0x12345678, b"") assert t.int32s_be(0x12345678).serialize() == b"\x12\x34\x56\x78" assert t.int32s_be(-1).serialize() == b"\xff\xff\xff\xff" assert t.int32s_be.deserialize(b"\xfe\xdc\xba\x98") == (-0x01234568, b"") assert ( t.uint32_t_be(0x12345678).serialize()[::-1] == t.uint32_t(0x12345678).serialize() ) def test_bits(): assert t.Bits() == [] assert t.Bits([1] + [0] * 15).serialize() == b"\x80\x00" assert t.Bits.deserialize(b"\x80\x00") == ([1] + [0] * 15, b"") bits = t.Bits([0] * 7) with pytest.raises(ValueError): assert bits.serialize() def compare_with_nan(v1, v2): if not math.isnan(v1) ^ math.isnan(v2): return True return v1 == v2 @pytest.mark.parametrize( "value", [ 1.25, 0, -1.25, float("nan"), float("+inf"), float("-inf"), # Max value held by Half 65504, -65504, ], ) def test_floats(value): extra = b"ab12!" 
for data_type in (t.Half, t.Single, t.Double): value2, remaining = data_type.deserialize(data_type(value).serialize() + extra) assert remaining == extra # nan != nan so make sure they're both nan or the same value assert compare_with_nan(value, value2) assert len(data_type(value).serialize()) == data_type._size @pytest.mark.parametrize( "value, only_double", [ (2, False), (1.25, False), (0, False), (-1.25, False), (-2, False), (float("nan"), False), (float("+inf"), False), (float("-inf"), False), (struct.unpack(">f", bytes.fromhex("7f7f ffff"))[0], False), (struct.unpack(">f", bytes.fromhex("3f7f ffff"))[0], False), (struct.unpack(">d", bytes.fromhex("7f7f ffff ffff ffff"))[0], True), (struct.unpack(">d", bytes.fromhex("3f7f ffff ffff ffff"))[0], True), ], ) def test_single_and_double_with_struct(value, only_double): # Float and double must match the behavior of the built-in struct module if not only_double: assert t.Single(value).serialize() == struct.pack("" assert f"0x{TestEnum.Member:02X}" == "0x00" def test_bitmap(): """Test bitmaps.""" class TestBitmap(t.bitmap16): CH_1 = 0x0010 CH_2 = 0x0020 CH_3 = 0x0040 CH_4 = 0x0080 ALL = 0x00F0 extra = b"extra data\xaa\55" data = b"\xf0\x00" r, rest = TestBitmap.deserialize(data + extra) assert rest == extra assert r is TestBitmap.ALL assert r.name == "ALL" assert r.value == 0x00F0 assert r.serialize() == data data = b"\x60\x00" r, rest = TestBitmap.deserialize(data + extra) assert rest == extra assert TestBitmap.CH_1 not in r assert TestBitmap.CH_2 in r assert TestBitmap.CH_3 in r assert TestBitmap.CH_4 not in r assert TestBitmap.ALL not in r assert r.value == 0x0060 assert r.serialize() == data def test_bitmap_undef(): """Test bitmaps with some undefined flags.""" class TestBitmap(t.bitmap16): CH_1 = 0x0010 CH_2 = 0x0020 CH_3 = 0x0040 CH_4 = 0x0080 ALL = 0x00F0 extra = b"extra data\xaa\55" data = b"\x60\x0f" r, rest = TestBitmap.deserialize(data + extra) assert rest == extra assert TestBitmap.CH_1 not in r assert 
TestBitmap.CH_2 in r assert TestBitmap.CH_3 in r assert TestBitmap.CH_4 not in r assert TestBitmap.ALL not in r assert r.value == 0x0F60 assert r.serialize() == data def test_bitmap_instance_types(): class TestBitmap(t.bitmap16): CH_1 = 0x0010 CH_2 = 0x0020 CH_3 = 0x0040 CH_4 = 0x0080 ALL = 0x00F0 assert TestBitmap._member_type_ is t.uint16_t assert type(TestBitmap.ALL.value) is t.uint16_t assert isinstance(TestBitmap.ALL, t.uint16_t) assert issubclass(TestBitmap, t.uint16_t) assert isinstance(TestBitmap(0xFF00), t.uint16_t) assert isinstance(TestBitmap(0xFF00), TestBitmap) def test_nwk_convert(): assert t.NWK.convert(str(t.NWK(0x1234))[2:]) == t.NWK(0x1234) assert str(t.NWK(0x0012))[2:] == "0012" assert str(t.NWK(0x1200))[2:] == "1200" def test_serializable_bytes(): obj = t.SerializableBytes(b"test") assert obj == obj assert obj == t.SerializableBytes(b"test") assert t.SerializableBytes(obj) == obj assert obj != b"test" assert obj.serialize() == b"test" assert "test" in repr([obj]) with pytest.raises(TypeError): obj + b"test" with pytest.raises(ValueError): t.SerializableBytes("test") with pytest.raises(ValueError): t.SerializableBytes([1, 2, 3]) zigpy-0.62.3/tests/test_zcl.py000066400000000000000000001132621456054056700163300ustar00rootroot00000000000000from __future__ import annotations import asyncio from unittest import mock import pytest import zigpy.device import zigpy.endpoint import zigpy.types as t import zigpy.zcl as zcl import zigpy.zcl.foundation as foundation from .async_mock import AsyncMock, MagicMock, int_sentinel, patch, sentinel DEFAULT_TSN = 123 @pytest.fixture def endpoint(): ep = zigpy.endpoint.Endpoint(MagicMock(), 1) ep.add_input_cluster(0) ep.add_input_cluster(3) return ep def test_deserialize_general(endpoint): hdr, args = endpoint.deserialize(0, b"\x00\x01\x00") assert hdr.tsn == 1 assert hdr.command_id == 0 assert hdr.direction == foundation.Direction.Client_to_Server def test_deserialize_general_unknown(endpoint): hdr, args = 
endpoint.deserialize(0, b"\x00\x01\xff") assert hdr.tsn == 1 assert hdr.frame_control.is_general is True assert hdr.frame_control.is_cluster is False assert hdr.command_id == 255 assert hdr.direction == foundation.Direction.Client_to_Server def test_deserialize_cluster(endpoint): hdr, args = endpoint.deserialize(0, b"\x01\x01\x00xxx") assert hdr.tsn == 1 assert hdr.frame_control.is_general is False assert hdr.frame_control.is_cluster is True assert hdr.command_id == 0 assert hdr.direction == foundation.Direction.Client_to_Server def test_deserialize_cluster_client(endpoint): hdr, args = endpoint.deserialize(3, b"\x09\x01\x00AB") assert hdr.tsn == 1 assert hdr.frame_control.is_general is False assert hdr.frame_control.is_cluster is True assert hdr.command_id == 0 assert list(args) == [0x4241] assert hdr.direction == foundation.Direction.Server_to_Client def test_deserialize_cluster_unknown(endpoint): with pytest.raises(KeyError): endpoint.deserialize(0xFF00, b"\x05\x00\x00\x01\x00") def test_deserialize_cluster_command_unknown(endpoint): hdr, args = endpoint.deserialize(0, b"\x01\x01\xff") assert hdr.tsn == 1 assert hdr.command_id == 255 assert hdr.direction == foundation.Direction.Client_to_Server def test_unknown_cluster(): c = zcl.Cluster.from_id(None, 999) assert isinstance(c, zcl.Cluster) assert c.cluster_id == 999 def test_manufacturer_specific_cluster(): import zigpy.zcl.clusters.manufacturer_specific as ms c = zcl.Cluster.from_id(None, 0xFC00) assert isinstance(c, ms.ManufacturerSpecificCluster) assert hasattr(c, "cluster_id") c = zcl.Cluster.from_id(None, 0xFFFF) assert isinstance(c, ms.ManufacturerSpecificCluster) assert hasattr(c, "cluster_id") @pytest.fixture def cluster_by_id(): def _cluster(cluster_id=0): epmock = MagicMock() epmock._device.get_sequence.return_value = DEFAULT_TSN epmock.device.get_sequence.return_value = DEFAULT_TSN epmock.request = AsyncMock() epmock.reply = AsyncMock() return zcl.Cluster.from_id(epmock, cluster_id) return _cluster 
@pytest.fixture def cluster(cluster_by_id): return cluster_by_id(0) @pytest.fixture def client_cluster(): epmock = AsyncMock() epmock.device.get_sequence = MagicMock(return_value=DEFAULT_TSN) return zcl.Cluster.from_id(epmock, 3) async def test_request_general(cluster): await cluster.request(True, 0, []) assert cluster._endpoint.request.call_count == 1 async def test_request_manufacturer(cluster): await cluster.request(True, 0, [t.uint8_t], 1) assert cluster._endpoint.request.call_count == 1 org_size = len(cluster._endpoint.request.call_args[0][2]) await cluster.request(True, 0, [t.uint8_t], 1, manufacturer=1) assert cluster._endpoint.request.call_count == 2 assert org_size + 2 == len(cluster._endpoint.request.call_args[0][2]) async def test_request_optional(cluster): schema = [t.uint8_t, t.uint16_t, t.Optional(t.uint16_t), t.Optional(t.uint8_t)] cluster.endpoint.request = AsyncMock() with pytest.raises(ValueError): await cluster.request(True, 0, schema) assert cluster._endpoint.request.call_count == 0 cluster._endpoint.request.reset_mock() with pytest.raises(ValueError): await cluster.request(True, 0, schema, 1) assert cluster._endpoint.request.call_count == 0 cluster._endpoint.request.reset_mock() await cluster.request(True, 0, schema, 1, 2) assert cluster._endpoint.request.call_count == 1 cluster._endpoint.request.reset_mock() await cluster.request(True, 0, schema, 1, 2, 3) assert cluster._endpoint.request.call_count == 1 cluster._endpoint.request.reset_mock() await cluster.request(True, 0, schema, 1, 2, 3, 4) assert cluster._endpoint.request.call_count == 1 cluster._endpoint.request.reset_mock() with pytest.raises(TypeError): await cluster.request(True, 0, schema, 1, 2, 3, 4, 5) assert cluster._endpoint.request.call_count == 0 cluster._endpoint.request.reset_mock() async def test_reply_general(cluster): await cluster.reply(False, 0, []) assert cluster._endpoint.reply.call_count == 1 async def test_reply_manufacturer(cluster): await cluster.reply(False, 0, 
[t.uint8_t], 1) assert cluster._endpoint.reply.call_count == 1 org_size = len(cluster._endpoint.reply.call_args[0][2]) await cluster.reply(False, 0, [t.uint8_t], 1, manufacturer=1) assert cluster._endpoint.reply.call_count == 2 assert org_size + 2 == len(cluster._endpoint.reply.call_args[0][2]) def test_attribute_report(cluster): attr = zcl.foundation.Attribute() attr.attrid = 4 attr.value = zcl.foundation.TypeValue() attr.value.value = "manufacturer" hdr = MagicMock(auto_spec=foundation.ZCLHeader) hdr.command_id = foundation.GeneralCommand.Report_Attributes hdr.frame_control.is_general = True hdr.frame_control.is_cluster = False cmd = foundation.GENERAL_COMMANDS[ foundation.GeneralCommand.Report_Attributes ].schema([attr]) cluster.handle_message(hdr, cmd) assert cluster._attr_cache[4] == "manufacturer" attr.attrid = 0x89AB cluster.handle_message(hdr, cmd) assert cluster._attr_cache[attr.attrid] == "manufacturer" def mock_type(*args, **kwargs): raise ValueError with patch.dict( cluster.attributes, {0xAAAA: foundation.ZCLAttributeDef(id=0xAAAA, name="Name", type=mock_type)}, ): attr.attrid = 0xAAAA cluster.handle_message(hdr, cmd) assert cluster._attr_cache[attr.attrid] == "manufacturer" def test_handle_request_unknown(cluster): hdr = MagicMock(auto_spec=foundation.ZCLHeader) hdr.command_id = int_sentinel.command_id hdr.frame_control.is_general = True hdr.frame_control.is_cluster = False cluster.listener_event = MagicMock() cluster._update_attribute = MagicMock() cluster.handle_cluster_general_request = MagicMock() cluster.handle_cluster_request = MagicMock() cluster.handle_message(hdr, sentinel.args) assert cluster.listener_event.call_count == 1 assert cluster.listener_event.call_args[0][0] == "general_command" assert cluster._update_attribute.call_count == 0 assert cluster.handle_cluster_general_request.call_count == 1 assert cluster.handle_cluster_request.call_count == 0 def test_handle_cluster_request(cluster): hdr = MagicMock(auto_spec=foundation.ZCLHeader) 
hdr.command_id = int_sentinel.command_id hdr.frame_control.is_general = False hdr.frame_control.is_cluster = True hdr.command_id.is_general = False cluster.listener_event = MagicMock() cluster._update_attribute = MagicMock() cluster.handle_cluster_general_request = MagicMock() cluster.handle_cluster_request = MagicMock() cluster.handle_message(hdr, sentinel.args) assert cluster.listener_event.call_count == 1 assert cluster.listener_event.call_args[0][0] == "cluster_command" assert cluster._update_attribute.call_count == 0 assert cluster.handle_cluster_general_request.call_count == 0 assert cluster.handle_cluster_request.call_count == 1 def _mk_rar(attrid, value, status=0): r = zcl.foundation.ReadAttributeRecord() r.attrid = attrid r.status = status r.value = zcl.foundation.TypeValue() r.value.value = value return r async def test_read_attributes_uncached(cluster): async def mockrequest( is_general_req, command, schema, args, manufacturer=None, **kwargs ): assert is_general_req is True assert command == 0 rar0 = _mk_rar(0, 99) rar4 = _mk_rar(4, "Manufacturer") rar99 = _mk_rar(99, None, 1) rar199 = _mk_rar(199, 199) rar16 = _mk_rar(0x0010, None, zcl.foundation.Status.UNSUPPORTED_ATTRIBUTE) return [[rar0, rar4, rar99, rar199, rar16]] cluster.request = mockrequest success, failure = await cluster.read_attributes([0, "manufacturer", 99, 199, 16]) assert success[0] == 99 assert success["manufacturer"] == "Manufacturer" assert failure[99] == 1 assert {99, 0x0010} == failure.keys() assert success[199] == 199 assert cluster.unsupported_attributes == {0x0010, "location_desc"} async def test_read_attributes_cached(cluster): cluster.request = MagicMock() cluster._attr_cache[0] = 99 cluster._attr_cache[4] = "Manufacturer" cluster.unsupported_attributes.add(0x0010) success, failure = await cluster.read_attributes( [0, "manufacturer", 0x0010], allow_cache=True ) assert cluster.request.call_count == 0 assert success[0] == 99 assert success["manufacturer"] == "Manufacturer" assert 
failure == {0x0010: zcl.foundation.Status.UNSUPPORTED_ATTRIBUTE} async def test_read_attributes_mixed_cached(cluster): """Reading cached and uncached attributes.""" cluster.request = AsyncMock(return_value=[[_mk_rar(5, "Model")]]) cluster._attr_cache[0] = 99 cluster._attr_cache[4] = "Manufacturer" cluster.unsupported_attributes.add(0x0010) success, failure = await cluster.read_attributes( [0, "manufacturer", "model", 0x0010], allow_cache=True ) assert success[0] == 99 assert success["manufacturer"] == "Manufacturer" assert success["model"] == "Model" assert cluster.request.await_count == 1 assert cluster.request.call_args[0][3] == [0x0005] assert failure == {0x0010: zcl.foundation.Status.UNSUPPORTED_ATTRIBUTE} async def test_read_attributes_default_response(cluster): async def mockrequest( foundation, command, schema, args, manufacturer=None, **kwargs ): assert foundation is True assert command == 0 return [0xC1] cluster.request = mockrequest success, failure = await cluster.read_attributes([0, 5, 23], allow_cache=False) assert success == {} assert failure == {0: 0xC1, 5: 0xC1, 23: 0xC1} async def test_read_attributes_value_normalization_error(cluster): async def mockrequest( foundation, command, schema, args, manufacturer=None, **kwargs ): assert foundation is True assert command == 0 rar5 = _mk_rar(5, "Model") return [[rar5]] def mock_type(*args, **kwargs): raise ValueError cluster.request = mockrequest with patch.dict( cluster.attributes, {5: foundation.ZCLAttributeDef(id=5, name="Name", type=mock_type)}, ): success, failure = await cluster.read_attributes(["model"], allow_cache=True) assert failure == {} assert success["model"] == "Model" async def test_item_access_attributes(cluster): cluster._attr_cache[5] = sentinel.model assert cluster["model"] == sentinel.model assert cluster[5] == sentinel.model assert cluster.get("model") == sentinel.model assert cluster.get(5) == sentinel.model assert cluster.get("model", sentinel.default) == sentinel.model assert 
cluster.get(5, sentinel.default) == sentinel.model with pytest.raises(KeyError): cluster[4] assert cluster.get(4) is None assert cluster.get("manufacturer") is None assert cluster.get(4, sentinel.default) is sentinel.default assert cluster.get("manufacturer", sentinel.default) is sentinel.default with pytest.raises(KeyError): cluster["manufacturer"] with pytest.raises(KeyError): # wrong attr name cluster["some_non_existent_attr"] with pytest.raises(ValueError): # wrong key type cluster[None] with pytest.raises(ValueError): # wrong key type cluster.get(None) # Test access to cached attribute via wrong attr name with pytest.raises(KeyError): cluster.get("no_such_attribute") async def test_item_set_attributes(cluster): with patch.object(cluster, "write_attributes") as write_mock: cluster["model"] = sentinel.model await asyncio.sleep(0) assert write_mock.await_count == 1 assert write_mock.call_args[0][0] == {"model": sentinel.model} with pytest.raises(ValueError): cluster[None] = sentinel.manufacturer async def test_write_attributes(cluster): with patch.object(cluster, "_write_attributes", new=AsyncMock()): await cluster.write_attributes({0: 5, "app_version": 4}) assert cluster._write_attributes.call_count == 1 async def test_write_wrong_attribute(cluster): with patch.object(cluster, "_write_attributes", new=AsyncMock()): await cluster.write_attributes({0xFF: 5}) assert cluster._write_attributes.call_count == 1 async def test_write_unknown_attribute(cluster): with patch.object(cluster, "_write_attributes", new=AsyncMock()): with pytest.raises(KeyError): # Using an invalid attribute name, the call should fail await cluster.write_attributes({"dummy_attribute": 5}) assert cluster._write_attributes.call_count == 0 async def test_write_attributes_wrong_type(cluster): with patch.object(cluster, "_write_attributes", new=AsyncMock()): await cluster.write_attributes({18: 0x2222}) assert cluster._write_attributes.call_count == 1 async def test_write_attributes_raw(cluster): with 
patch.object(cluster, "_write_attributes", new=AsyncMock()): # write_attributes_raw does not check the attributes, # send to unknown attribute in cluster, the write should be effective await cluster.write_attributes_raw({0: 5, 0x3000: 5}) assert cluster._write_attributes.call_count == 1 @pytest.mark.parametrize( "cluster_id, attr, value, serialized", ( (0, "zcl_version", 0xAA, b"\x00\x00\x20\xaa"), (0, "model", "model x", b"\x05\x00\x42\x07model x"), (0, "device_enabled", True, b"\x12\x00\x10\x01"), (0, "alarm_mask", 0x55, b"\x13\x00\x18\x55"), (0x0202, "fan_mode", 0xDE, b"\x00\x00\x30\xde"), ), ) async def test_write_attribute_types( cluster_id, attr, value, serialized, cluster_by_id ): cluster = cluster_by_id(cluster_id) with patch.object(cluster.endpoint, "request", new=AsyncMock()): await cluster.write_attributes({attr: value}) assert cluster._endpoint.reply.call_count == 0 assert cluster._endpoint.request.call_count == 1 assert cluster.endpoint.request.call_args[0][2][3:] == serialized @pytest.mark.parametrize( "status", (foundation.Status.SUCCESS, foundation.Status.UNSUPPORTED_ATTRIBUTE) ) async def test_write_attributes_cache_default_response(cluster, status): write_mock = AsyncMock( return_value=[foundation.GeneralCommand.Write_Attributes, status] ) with patch.object(cluster, "_write_attributes", write_mock): attributes = {4: "manufacturer", 5: "model", 12: 12} await cluster.write_attributes(attributes) assert cluster._write_attributes.call_count == 1 for attr_id in attributes: assert attr_id not in cluster._attr_cache @pytest.mark.parametrize( "attributes, result", ( ({4: "manufacturer"}, b"\x00"), ({4: "manufacturer", 5: "model"}, b"\x00"), ({4: "manufacturer", 5: "model", 3: 12}, b"\x00"), ({4: "manufacturer", 5: "model"}, b"\x00\x00"), ({4: "manufacturer", 5: "model", 3: 12}, b"\x00\x00\x00"), ), ) async def test_write_attributes_cache_success(cluster, attributes, result): listener = MagicMock() cluster.add_listener(listener) rsp_type = 
t.List[foundation.WriteAttributesStatusRecord] write_mock = AsyncMock(return_value=[rsp_type.deserialize(result)[0]]) with patch.object(cluster, "_write_attributes", write_mock): await cluster.write_attributes(attributes) assert cluster._write_attributes.call_count == 1 for attr_id in attributes: assert cluster._attr_cache[attr_id] == attributes[attr_id] listener.attribute_updated.assert_any_call( attr_id, attributes[attr_id], mock.ANY ) @pytest.mark.parametrize( "attributes, result, failed", ( ({4: "manufacturer"}, b"\x86\x04\x00", [4]), ({4: "manufacturer", 5: "model"}, b"\x86\x05\x00", [5]), ({4: "manufacturer", 5: "model"}, b"\x86\x04\x00\x86\x05\x00", [4, 5]), ( {4: "manufacturer", 5: "model", 3: 12}, b"\x86\x05\x00", [5], ), ( {4: "manufacturer", 5: "model", 3: 12}, b"\x86\x05\x00\x01\x03\x00", [5, 3], ), ( {4: "manufacturer", 5: "model", 3: 12}, b"\x02\x04\x00\x86\x05\x00\x01\x03\x00", [4, 5, 3], ), ), ) async def test_write_attributes_cache_failure(cluster, attributes, result, failed): listener = MagicMock() cluster.add_listener(listener) rsp_type = foundation.WriteAttributesResponse write_mock = AsyncMock(return_value=[rsp_type.deserialize(result)[0]]) with patch.object(cluster, "_write_attributes", write_mock): await cluster.write_attributes(attributes) assert cluster._write_attributes.call_count == 1 for attr_id in attributes: if attr_id in failed: assert attr_id not in cluster._attr_cache # Failed writes do not propagate with pytest.raises(AssertionError): listener.attribute_updated.assert_any_call( attr_id, attributes[attr_id] ) else: assert cluster._attr_cache[attr_id] == attributes[attr_id] listener.attribute_updated.assert_any_call( attr_id, attributes[attr_id], mock.ANY ) async def test_read_attributes_response(cluster): await cluster.read_attributes_rsp({0: 5}) assert cluster._endpoint.reply.call_count == 1 assert cluster._endpoint.request.call_count == 0 async def test_read_attributes_resp_unsupported(cluster): await 
cluster.read_attributes_rsp({0: 5}) assert cluster._endpoint.reply.call_count == 1 assert cluster._endpoint.request.call_count == 0 orig_len = len(cluster._endpoint.reply.call_args[0][2]) await cluster.read_attributes_rsp({0: 5, 2: None}) assert cluster._endpoint.reply.call_count == 2 assert cluster._endpoint.request.call_count == 0 assert len(cluster._endpoint.reply.call_args[0][2]) == orig_len + 3 async def test_read_attributes_resp_str(cluster): await cluster.read_attributes_rsp({"hw_version": 32}) assert cluster._endpoint.reply.call_count == 1 assert cluster._endpoint.request.call_count == 0 async def test_read_attributes_resp_exc(cluster): with patch.object(foundation.DATA_TYPES, "pytype_to_datatype_id") as mck: mck.side_effect = ValueError await cluster.read_attributes_rsp({"hw_version": 32}) assert cluster._endpoint.reply.call_count == 1 assert cluster._endpoint.request.call_count == 0 assert cluster.endpoint.reply.call_args[0][2][-3:] == b"\x03\x00\x86" @pytest.mark.parametrize( "cluster_id, attr, value, serialized", ( (0, "zcl_version", 0xAA, b"\x00\x00\x00\x20\xaa"), (0, "model", "model x", b"\x05\x00\x00\x42\x07model x"), (0, "device_enabled", True, b"\x12\x00\x00\x10\x01"), (0, "alarm_mask", 0x55, b"\x13\x00\x00\x18\x55"), (0x0202, "fan_mode", 0xDE, b"\x00\x00\x00\x30\xde"), ), ) async def test_read_attribute_resp(cluster_id, attr, value, serialized, cluster_by_id): cluster = cluster_by_id(cluster_id) await cluster.read_attributes_rsp({attr: value}) assert cluster._endpoint.reply.call_count == 1 assert cluster._endpoint.request.call_count == 0 assert cluster.endpoint.reply.call_args[0][2][3:] == serialized def test_bind(cluster): cluster.bind() def test_unbind(cluster): cluster.unbind() async def test_configure_reporting(cluster): await cluster.configure_reporting(0, 10, 20, 1) async def test_configure_reporting_named(cluster): await cluster.configure_reporting("zcl_version", 10, 20, 1) assert cluster._endpoint.request.call_count == 1 async def 
test_configure_reporting_wrong_named(cluster): with pytest.raises(ValueError): await cluster.configure_reporting("wrong_attr_name", 10, 20, 1) assert cluster._endpoint.request.call_count == 0 async def test_configure_reporting_wrong_attrid(cluster): with pytest.raises(ValueError): await cluster.configure_reporting(0xABCD, 10, 20, 1) assert cluster._endpoint.request.call_count == 0 async def test_configure_reporting_manuf(): ep = MagicMock() cluster = zcl.Cluster.from_id(ep, 6) cluster.request = AsyncMock(name="request") await cluster.configure_reporting(0, 10, 20, 1) cluster.request.assert_called_with( True, 0x06, mock.ANY, mock.ANY, expect_reply=True, manufacturer=None, tsn=mock.ANY, ) cluster.request.reset_mock() manufacturer_id = 0xFCFC await cluster.configure_reporting(0, 10, 20, 1, manufacturer=manufacturer_id) cluster.request.assert_called_with( True, 0x06, mock.ANY, mock.ANY, expect_reply=True, manufacturer=manufacturer_id, tsn=mock.ANY, ) assert cluster.request.call_count == 1 @pytest.mark.parametrize( "cluster_id, attr, data_type", ( (0, "zcl_version", 0x20), (0, "model", 0x42), (0, "device_enabled", 0x10), (0, "alarm_mask", 0x18), (0x0202, "fan_mode", 0x30), ), ) async def test_configure_reporting_types(cluster_id, attr, data_type, cluster_by_id): cluster = cluster_by_id(cluster_id) await cluster.configure_reporting(attr, 0x1234, 0x2345, 0xAA) assert cluster._endpoint.reply.call_count == 0 assert cluster._endpoint.request.call_count == 1 assert cluster.endpoint.request.call_args[0][2][6] == data_type async def test_command(cluster): await cluster.command(0x00) assert cluster._endpoint.request.call_count == 1 assert cluster._endpoint.request.call_args[0][1] == DEFAULT_TSN async def test_command_override_tsn(cluster): await cluster.command(0x00, tsn=22) assert cluster._endpoint.request.call_count == 1 assert cluster._endpoint.request.call_args[0][1] == 22 async def test_command_attr(cluster): await cluster.reset_fact_default() assert 
cluster._endpoint.request.call_count == 1 async def test_client_command_attr(client_cluster): await client_cluster.identify_query_response(timeout=0) assert client_cluster._endpoint.reply.call_count == 1 async def test_command_invalid_attr(cluster): with pytest.raises(AttributeError): await cluster.no_such_command() async def test_invalid_arguments_cluster_command(cluster): with pytest.raises(TypeError): await cluster.command(0x00, 1) async def test_invalid_arguments_cluster_client_command(client_cluster): with pytest.raises(TypeError): await client_cluster.client_command(0, 0, 0) def test_name(cluster): assert cluster.name == "Basic" def test_commands(cluster): assert cluster.commands == [cluster.ServerCommandDefs.reset_fact_default] def test_general_command(cluster): cluster.request = MagicMock() cluster.reply = MagicMock() cmd_id = 0x0C cluster.general_command(cmd_id, sentinel.start, sentinel.items, manufacturer=0x4567) assert cluster.reply.call_count == 0 assert cluster.request.call_count == 1 cluster.request.assert_called_with( True, cmd_id, mock.ANY, sentinel.start, sentinel.items, expect_reply=True, manufacturer=0x4567, tsn=mock.ANY, ) def test_general_command_reply(cluster): cluster.request = MagicMock() cluster.reply = MagicMock() cmd_id = 0x0D cluster.general_command(cmd_id, True, [], manufacturer=0x4567) assert cluster.request.call_count == 0 assert cluster.reply.call_count == 1 cluster.reply.assert_called_with( True, cmd_id, mock.ANY, True, [], manufacturer=0x4567, tsn=None ) cluster.request.reset_mock() cluster.reply.reset_mock() cluster.general_command(cmd_id, True, [], manufacturer=0x4567, tsn=sentinel.tsn) assert cluster.request.call_count == 0 assert cluster.reply.call_count == 1 cluster.reply.assert_called_with( True, cmd_id, mock.ANY, True, [], manufacturer=0x4567, tsn=sentinel.tsn ) def test_handle_cluster_request_handler(cluster): hdr = foundation.ZCLHeader.cluster(123, 0x00) cluster.handle_cluster_request(hdr, [sentinel.arg1, sentinel.arg2]) 
async def test_handle_cluster_general_request_disable_default_rsp(endpoint): hdr, values = endpoint.deserialize( 0, b"\x18\xCD\x0A\x01\xFF\x42\x25\x01\x21\x95\x0B\x04\x21\xA8\x43\x05\x21\x36\x00" b"\x06\x24\x02\x00\x05\x00\x00\x64\x29\xF8\x07\x65\x21\xD9\x0E\x66\x2B\x84\x87" b"\x01\x00\x0A\x21\x00\x00", ) cluster = endpoint.in_clusters[0] p1 = patch.object(cluster, "_update_attribute") p2 = patch.object(cluster, "general_command") with p1 as attr_lst_mock, p2 as general_cmd_mock: cluster.handle_cluster_general_request(hdr, values) await asyncio.sleep(0) assert attr_lst_mock.call_count > 0 assert general_cmd_mock.call_count == 0 with p1 as attr_lst_mock, p2 as general_cmd_mock: hdr.frame_control.disable_default_response = False cluster.handle_cluster_general_request(hdr, values) await asyncio.sleep(0) assert attr_lst_mock.call_count > 0 assert general_cmd_mock.call_count == 1 assert general_cmd_mock.call_args[1]["tsn"] == hdr.tsn async def test_handle_cluster_general_request_not_attr_report(cluster): hdr = foundation.ZCLHeader.general(1, foundation.GeneralCommand.Write_Attributes) p1 = patch.object(cluster, "_update_attribute") p2 = patch.object(cluster, "create_catching_task") with p1 as attr_lst_mock, p2 as response_mock: cluster.handle_cluster_general_request(hdr, [1, 2, 3]) await asyncio.sleep(0) assert attr_lst_mock.call_count == 0 assert response_mock.call_count == 0 async def test_write_attributes_undivided(cluster): with patch.object(cluster, "request", new=AsyncMock()): i = cluster.write_attributes_undivided({0: 5, "app_version": 4}) await i assert cluster.request.call_count == 1 async def test_configure_reporting_multiple(cluster): await cluster.configure_reporting(3, 5, 15, 20, manufacturer=0x2345) await cluster.configure_reporting_multiple({3: (5, 15, 20)}, manufacturer=0x2345) assert cluster.endpoint.request.call_count == 2 assert ( cluster.endpoint.request.call_args_list[0][0][2] == cluster.endpoint.request.call_args_list[1][0][2] ) async def 
test_configure_reporting_multiple_def_rsp(cluster): """Configure reporting returned a default response. May happen.""" cluster.endpoint.request.return_value = ( zcl.foundation.GeneralCommand.Configure_Reporting, zcl.foundation.Status.UNSUP_GENERAL_COMMAND, ) await cluster.configure_reporting_multiple( {3: (5, 15, 20), 4: (6, 16, 26)}, manufacturer=0x2345 ) assert cluster.endpoint.request.await_count == 1 assert cluster.unsupported_attributes == set() def _mk_cfg_rsp(responses: dict[int, zcl.foundation.Status]): """A helper to create a configure response record.""" cfg_response = zcl.foundation.ConfigureReportingResponse() for attrid, status in responses.items(): cfg_response.append( zcl.foundation.ConfigureReportingResponseRecord( status, zcl.foundation.ReportingDirection.ReceiveReports, attrid ) ) return [cfg_response] async def test_configure_reporting_multiple_single_success(cluster): """Configure reporting returned a single success response.""" cluster.endpoint.request.return_value = _mk_cfg_rsp( {0: zcl.foundation.Status.SUCCESS} ) await cluster.configure_reporting_multiple( {3: (5, 15, 20), 4: (6, 16, 26)}, manufacturer=0x2345 ) assert cluster.endpoint.request.await_count == 1 assert cluster.unsupported_attributes == set() async def test_configure_reporting_multiple_single_fail(cluster): """Configure reporting returned a single failure response.""" cluster.endpoint.request.return_value = _mk_cfg_rsp( {3: zcl.foundation.Status.UNSUPPORTED_ATTRIBUTE} ) await cluster.configure_reporting_multiple( {3: (5, 15, 20), 4: (6, 16, 26)}, manufacturer=0x2345 ) assert cluster.endpoint.request.await_count == 1 assert cluster.unsupported_attributes == {"hw_version", 3} cluster.endpoint.request.return_value = _mk_cfg_rsp( {3: zcl.foundation.Status.SUCCESS} ) await cluster.configure_reporting_multiple( {3: (5, 15, 20), 4: (6, 16, 26)}, manufacturer=0x2345 ) assert cluster.endpoint.request.await_count == 2 assert cluster.unsupported_attributes == set() async def 
test_configure_reporting_multiple_single_unreportable(cluster): """Configure reporting returned a single failure response for unreportable attribute.""" cluster.endpoint.request.return_value = _mk_cfg_rsp( {4: zcl.foundation.Status.UNREPORTABLE_ATTRIBUTE} ) await cluster.configure_reporting_multiple( {3: (5, 15, 20), 4: (6, 16, 26)}, manufacturer=0x2345 ) assert cluster.endpoint.request.await_count == 1 assert cluster.unsupported_attributes == set() async def test_configure_reporting_multiple_both_unsupp(cluster): """Configure reporting returned unsupported attributes for both.""" cluster.endpoint.request.return_value = _mk_cfg_rsp( { 3: zcl.foundation.Status.UNSUPPORTED_ATTRIBUTE, 4: zcl.foundation.Status.UNSUPPORTED_ATTRIBUTE, } ) await cluster.configure_reporting_multiple( {3: (5, 15, 20), 4: (6, 16, 26)}, manufacturer=0x2345 ) assert cluster.endpoint.request.await_count == 1 assert cluster.unsupported_attributes == {"hw_version", 3, "manufacturer", 4} cluster.endpoint.request.return_value = _mk_cfg_rsp( { 3: zcl.foundation.Status.SUCCESS, 4: zcl.foundation.Status.SUCCESS, } ) await cluster.configure_reporting_multiple( {3: (5, 15, 20), 4: (6, 16, 26)}, manufacturer=0x2345 ) assert cluster.endpoint.request.await_count == 2 assert cluster.unsupported_attributes == set() def test_unsupported_attr_add(cluster): """Test adding unsupported attributes.""" assert "manufacturer" not in cluster.unsupported_attributes assert 4 not in cluster.unsupported_attributes assert "model" not in cluster.unsupported_attributes assert 5 not in cluster.unsupported_attributes cluster.add_unsupported_attribute(4) assert "manufacturer" in cluster.unsupported_attributes assert 4 in cluster.unsupported_attributes cluster.add_unsupported_attribute("model") assert "model" in cluster.unsupported_attributes assert 5 in cluster.unsupported_attributes def test_unsupported_attr_add_no_reverse_attr_name(cluster): """Test adding unsupported attributes without corresponding reverse attr name.""" 
assert "no_such_attr" not in cluster.unsupported_attributes assert 0xDEED not in cluster.unsupported_attributes cluster.add_unsupported_attribute("no_such_attr") cluster.add_unsupported_attribute("no_such_attr") assert "no_such_attr" in cluster.unsupported_attributes cluster.add_unsupported_attribute(0xDEED) assert 0xDEED in cluster.unsupported_attributes def test_unsupported_attr_remove(cluster): """Test removing unsupported attributes.""" assert "manufacturer" not in cluster.unsupported_attributes assert 4 not in cluster.unsupported_attributes assert "model" not in cluster.unsupported_attributes assert 5 not in cluster.unsupported_attributes cluster.add_unsupported_attribute(4) assert "manufacturer" in cluster.unsupported_attributes assert 4 in cluster.unsupported_attributes cluster.add_unsupported_attribute("model") assert "model" in cluster.unsupported_attributes assert 5 in cluster.unsupported_attributes cluster.remove_unsupported_attribute(4) assert "manufacturer" not in cluster.unsupported_attributes assert 4 not in cluster.unsupported_attributes cluster.remove_unsupported_attribute("model") assert "model" not in cluster.unsupported_attributes assert 5 not in cluster.unsupported_attributes def test_unsupported_attr_remove_no_reverse_attr_name(cluster): """Test removing unsupported attributes without corresponding reverse attr name.""" assert "no_such_attr" not in cluster.unsupported_attributes assert 0xDEED not in cluster.unsupported_attributes cluster.add_unsupported_attribute("no_such_attr") assert "no_such_attr" in cluster.unsupported_attributes cluster.add_unsupported_attribute(0xDEED) assert 0xDEED in cluster.unsupported_attributes cluster.remove_unsupported_attribute("no_such_attr") assert "no_such_attr" not in cluster.unsupported_attributes cluster.remove_unsupported_attribute(0xDEED) assert 0xDEED not in cluster.unsupported_attributes def test_zcl_command_duplicate_name_prevention(): assert 0x1234 not in zcl.clusters.CLUSTERS_BY_ID with 
pytest.raises(TypeError): class TestCluster(zcl.Cluster): cluster_id = 0x1234 ep_attribute = "test_cluster" server_commands = { 0x00: foundation.ZCLCommandDef( name="command1", schema={}, direction=False ), 0x01: foundation.ZCLCommandDef( name="command1", schema={}, direction=False ), } def test_zcl_attridx_deprecation(cluster): with pytest.deprecated_call(): cluster.attridx with pytest.deprecated_call(): assert cluster.attridx is cluster.attributes_by_name def test_zcl_response_type_tuple_like(): req = ( zcl.clusters.general.OnOff(None) .commands_by_name["on_with_timed_off"] .schema( on_off_control=0, on_time=1, off_wait_time=2, ) ) on_off_control, on_time, off_wait_time = req assert req.on_off_control == on_off_control == req[0] == 0 assert req.on_time == on_time == req[1] == 1 assert req.off_wait_time == off_wait_time == req[2] == 2 assert req == (0, 1, 2) assert req == req assert req == req.replace() async def test_zcl_request_direction(): """Test that the request header's `direction` field is properly set.""" dev = MagicMock() ep = zigpy.endpoint.Endpoint(dev, 1) ep._device.get_sequence.return_value = DEFAULT_TSN ep.device.get_sequence.return_value = DEFAULT_TSN ep.request = AsyncMock() ep.add_input_cluster(zcl.clusters.general.OnOff.cluster_id) ep.add_input_cluster(zcl.clusters.lighting.Color.cluster_id) ep.add_output_cluster(zcl.clusters.general.OnOff.cluster_id) # Input cluster await ep.in_clusters[zcl.clusters.general.OnOff.cluster_id].on() hdr1, _ = foundation.ZCLHeader.deserialize(ep.request.mock_calls[0].args[2]) assert hdr1.direction == foundation.Direction.Client_to_Server ep.request.reset_mock() # Output cluster await ep.out_clusters[zcl.clusters.general.OnOff.cluster_id].on() hdr2, _ = foundation.ZCLHeader.deserialize(ep.request.mock_calls[0].args[2]) assert hdr2.direction == foundation.Direction.Server_to_Client # Color cluster that also uses `direction` as a kwarg await ep.light_color.move_to_hue( hue=0, 
direction=zcl.clusters.lighting.Color.Direction.Shortest_distance, transition_time=10, ) async def test_zcl_reply_direction(app_mock): """Test that the reply header's `direction` field is properly set.""" dev = zigpy.device.Device( application=app_mock, ieee=t.EUI64.convert("aa:bb:cc:dd:11:22:33:44"), nwk=0x1234, ) dev._send_sequence = DEFAULT_TSN ep = dev.add_endpoint(1) ep.add_input_cluster(zcl.clusters.general.OnOff.cluster_id) hdr = foundation.ZCLHeader( frame_control=foundation.FrameControl( frame_type=foundation.FrameType.GLOBAL_COMMAND, is_manufacturer_specific=0, direction=foundation.Direction.Server_to_Client, disable_default_response=0, reserved=0, ), tsn=87, command_id=foundation.GeneralCommand.Report_Attributes, ) attr = zcl.foundation.Attribute() attr.attrid = zcl.clusters.general.OnOff.AttributeDefs.on_off.id attr.value = zcl.foundation.TypeValue() attr.value.value = t.Bool.true cmd = foundation.GENERAL_COMMANDS[ foundation.GeneralCommand.Report_Attributes ].schema([attr]) ep.handle_message( profile=260, cluster=zcl.clusters.general.OnOff.cluster_id, hdr=hdr, args=cmd, ) await asyncio.sleep(0.1) packet = app_mock.send_packet.mock_calls[0].args[0] assert packet.cluster_id == zcl.clusters.general.OnOff.cluster_id # The direction is correct packet_hdr, _ = foundation.ZCLHeader.deserialize(packet.data.serialize()) assert packet_hdr.direction == foundation.Direction.Client_to_Server zigpy-0.62.3/tests/test_zcl_clusters.py000066400000000000000000000437561456054056700202660ustar00rootroot00000000000000import asyncio import re import pytest from zigpy import device import zigpy.endpoint import zigpy.types as types import zigpy.zcl as zcl import zigpy.zcl.clusters.security as sec from zigpy.zdo import types as zdo_t from .async_mock import AsyncMock, MagicMock, patch, sentinel IMAGE_SIZE = 0x2345 IMAGE_OFFSET = 0x2000 def test_registry(): for cluster_id, cluster in zcl.Cluster._registry.items(): assert 0 <= getattr(cluster, "cluster_id", -1) <= 65535 assert 
cluster_id == cluster.cluster_id assert issubclass(cluster, zcl.Cluster) def test_attributes(): for _cluster_id, cluster in zcl.Cluster._registry.items(): for attrid, attr in cluster.attributes.items(): assert 0 <= attrid <= 0xFFFF assert isinstance(attr, zcl.foundation.ZCLAttributeDef) assert attr.id == attrid assert attr.name assert attr.type assert callable(attr.type.deserialize) assert callable(attr.type.serialize) def _test_commands(cmdattr): for _cluster_id, cluster in zcl.Cluster._registry.items(): for cmdid, cmdspec in getattr(cluster, cmdattr).items(): assert 0 <= cmdid <= 0xFF assert cmdspec.id == cmdid assert isinstance(cmdspec, zcl.foundation.ZCLCommandDef) assert issubclass(cmdspec.schema, types.Struct) for field in cmdspec.schema.fields: assert callable(field.type.deserialize) assert callable(field.type.serialize) def test_server_commands(): _test_commands("server_commands") def test_client_commands(): _test_commands("client_commands") def test_ep_attributes(): seen = set() for _cluster_id, cluster in zcl.Cluster._registry.items(): assert isinstance(cluster.ep_attribute, str) assert re.match(r"^[a-z_][a-z0-9_]*$", cluster.ep_attribute) assert cluster.ep_attribute not in seen seen.add(cluster.ep_attribute) ep = zigpy.endpoint.Endpoint(None, 1) assert not hasattr(ep, cluster.ep_attribute) async def test_time_cluster(): ep = MagicMock() ep.reply = AsyncMock() t = zcl.Cluster._registry[0x000A](ep) hdr_general = zcl.foundation.ZCLHeader.general tsn = 123 t.handle_message(hdr_general(tsn, 1), [[0]]) await asyncio.sleep(0.01) assert ep.reply.call_count == 0 t.handle_message(hdr_general(tsn, 0), [[0]]) await asyncio.sleep(0.01) assert ep.reply.call_count == 1 assert ep.reply.call_args[0][2][3] == 0 t.handle_message(hdr_general(tsn, 0), [[1]]) await asyncio.sleep(0.01) assert ep.reply.call_count == 2 assert ep.reply.call_args[0][2][3] == 1 t.handle_message(hdr_general(tsn, 0), [[2]]) await asyncio.sleep(0.01) assert ep.reply.call_count == 3 assert 
ep.reply.call_args[0][2][3] == 2 t.handle_message(hdr_general(tsn, 0), [[0, 1, 2]]) await asyncio.sleep(0.01) assert ep.reply.call_count == 4 assert ep.reply.call_args[0][2][3] == 0 t.handle_message(hdr_general(tsn, 0), [[7]]) await asyncio.sleep(0.01) assert ep.reply.call_count == 5 assert ep.reply.call_args[0][2][3] == 7 async def test_time_cluster_unsupported(): ep = MagicMock() ep.reply = AsyncMock() t = zcl.Cluster._registry[0x000A](ep) hdr_general = zcl.foundation.ZCLHeader.general tsn = 123 t.handle_cluster_general_request(hdr_general(tsn, 0), [[199, 128]]) await asyncio.sleep(0.01) assert ep.reply.call_count == 1 assert ep.reply.call_args[0][2][-6:] == b"\xc7\x00\x86\x80\x00\x86" @pytest.fixture def dev(monkeypatch, app_mock): monkeypatch.setattr(device, "APS_REPLY_TIMEOUT_EXTENDED", 0.1) ieee = types.EUI64(map(types.uint8_t, [0, 1, 2, 3, 4, 5, 6, 7])) dev = device.Device(app_mock, ieee, 65535) node_desc = zdo_t.NodeDescriptor(1, 1, 1, 4, 5, 6, 7, 8) with patch.object( dev.zdo, "Node_Desc_req", new=AsyncMock(return_value=(0, 0xFFFF, node_desc)) ): yield dev @pytest.fixture def ota_cluster(dev): ep = dev.add_endpoint(1) # ep.device.application.ota = MagicMock(spec_set=ota.OTA) cluster = zcl.Cluster._registry[0x0019](ep) with patch.object(cluster, "reply", AsyncMock()), patch.object( cluster, "request", AsyncMock() ): yield cluster async def test_ota_handle_cluster_req(ota_cluster): ota_cluster._handle_cluster_request = AsyncMock() hdr = zigpy.zcl.foundation.ZCLHeader.cluster(123, 0x00) ota_cluster.handle_cluster_request(hdr, sentinel.args) assert ota_cluster._handle_cluster_request.call_count == 1 async def test_ota_handle_cluster_req_wrapper(ota_cluster, caplog): ota_cluster._handle_query_next_image = AsyncMock() ota_cluster._handle_image_block = AsyncMock() ota_cluster._handle_upgrade_end = AsyncMock() hdr = zigpy.zcl.foundation.ZCLHeader.cluster(123, 0x01) await ota_cluster._handle_cluster_request(hdr, [sentinel.args]) assert 
ota_cluster._handle_query_next_image.call_count == 1 assert ota_cluster._handle_query_next_image.call_args[0][0] == sentinel.args assert ota_cluster._handle_image_block.call_count == 0 assert ota_cluster._handle_upgrade_end.call_count == 0 ota_cluster._handle_query_next_image.reset_mock() ota_cluster._handle_image_block.reset_mock() ota_cluster._handle_upgrade_end.reset_mock() # This command isn't currently handled hdr.command_id = 0x08 await ota_cluster._handle_cluster_request(hdr, [sentinel.just_args]) assert ota_cluster._handle_query_next_image.call_count == 0 assert ota_cluster._handle_image_block.call_count == 0 assert ota_cluster._handle_upgrade_end.call_count == 0 # This command doesn't exist hdr.command_id = 0x28 await ota_cluster._handle_cluster_request(hdr, [sentinel.just_args]) assert ota_cluster._handle_query_next_image.call_count == 0 assert ota_cluster._handle_image_block.call_count == 0 assert ota_cluster._handle_upgrade_end.call_count == 0 assert "Unknown OTA command id" in caplog.text def _ota_next_image(cluster, has_image=True, upgradeable=False): async def get_ota_mock(*args): if upgradeable: img = MagicMock() img.should_update = MagicMock(return_value=True) img.key.manufacturer_id = sentinel.manufacturer_id img.key.image_type = sentinel.image_type img.version = sentinel.image_version img.header.image_size = sentinel.image_size elif has_image: img = MagicMock() img.should_update.return_value = False else: img = None return img cluster.endpoint.device.application.ota.get_ota_image = MagicMock( side_effect=get_ota_mock ) return cluster._handle_query_next_image( sentinel.field_ctrl, sentinel.manufacturer_id, sentinel.image_type, sentinel.current_file_version, sentinel.hw_version, tsn=0x21, ) async def test_ota_handle_query_next_image_no_img(ota_cluster): ota_cluster.query_next_image_response = AsyncMock() ota_cluster.endpoint.device.ota_in_progress = False await _ota_next_image(ota_cluster, has_image=False, upgradeable=False) assert 
ota_cluster.query_next_image_response.call_count == 1 assert ( ota_cluster.query_next_image_response.call_args[0][0] == zcl.foundation.Status.NO_IMAGE_AVAILABLE ) assert len(ota_cluster.query_next_image_response.call_args[0]) == 1 async def test_ota_handle_query_next_image_not_upgradeable(ota_cluster): ota_cluster.query_next_image_response = AsyncMock() ota_cluster.endpoint.device.ota_in_progress = False await _ota_next_image(ota_cluster, has_image=True, upgradeable=False) assert ota_cluster.query_next_image_response.call_count == 1 assert ( ota_cluster.query_next_image_response.call_args[0][0] == zcl.foundation.Status.NO_IMAGE_AVAILABLE ) assert len(ota_cluster.query_next_image_response.call_args[0]) == 1 async def test_ota_handle_query_next_image_upgradeable(ota_cluster): ota_cluster.query_next_image_response = AsyncMock() ota_cluster.endpoint.device.ota_in_progress = False class Listener: device_ota_update_available = MagicMock() listener = Listener() ota_cluster.endpoint.device.add_listener(listener) await _ota_next_image(ota_cluster, has_image=True, upgradeable=True) assert ota_cluster.query_next_image_response.call_count == 1 assert ( ota_cluster.query_next_image_response.call_args[0][0] == zcl.foundation.Status.NO_IMAGE_AVAILABLE ) assert len(ota_cluster.query_next_image_response.call_args[0]) == 1 assert listener.device_ota_update_available.call_count == 1 def test_ias_zone_type(): extra = b"\xaa\x55" zone, rest = sec.IasZone.ZoneType.deserialize(b"\x0d\x00" + extra) assert rest == extra assert zone is sec.IasZone.ZoneType.Motion_Sensor zone, rest = sec.IasZone.ZoneType.deserialize(b"\x81\x81" + extra) assert rest == extra assert zone.name.startswith("manufacturer_specific") assert zone.value == 0x8181 def test_ias_ace_audible_notification(): extra = b"\xaa\x55" notification_type, rest = sec.IasAce.AudibleNotification.deserialize( b"\x00" + extra ) assert rest == extra assert notification_type is sec.IasAce.AudibleNotification.Mute notification_type, rest = 
sec.IasAce.AudibleNotification.deserialize( b"\x81" + extra ) assert rest == extra assert notification_type.name.startswith("manufacturer_specific") assert notification_type.value == 0x81 def test_basic_cluster_power_source(): extra = b"The rest of the owl\xaa\x55" pwr_src, rest = zcl.clusters.general.Basic.PowerSource.deserialize(b"\x81" + extra) assert rest == extra assert pwr_src == zcl.clusters.general.Basic.PowerSource.Mains_single_phase assert pwr_src == 0x01 assert pwr_src.value == 0x01 assert pwr_src.battery_backup @pytest.mark.parametrize( "raw, mode, name", ( (0x00, 0, "Stop"), (0x01, 0, "Stop"), (0x02, 0, "Stop"), (0x03, 0, "Stop"), (0x30, 3, "Emergency"), (0x31, 3, "Emergency"), (0x32, 3, "Emergency"), (0x33, 3, "Emergency"), ), ) def test_security_iaswd_warning_mode(raw, mode, name): """Test warning command class of IasWD cluster.""" def _test(warning, data): assert warning.serialize() == data assert warning == raw assert warning.mode == mode assert warning.mode.name == name warning.mode = mode assert warning.serialize() == data assert warning.mode == mode data = types.uint8_t(raw).serialize() _test(sec.IasWd.Warning(raw), data) extra = b"The rest of the owl\xaa\x55" warn, rest = sec.IasWd.Warning.deserialize(data + extra) assert rest == extra _test(warn, data) repr(warn) def test_security_iaswd_warning_mode_2(): """Test warning command class of IasWD cluster.""" def _test(data, raw, mode, name): warning, _ = sec.IasWd.Warning.deserialize(data) assert warning.serialize() == data assert warning == raw assert warning.mode == mode assert warning.mode.name == name warning.mode = mode assert warning.serialize() == data assert warning.mode == mode for mode in sec.IasWd.Warning.WarningMode: for other in range(16): raw = mode << 4 | other data = types.uint8_t(raw).serialize() _test(data, raw, mode.value, mode.name) def test_security_iaswd_warning_strobe(): """Test strobe of warning command class of IasWD cluster.""" for strobe in sec.IasWd.Warning.Strobe: for 
mode in range(16): for siren in range(4): raw = mode << 4 | siren raw |= strobe.value << 2 data = types.uint8_t(raw).serialize() warning, _ = sec.IasWd.Warning.deserialize(data) assert warning.serialize() == data assert warning == raw assert warning.strobe == strobe.value assert warning.strobe.name == strobe.name warning.strobe = strobe assert warning.serialize() == data assert warning.strobe == strobe.value def test_security_iaswd_warning_siren(): """Test siren of warning command class of IasWD cluster.""" for siren in sec.IasWd.Warning.SirenLevel: for mode in range(16): for strobe in range(4): raw = mode << 4 | (strobe << 2) raw |= siren.value data = types.uint8_t(raw).serialize() warning, _ = sec.IasWd.Warning.deserialize(data) assert warning.serialize() == data assert warning == raw assert warning.level == siren.value assert warning.level.name == siren.name warning.level = siren assert warning.serialize() == data assert warning.level == siren.value @pytest.mark.parametrize( "raw, mode, name", ( (0x00, 0, "Armed"), (0x01, 0, "Armed"), (0x02, 0, "Armed"), (0x03, 0, "Armed"), (0x10, 1, "Disarmed"), (0x11, 1, "Disarmed"), (0x12, 1, "Disarmed"), (0x13, 1, "Disarmed"), ), ) def test_security_iaswd_squawk_mode(raw, mode, name): """Test squawk command class of IasWD cluster.""" def _test(squawk, data): assert squawk.serialize() == data assert squawk == raw assert squawk.mode == mode assert squawk.mode.name == name squawk.mode = mode assert squawk.serialize() == data assert squawk.mode == mode data = types.uint8_t(raw).serialize() _test(sec.IasWd.Squawk(raw), data) extra = b"The rest of the owl\xaa\x55" squawk, rest = sec.IasWd.Squawk.deserialize(data + extra) assert rest == extra _test(squawk, data) repr(squawk) def test_security_iaswd_squawk_strobe(): """Test strobe of squawk command class of IasWD cluster.""" for strobe in sec.IasWd.Squawk.Strobe: for mode in range(16): for level in range(4): raw = mode << 4 | level raw |= strobe.value << 3 data = 
types.uint8_t(raw).serialize() squawk, _ = sec.IasWd.Squawk.deserialize(data) assert squawk.serialize() == data assert squawk == raw assert squawk.strobe == strobe.value assert squawk.strobe == strobe assert squawk.strobe.name == strobe.name squawk.strobe = strobe assert squawk.serialize() == data assert squawk.strobe == strobe def test_security_iaswd_squawk_level(): """Test level of squawk command class of IasWD cluster.""" for level in sec.IasWd.Squawk.SquawkLevel: for other in range(64): raw = other << 2 | level.value data = types.uint8_t(raw).serialize() squawk, _ = sec.IasWd.Squawk.deserialize(data) assert squawk.serialize() == data assert squawk == raw assert squawk.level == level.value assert squawk.level == level assert squawk.level.name == level.name squawk.level = level assert squawk.serialize() == data assert squawk.level == level def test_hvac_thermostat_system_type(): """Test system_type class.""" hvac = zcl.clusters.hvac sys_type = hvac.Thermostat.SystemType(0x00) assert sys_type.cooling_system_stage == hvac.CoolingSystemStage.Cool_Stage_1 assert sys_type.heating_system_stage == hvac.HeatingSystemStage.Heat_Stage_1 assert sys_type.heating_fuel_source == hvac.HeatingFuelSource.Electric assert sys_type.heating_system_type == hvac.HeatingSystemType.Conventional sys_type = hvac.Thermostat.SystemType(0x35) assert sys_type.cooling_system_stage == hvac.CoolingSystemStage.Cool_Stage_2 assert sys_type.heating_system_stage == hvac.HeatingSystemStage.Heat_Stage_2 assert sys_type.heating_fuel_source == hvac.HeatingFuelSource.Gas assert sys_type.heating_system_type == hvac.HeatingSystemType.Heat_Pump @patch("zigpy.zcl.Cluster.send_default_rsp") async def test_ias_zone(send_rsp_mock): """Test sending default response on zone status notification.""" ep = MagicMock() ep.reply = AsyncMock() t = zcl.Cluster._registry[sec.IasZone.cluster_id](ep, is_server=False) # suppress default response hdr, args = t.deserialize(b"\tK\x00&\x00\x00\x00\x00\x00") 
hdr.frame_control.disable_default_response = True t.handle_message(hdr, args) assert send_rsp_mock.call_count == 0 # this should generate a default response hdr.frame_control.disable_default_response = False t.handle_message(hdr, args) assert send_rsp_mock.call_count == 0 t = zcl.Cluster._registry[sec.IasZone.cluster_id](ep, is_server=True) # suppress default response hdr, args = t.deserialize(b"\tK\x00&\x00\x00\x00\x00\x00") hdr.frame_control.disable_default_response = True t.handle_message(hdr, args) assert send_rsp_mock.call_count == 0 # this should generate a default response hdr.frame_control.disable_default_response = False t.handle_message(hdr, args) assert send_rsp_mock.call_count == 1 def test_ota_image_block_field_control(): """Test OTA image_block with field control deserializes properly.""" data = bytes.fromhex("01d403020b101d01001f000100000000400000") ep = MagicMock() cluster = zcl.clusters.general.Ota(ep) hdr, response = cluster.deserialize(data) assert hdr.serialize() + response.serialize() == data image_block = cluster.commands_by_name["image_block"].schema assert response == image_block( field_control=image_block.FieldControl.MinimumBlockPeriod, manufacturer_code=4107, image_type=285, file_version=0x01001F00, file_offset=0, maximum_data_size=64, minimum_block_period=0, ) assert response.request_node_addr is None zigpy-0.62.3/tests/test_zcl_foundation.py000066400000000000000000000614071456054056700205610ustar00rootroot00000000000000import pytest import zigpy.types as t from zigpy.zcl import foundation def test_typevalue(): tv = foundation.TypeValue() tv.type = 0x20 tv.value = t.uint8_t(99) ser = tv.serialize() r = repr(tv) assert r.startswith("TypeValue(") and r.endswith(")") assert "type=uint8_t" in r assert "value=99" in r tv2, data = foundation.TypeValue.deserialize(ser) assert data == b"" assert tv2.type == tv.type assert tv2.value == tv.value tv3 = foundation.TypeValue(tv2) assert tv3.type == tv.type assert tv3.value == tv.value assert tv3 == 
tv2 tv4 = foundation.TypeValue() tv4.type = 0x42 tv4.value = t.CharacterString("test") assert "CharacterString" in str(tv4) assert "'test'" in str(tv4) tv5 = foundation.TypeValue() tv5.type = 0x42 tv5.value = t.CharacterString("test") assert tv5 == tv5 assert tv5 == tv4 assert tv5 != tv3 def test_read_attribute_record(): orig = b"\x00\x00\x00\x20\x99" rar, data = foundation.ReadAttributeRecord.deserialize(orig) assert data == b"" assert rar.status == 0 assert isinstance(rar.value, foundation.TypeValue) assert isinstance(rar.value.value, t.uint8_t) assert rar.value.value == 0x99 r = repr(rar) assert len(r) > 5 assert repr(foundation.Status.SUCCESS) in r ser = rar.serialize() assert ser == orig def test_attribute_reporting_config_0(): arc = foundation.AttributeReportingConfig() arc.direction = foundation.ReportingDirection.SendReports arc.attrid = 99 arc.datatype = 0x20 arc.min_interval = 10 arc.max_interval = 20 arc.reportable_change = 30 ser = arc.serialize() arc2, data = foundation.AttributeReportingConfig.deserialize(ser) assert data == b"" assert arc2.direction == arc.direction assert arc2.attrid == arc.attrid assert arc2.datatype == arc.datatype assert arc2.min_interval == arc.min_interval assert arc2.max_interval == arc.max_interval assert arc.reportable_change == arc.reportable_change assert repr(arc) assert repr(arc) == repr(arc2) def test_attribute_reporting_config_1(): arc = foundation.AttributeReportingConfig() arc.direction = 1 arc.attrid = 99 arc.timeout = 0x7E ser = arc.serialize() arc2, data = foundation.AttributeReportingConfig.deserialize(ser) assert data == b"" assert arc2.direction == arc.direction assert arc2.timeout == arc.timeout assert repr(arc) def test_attribute_reporting_config_only_dir_and_attrid(): arc = foundation.AttributeReportingConfig() arc.direction = foundation.ReportingDirection.SendReports arc.attrid = 99 ser = arc.serialize(_only_dir_and_attrid=True) arc2, data = foundation.AttributeReportingConfig.deserialize( ser, 
_only_dir_and_attrid=True ) assert data == b"" assert arc2.direction == arc.direction assert arc2.attrid == arc.attrid assert repr(arc) assert repr(arc) == repr(arc2) def test_write_attribute_status_record(): attr_id = b"\x01\x00" extra = b"12da-" res, d = foundation.WriteAttributesStatusRecord.deserialize( b"\x00" + attr_id + extra ) assert res.status == foundation.Status.SUCCESS assert res.attrid is None assert d == attr_id + extra r = repr(res) assert r.startswith(foundation.WriteAttributesStatusRecord.__name__) assert "status" in r assert "attrid" not in r res, d = foundation.WriteAttributesStatusRecord.deserialize( b"\x87" + attr_id + extra ) assert res.status == foundation.Status.INVALID_VALUE assert res.attrid == 0x0001 assert d == extra r = repr(res) assert "status" in r assert "attrid" in r rec = foundation.WriteAttributesStatusRecord(foundation.Status.SUCCESS, 0xAABB) assert rec.serialize() == b"\x00" rec.status = foundation.Status.UNSUPPORTED_ATTRIBUTE assert rec.serialize()[0:1] == foundation.Status.UNSUPPORTED_ATTRIBUTE.serialize() assert rec.serialize()[1:] == b"\xbb\xaa" def test_configure_reporting_response_serialization(): # success status only res, d = foundation.ConfigureReportingResponseRecord.deserialize(b"\x00") assert res.status == foundation.Status.SUCCESS assert res.direction is None assert res.attrid is None assert d == b"" # success + direction and attr id direction_attr_id = b"\x00\x01\x10" extra = b"12da-" res, d = foundation.ConfigureReportingResponseRecord.deserialize( b"\x00" + direction_attr_id + extra ) assert res.status == foundation.Status.SUCCESS assert res.direction is foundation.ReportingDirection.SendReports assert res.attrid == 0x1001 assert d == extra r = repr(res) assert r.startswith(foundation.ConfigureReportingResponseRecord.__name__ + "(") assert "status" in r assert "direction" not in r assert "attrid" not in r # failure record deserialization res, d = foundation.ConfigureReportingResponseRecord.deserialize( b"\x8c" + 
direction_attr_id + extra ) assert res.status == foundation.Status.UNREPORTABLE_ATTRIBUTE assert res.direction is not None assert res.attrid == 0x1001 assert d == extra r = repr(res) assert "status" in r assert "direction" in r assert "attrid" in r # successful record serializes only Status rec = foundation.ConfigureReportingResponseRecord( foundation.Status.SUCCESS, 0x00, 0xAABB ) assert rec.serialize() == b"\x00" rec.status = foundation.Status.UNREPORTABLE_ATTRIBUTE assert rec.serialize()[0:1] == foundation.Status.UNREPORTABLE_ATTRIBUTE.serialize() assert rec.serialize()[1:] == b"\x00\xbb\xaa" def test_status_undef(): data = b"\xff" extra = b"extra" status, rest = foundation.Status.deserialize(data + extra) assert rest == extra assert status == 0xFF assert status.value == 0xFF assert status.name == "undefined_0xff" assert isinstance(status, foundation.Status) def test_frame_control(): """Test FrameControl frame_type.""" extra = b"abcd\xaa\x55" frc, rest = foundation.FrameControl.deserialize(b"\x00" + extra) assert rest == extra assert frc.frame_type == foundation.FrameType.GLOBAL_COMMAND frc, rest = foundation.FrameControl.deserialize(b"\x01" + extra) assert rest == extra assert frc.frame_type == foundation.FrameType.CLUSTER_COMMAND r = repr(frc) assert isinstance(r, str) def test_frame_control_general(): frc = foundation.FrameControl.general( direction=foundation.Direction.Client_to_Server ) assert frc.is_cluster is False assert frc.is_general is True data = frc.serialize() assert data == b"\x00" assert not frc.is_manufacturer_specific frc.is_manufacturer_specific = False assert frc.serialize() == b"\x00" frc.is_manufacturer_specific = True assert frc.serialize() == b"\x04" frc = foundation.FrameControl.general( direction=foundation.Direction.Client_to_Server ) assert frc.direction == foundation.Direction.Client_to_Server assert frc.serialize() == b"\x00" frc.direction = foundation.Direction.Server_to_Client assert frc.serialize() == b"\x08" assert ( 
def test_frame_control_cluster():
    """FrameControl.cluster(): flag bits layer on top of the 0x01 frame type."""
    make = foundation.FrameControl.cluster
    c2s = foundation.Direction.Client_to_Server
    s2c = foundation.Direction.Server_to_Client

    fc = make(direction=c2s)
    assert fc.is_cluster is True
    assert fc.is_general is False
    assert fc.serialize() == b"\x01"

    # Manufacturer-specific flag is bit 2 (0x01 -> 0x05)
    assert not fc.is_manufacturer_specific
    fc.is_manufacturer_specific = False
    assert fc.serialize() == b"\x01"
    fc.is_manufacturer_specific = True
    assert fc.serialize() == b"\x05"

    # Direction flag is bit 3 (0x01 -> 0x09)
    fc = make(direction=c2s)
    assert fc.direction == c2s
    assert fc.serialize() == b"\x01"
    fc.direction = c2s
    assert fc.serialize() == b"\x01"
    fc.direction = s2c
    assert fc.serialize() == b"\x09"
    assert make(direction=s2c).serialize() == b"\x19"

    # Disable-default-response flag is bit 4 (0x01 -> 0x11)
    fc = make(direction=c2s)
    assert not fc.disable_default_response
    assert fc.serialize() == b"\x01"
    fc.disable_default_response = False
    assert fc.serialize() == b"\x01"
    fc.disable_default_response = True
    assert fc.serialize() == b"\x11"
def test_frame_header_disable_manufacturer_id():
    """Test frame header manufacturer ID can be disabled with NO_MANUFACTURER_ID."""
    hdr = foundation.ZCLHeader.cluster(tsn=123, command_id=0x12, manufacturer=None)
    assert hdr.manufacturer is None

    # A real manufacturer code is stored as-is
    hdr.manufacturer = 0x1234
    assert hdr.manufacturer == 0x1234

    # Assigning the NO_MANUFACTURER_ID sentinel clears the field back to None
    hdr.manufacturer = foundation.ZCLHeader.NO_MANUFACTURER_ID
    assert hdr.manufacturer is None

    # The sentinel is also honored when passed directly to the constructor
    hdr2 = foundation.ZCLHeader.cluster(
        tsn=123, command_id=0x12, manufacturer=foundation.ZCLHeader.NO_MANUFACTURER_ID
    )
    assert hdr2.manufacturer is None
def test_attribute_report():
    """Copy-constructing an AttributeReportingConfig preserves all fields."""
    original = foundation.AttributeReportingConfig()
    original.direction = 0x01
    original.attrid = 0xAA55
    original.timeout = 900

    duplicate = foundation.AttributeReportingConfig(original)
    for field in ("attrid", "direction", "timeout"):
        assert getattr(original, field) == getattr(duplicate, field)
def test_write_attrs_response_deserialize():
    """Test deserialization."""
    # A lone SUCCESS status byte is a complete single-record response
    data = b"\x00"
    extra = b"\xaa\x55"
    r, rest = foundation.WriteAttributesResponse.deserialize(data + extra)
    assert len(r) == 1
    assert r[0].status == foundation.Status.SUCCESS
    assert rest == extra

    # Failure records each carry a little-endian attribute id after the status
    data = b"\x86\x34\x12\x87\x35\x12"
    r, rest = foundation.WriteAttributesResponse.deserialize(data + extra)
    assert len(r) == 2
    assert rest == extra
    assert r[0].status == foundation.Status.UNSUPPORTED_ATTRIBUTE
    assert r[0].attrid == 0x1234
    assert r[1].status == foundation.Status.INVALID_VALUE
    assert r[1].attrid == 0x1235
def test_configure_reporting_response_serialize_empty():
    """Serializing an empty ConfigureReportingResponse must raise."""
    r = foundation.ConfigureReportingResponse()

    # An empty configure reporting response doesn't make sense
    with pytest.raises(ValueError):
        r.serialize()
status_names assert status.name in aps_names assert status.name not in nwk_names assert status.name not in mac_names status = foundation.Status(0xD0) assert status.name not in status_names assert status.name not in aps_names assert status.name in nwk_names assert status.name not in mac_names status = foundation.Status(0xE9) assert status.name not in status_names assert status.name not in aps_names assert status.name not in nwk_names assert status.name in mac_names status = foundation.Status(0xFF) assert status.name not in status_names assert status.name not in aps_names assert status.name not in nwk_names assert status.name not in mac_names assert status.name == "undefined_0xff" def test_schema(): """Test schema parameter parsing""" bad_s = foundation.ZCLCommandDef( id=0x12, name="test", schema={ "uh oh": t.uint16_t, }, direction=foundation.Direction.Client_to_Server, ) with pytest.raises(ValueError): bad_s.with_compiled_schema() s = foundation.ZCLCommandDef( id=0x12, name="test", schema={ "foo": t.uint8_t, "bar?": t.uint16_t, "baz?": t.uint8_t, }, direction=foundation.Direction.Client_to_Server, ) s = s.with_compiled_schema() str(s) assert s.schema.foo.type is t.uint8_t assert not s.schema.foo.optional assert s.schema.bar.type is t.uint16_t assert s.schema.bar.optional assert s.schema.baz.type is t.uint8_t assert s.schema.baz.optional assert "test" in str(s) and "direction=" assert singleton == singleton obj = {} obj[singleton] = 5 assert obj[singleton] == 5 zigpy-0.62.3/zigpy/000077500000000000000000000000001456054056700141225ustar00rootroot00000000000000zigpy-0.62.3/zigpy/__init__.py000066400000000000000000000000001456054056700162210ustar00rootroot00000000000000zigpy-0.62.3/zigpy/appdb.py000066400000000000000000001306741456054056700155750ustar00rootroot00000000000000from __future__ import annotations import asyncio import contextlib from datetime import datetime, timedelta, timezone import json import logging import re import types from typing import Any import 
def _register_sqlite_adapters():
    """Register sqlite3 adapters/converters for zigpy address types.

    EUI64 and ExtendedPanId values are written as their string form; columns
    declared with the ``ieee`` type are parsed back into EUI64 objects
    (requires the connection to use ``detect_types=PARSE_DECLTYPES``).
    """

    def adapt_ieee(eui64):
        # Store EUI64/ExtendedPanId as their canonical string representation
        return str(eui64)

    sqlite3.register_adapter(t.EUI64, adapt_ieee)
    sqlite3.register_adapter(t.ExtendedPanId, adapt_ieee)

    def convert_ieee(s):
        # sqlite3 hands converters raw bytes; decode before parsing
        return t.EUI64.convert(s.decode())

    sqlite3.register_converter("ieee", convert_ieee)
""" return aiosqlite.Connection( connector=lambda: sqlite3.connect(str(database), **kwargs), iter_chunk_size=iter_chunk_size, ) def decode_str_attribute(value: str | bytes) -> str: if isinstance(value, str): return value return value.split(b"\x00", 1)[0].decode("utf-8") class PersistingListener(zigpy.util.CatchingTaskMixin): def __init__( self, connection: aiosqlite.Connection, application: zigpy.typing.ControllerApplicationType, ) -> None: _register_sqlite_adapters() self._db = connection self._application = application self._callback_handlers: asyncio.Queue = asyncio.Queue() self.running = False self._worker_task = asyncio.create_task(self._worker()) async def initialize_tables(self) -> None: async with self.execute("PRAGMA integrity_check") as cursor: rows = await cursor.fetchall() status = "\n".join(row[0] for row in rows) if status != "ok": LOGGER.error("SQLite database file is corrupted!\n%s", status) # Truncate the SQLite journal file instead of deleting it after transactions await self._set_isolation_level(None) await self.execute("PRAGMA journal_mode = WAL") await self.execute("PRAGMA synchronous = normal") await self.execute("PRAGMA temp_store = memory") await self._set_isolation_level("DEFERRED") await self.execute("PRAGMA foreign_keys = ON") await self._run_migrations() @classmethod async def new( cls, database_file: str, app: zigpy.typing.ControllerApplicationType ) -> PersistingListener: """Create an instance of persisting listener.""" sqlite_conn = await aiosqlite_connect( database_file, detect_types=sqlite3.PARSE_DECLTYPES, isolation_level="DEFERRED", # The default is "", an alias for "DEFERRED" ) listener = cls(sqlite_conn, app) try: await listener.initialize_tables() except Exception: await listener.shutdown() raise listener.running = True return listener async def _worker(self) -> None: """Process request in the received order.""" while True: cb_name, args = await self._callback_handlers.get() handler = getattr(self, cb_name) assert handler try: 
    def enqueue(self, cb_name: str, *args) -> None:
        """Enqueue an async callback handler action.

        Events that arrive after shutdown (``self.running`` is False) are
        dropped instead of queued, since the worker task no longer drains
        the queue at that point.
        """
        if not self.running:
            LOGGER.debug("Discarding %s event", cb_name)
            return
        self._callback_handlers.put_nowait((cb_name, args))
WHERE ieee=?", (nwk, ieee)) await self._db.commit() def device_initialized(self, device: zigpy.typing.DeviceType) -> None: pass def device_left(self, device: zigpy.typing.DeviceType) -> None: pass def device_last_seen_updated( self, device: zigpy.typing.DeviceType, last_seen: datetime ) -> None: """Device last_seen time is updated.""" self.enqueue("_save_device_last_seen", device.ieee, last_seen) async def _save_device_last_seen(self, ieee: t.EUI64, last_seen: datetime) -> None: q = f"""UPDATE devices{DB_V} SET last_seen=:ts WHERE ieee=:ieee AND :ts - last_seen > :min_update_delta""" await self.execute( q, { "ts": last_seen.timestamp(), "ieee": ieee, "min_update_delta": MIN_UPDATE_DELTA, }, ) await self._db.commit() def device_relays_updated( self, device: zigpy.typing.DeviceType, relays: t.Relays | None ) -> None: """Device relay list is updated.""" self.enqueue("_save_device_relays", device.ieee, relays) async def _save_device_relays(self, ieee: t.EUI64, relays: t.Relays | None) -> None: if relays is None: await self.execute(f"DELETE FROM relays{DB_V} WHERE ieee = ?", (ieee,)) else: q = f"""INSERT INTO relays{DB_V} VALUES (:ieee, :relays) ON CONFLICT (ieee) DO UPDATE SET relays=excluded.relays WHERE relays != :relays""" await self.execute(q, {"ieee": ieee, "relays": relays.serialize()}) await self._db.commit() def attribute_updated( self, cluster: zigpy.typing.ClusterType, attrid: int, value: Any, timestamp: datetime, ) -> None: self.enqueue( "_save_attribute", cluster.endpoint.device.ieee, cluster.endpoint.endpoint_id, cluster.cluster_id, attrid, value, timestamp, ) def unsupported_attribute_added( self, cluster: zigpy.typing.ClusterType, attrid: int ) -> None: self.enqueue( "_unsupported_attribute_added", cluster.endpoint.device.ieee, cluster.endpoint.endpoint_id, cluster.cluster_id, attrid, ) async def _unsupported_attribute_added( self, ieee: t.EUI64, endpoint_id: int, cluster_id: int, attrid: int ) -> None: q = f"""INSERT INTO unsupported_attributes{DB_V} 
    async def _neighbors_updated(
        self, ieee: t.EUI64, neighbors: list[zdo_t.Neighbor]
    ) -> None:
        """Replace the persisted neighbor table for one device.

        Invoked after a Mgmt_Lqi_req scan: all existing rows for the device
        are deleted, then the fresh neighbor list is inserted in one batch.
        """
        await self.execute(f"DELETE FROM neighbors{DB_V} WHERE device_ieee = ?", [ieee])

        # 12 columns: device_ieee followed by the Neighbor tuple fields
        rows = [(ieee,) + neighbor.as_tuple() for neighbor in neighbors]
        await self._db.executemany(
            f"INSERT INTO neighbors{DB_V} VALUES (?,?,?,?,?,?,?,?,?,?,?,?)", rows
        )
        await self._db.commit()
    async def _group_removed(self, group: zigpy.group.Group) -> None:
        """Delete a group row by its group id.

        NOTE(review): group_members rows are presumably removed via the
        schema's foreign-key constraints (``PRAGMA foreign_keys = ON`` is set
        at startup) — confirm against the schema.
        """
        q = f"DELETE FROM groups{DB_V} WHERE group_id=?"
        await self.execute(q, (group.group_id,))
        await self._db.commit()
    async def _save_endpoints(self, device: zigpy.typing.DeviceType) -> None:
        """Upsert every non-ZDO endpoint of *device* into the endpoints table.

        Does not commit; the caller (``_save_device``) commits the
        transaction after all endpoint/cluster/attribute data is written.
        """
        rows = [
            (
                device.ieee,
                ep.endpoint_id,
                ep.profile_id,
                ep.device_type,
                ep.status,
            )
            for ep in device.non_zdo_endpoints
        ]
        q = f"""INSERT INTO endpoints{DB_V} VALUES (?, ?, ?, ?, ?)
                ON CONFLICT (ieee, endpoint_id)
                DO UPDATE SET
                    profile_id=excluded.profile_id,
                    device_type=excluded.device_type,
                    status=excluded.status"""
        await self._db.executemany(q, rows)
    async def _save_input_clusters(self, endpoint: zigpy.typing.EndpointType) -> None:
        """Insert the endpoint's input (server) cluster ids, ignoring duplicates.

        Does not commit; the caller (``_save_device``) commits afterwards.
        """
        clusters = [
            (endpoint.device.ieee, endpoint.endpoint_id, cluster.cluster_id)
            for cluster in endpoint.in_clusters.values()
        ]
        q = f"""INSERT INTO in_clusters{DB_V} VALUES (?, ?, ?)
                ON CONFLICT (ieee, endpoint_id, cluster)
                DO NOTHING"""
        await self._db.executemany(q, clusters)
    async def _save_output_clusters(self, endpoint: zigpy.typing.EndpointType) -> None:
        """Insert the endpoint's output (client) cluster ids, ignoring duplicates.

        Does not commit; the caller (``_save_device``) commits afterwards.
        """
        clusters = [
            (endpoint.device.ieee, endpoint.endpoint_id, cluster.cluster_id)
            for cluster in endpoint.out_clusters.values()
        ]
        q = f"""INSERT INTO out_clusters{DB_V} VALUES (?, ?, ?)
                ON CONFLICT (ieee, endpoint_id, cluster)
                DO NOTHING"""
        await self._db.executemany(q, clusters)
    async def _network_backup_removed(self, backup_time: datetime) -> None:
        """Delete a stored network backup, matched by its backup time.

        Backups are stored as opaque JSON blobs, so the timestamp is matched
        inside the payload itself via ``json_extract``.
        """
        q = f"""DELETE FROM network_backups{DB_V}
                WHERE json_extract(backup_json, '$.backup_time')=?"""
        await self.execute(q, (backup_time.isoformat(),))
        await self._db.commit()
value: %s", dev.nwk, endpoint_id, cluster, attrid, value, ) # Populate the device's manufacturer and model attributes if cluster == Basic.cluster_id and attrid == 0x0004: dev.manufacturer = decode_str_attribute(value) elif cluster == Basic.cluster_id and attrid == 0x0005: dev.model = decode_str_attribute(value) async def _load_unsupported_attributes(self) -> None: """Load unsuppoted attributes.""" async with self.execute( f"SELECT * FROM unsupported_attributes{DB_V}" ) as cursor: async for (ieee, endpoint_id, cluster_id, attrid) in cursor: dev = self._application.get_device(ieee) try: ep = dev.endpoints[endpoint_id] except KeyError: continue try: cluster = ep.in_clusters[cluster_id] except KeyError: continue cluster.add_unsupported_attribute(attrid, inhibit_events=True) async def _load_devices(self) -> None: async with self.execute(f"SELECT * FROM devices{DB_V}") as cursor: async for (ieee, nwk, status, last_seen) in cursor: dev = self._application.add_device(ieee, nwk) dev.status = zigpy.device.Status(status) if last_seen > 0: dev.last_seen = last_seen async def _load_node_descriptors(self) -> None: async with self.execute(f"SELECT * FROM node_descriptors{DB_V}") as cursor: async for (ieee, *fields) in cursor: dev = self._application.get_device(ieee) dev.node_desc = zdo_t.NodeDescriptor(*fields) assert dev.node_desc.is_valid async def _load_endpoints(self) -> None: async with self.execute(f"SELECT * FROM endpoints{DB_V}") as cursor: async for (ieee, epid, profile_id, device_type, status) in cursor: dev = self._application.get_device(ieee) ep = dev.add_endpoint(epid) ep.profile_id = profile_id ep.status = zigpy.endpoint.Status(status) if profile_id == zigpy.profiles.zha.PROFILE_ID: ep.device_type = zigpy.profiles.zha.DeviceType(device_type) elif profile_id == zigpy.profiles.zll.PROFILE_ID: ep.device_type = zigpy.profiles.zll.DeviceType(device_type) else: ep.device_type = device_type async def _load_clusters(self) -> None: async with self.execute(f"SELECT * FROM 
in_clusters{DB_V}") as cursor: async for (ieee, endpoint_id, cluster) in cursor: dev = self._application.get_device(ieee) ep = dev.endpoints[endpoint_id] ep.add_input_cluster(cluster) async with self.execute(f"SELECT * FROM out_clusters{DB_V}") as cursor: async for (ieee, endpoint_id, cluster) in cursor: dev = self._application.get_device(ieee) ep = dev.endpoints[endpoint_id] ep.add_output_cluster(cluster) async def _load_groups(self) -> None: async with self.execute(f"SELECT * FROM groups{DB_V}") as cursor: async for (group_id, name) in cursor: self._application.groups.add_group(group_id, name, suppress_event=True) async def _load_group_members(self) -> None: async with self.execute(f"SELECT * FROM group_members{DB_V}") as cursor: async for (group_id, ieee, ep_id) in cursor: dev = self._application.get_device(ieee) group = self._application.groups[group_id] group.add_member(dev.endpoints[ep_id], suppress_event=True) async def _load_relays(self) -> None: async with self.execute(f"SELECT * FROM relays{DB_V}") as cursor: async for (ieee, value) in cursor: dev = self._application.get_device(ieee) dev.relays, _ = t.Relays.deserialize(value) async def _load_neighbors(self) -> None: async with self.execute(f"SELECT * FROM neighbors{DB_V}") as cursor: async for ieee, *fields in cursor: neighbor = zdo_t.Neighbor(*fields) self._application.topology.neighbors[ieee].append(neighbor) async def _load_routes(self) -> None: async with self.execute(f"SELECT * FROM routes{DB_V}") as cursor: async for ieee, *fields in cursor: route = zdo_t.Route(*fields) self._application.topology.routes[ieee].append(route) async def _load_network_backups(self) -> None: self._application.backups.backups.clear() async with self.execute( f"SELECT * FROM network_backups{DB_V} ORDER BY id" ) as cursor: backups = [] async for _id, backup_json in cursor: backup = zigpy.backups.NetworkBackup.from_dict(json.loads(backup_json)) backups.append(backup) backups.sort(key=lambda b: b.backup_time) for backup in 
backups: self._application.backups.add_backup(backup, suppress_event=True) async def _register_device_listeners(self) -> None: for dev in self._application.devices.values(): dev.add_context_listener(self) @contextlib.asynccontextmanager async def _transaction(self): await self.execute("BEGIN TRANSACTION") try: yield except Exception: await self.execute("ROLLBACK") raise else: await self.execute("COMMIT") async def _get_table_versions(self) -> dict[str, int]: tables = {} async with self.execute( "SELECT name FROM sqlite_master WHERE type='table'" ) as cursor: async for (name,) in cursor: # Ignore tables internal to SQLite if name.startswith("sqlite_"): continue # The regex will always return a match match = DB_V_REGEX.search(name) assert match is not None tables[name] = int(match.group(0)[2:] or "0") return tables async def _table_exists(self, name: str) -> bool: return name in (await self._get_table_versions()) async def _run_migrations(self) -> bool: """Migrates the database to the newest schema, returning True if migrations ran.""" tables = await self._get_table_versions() tables_version = max(tables.values(), default=0) async with self.execute("PRAGMA user_version") as cursor: (db_version,) = await cursor.fetchone() LOGGER.debug( "Current database version is v%s (table version v%s)", db_version, tables_version, ) # Table version suffixes were introduced in v4. If the table version suffix does # not match `user_version`, either zigpy was downgraded to a *really* old # version (July 2021), or it's corrupt. Running migrations could delete existing # table data, and since we cannot guarantee the schema is intact, fail early. if tables_version >= 4 and tables_version != db_version: raise zigpy.exceptions.CorruptDatabase( f"The `zigbee.db` database version ({db_version}) does not match its" f" max table version ({tables_version}). 
The database is inconsistent.", ) if db_version == 0 and not tables: # If this is a brand new database, just load the current schema await self.executescript(zigpy.appdb_schemas.SCHEMAS[DB_VERSION]) return False elif db_version > DB_VERSION: LOGGER.error( "This zigpy release uses database schema v%s but the database is v%s." " Downgrading zigpy is *not* recommended and may result in data loss." " Use at your own risk.", DB_VERSION, db_version, ) return False # All migrations must succeed. If any fail, the database is not touched. async with self._transaction(): for migration, to_db_version in [ (self._migrate_to_v4, 4), (self._migrate_to_v5, 5), (self._migrate_to_v6, 6), (self._migrate_to_v7, 7), (self._migrate_to_v8, 8), (self._migrate_to_v9, 9), (self._migrate_to_v10, 10), (self._migrate_to_v11, 11), (self._migrate_to_v12, 12), ]: if db_version >= min(to_db_version, DB_VERSION): continue LOGGER.info( "Migrating database from v%d to v%d", db_version, to_db_version ) await self.executescript(zigpy.appdb_schemas.SCHEMAS[to_db_version]) await migration() db_version = to_db_version return True async def _migrate_tables( self, table_map: dict[str, str], *, errors: str = "raise" ): """Copy rows from one set of tables into another.""" # Extract the "old" table version suffix tables = await self._get_table_versions() old_table_name = list(table_map.keys())[0] old_version = tables[old_table_name] # Check which tables would not be migrated old_tables = [t for t, v in tables.items() if v == old_version] unmigrated_old_tables = [t for t in old_tables if t not in table_map] if unmigrated_old_tables: raise RuntimeError( f"The following tables were not migrated: {unmigrated_old_tables}" ) # Insertion order matters for foreign key constraints but any rows that fail # to insert due to constraint violations can be discarded for old_table, new_table in table_map.items(): # Ignore tables without a migration if new_table is None: continue async with self.execute(f"SELECT * FROM 
{old_table}") as cursor: async for row in cursor: placeholders = ",".join("?" * len(row)) try: await self.execute( f"INSERT INTO {new_table} VALUES ({placeholders})", row ) except sqlite3.IntegrityError as e: if errors == "raise": raise elif errors == "warn": LOGGER.warning( "Failed to migrate row %s%s: %s", old_table, row, e ) elif errors == "ignore": pass else: raise ValueError(f"Invalid value for `errors`: {errors!r}") async def _migrate_to_v4(self): """Schema v4 expanded the node descriptor and neighbor table columns""" # The `node_descriptors` table was added in v1 if await self._table_exists("node_descriptors"): async with self.execute("SELECT * FROM node_descriptors") as cur: async for dev_ieee, value in cur: node_desc, rest = zdo_t.NodeDescriptor.deserialize(value) assert not rest await self.execute( "INSERT INTO node_descriptors_v4" " VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)", (dev_ieee,) + node_desc.as_tuple(), ) # The `neighbors` table was added in v3 but the version number was not # incremented. It may not exist. 
if await self._table_exists("neighbors"): async with self.execute("SELECT * FROM neighbors") as cur: async for dev_ieee, epid, ieee, nwk, packed, prm, depth, lqi in cur: neighbor = zdo_t.Neighbor( extended_pan_id=epid, ieee=ieee, nwk=nwk, permit_joining=prm, depth=depth, lqi=lqi, reserved2=0b000000, **zdo_t.Neighbor._parse_packed(packed), ) await self.execute( "INSERT INTO neighbors_v4 VALUES (?,?,?,?,?,?,?,?,?,?,?,?)", (dev_ieee,) + neighbor.as_tuple(), ) async def _migrate_to_v5(self): """Schema v5 introduced global table version suffixes and removed stale rows""" await self._migrate_tables( { "devices": "devices_v5", "endpoints": "endpoints_v5", "clusters": "in_clusters_v5", "output_clusters": "out_clusters_v5", "groups": "groups_v5", "group_members": "group_members_v5", "relays": "relays_v5", "attributes": "attributes_cache_v5", # These were migrated in v4 "neighbors_v4": "neighbors_v5", "node_descriptors_v4": "node_descriptors_v5", # Explicitly specify which tables will not be migrated "neighbors": None, "node_descriptors": None, }, errors="warn", ) async def _migrate_to_v6(self): """Schema v6 relaxed the `attribute_cache` table schema to ignore endpoints""" await self._migrate_tables( { "devices_v5": "devices_v6", "endpoints_v5": "endpoints_v6", "in_clusters_v5": "in_clusters_v6", "out_clusters_v5": "out_clusters_v6", "groups_v5": "groups_v6", "group_members_v5": "group_members_v6", "relays_v5": "relays_v6", "attributes_cache_v5": "attributes_cache_v6", "neighbors_v5": "neighbors_v6", "node_descriptors_v5": "node_descriptors_v6", } ) # See if we can migrate any `attributes_cache` rows skipped by the v5 migration if await self._table_exists("attributes"): async with self.execute("SELECT count(*) FROM attributes") as cur: (num_attrs_v4,) = await cur.fetchone() async with self.execute("SELECT count(*) FROM attributes_cache_v6") as cur: (num_attrs_v6,) = await cur.fetchone() if num_attrs_v6 < num_attrs_v4: LOGGER.warning( "Migrating up to %d rows skipped by v5 
migration", num_attrs_v4 - num_attrs_v6, ) await self._migrate_tables( { "attributes": "attributes_cache_v6", "devices": None, "endpoints": None, "clusters": None, "neighbors": None, "node_descriptors": None, "output_clusters": None, "groups": None, "group_members": None, "relays": None, }, errors="ignore", ) async def _migrate_to_v7(self): """Schema v7 added the `unsupported_attributes` table.""" await self._migrate_tables( { "devices_v6": "devices_v7", "endpoints_v6": "endpoints_v7", "in_clusters_v6": "in_clusters_v7", "out_clusters_v6": "out_clusters_v7", "groups_v6": "groups_v7", "group_members_v6": "group_members_v7", "relays_v6": "relays_v7", "attributes_cache_v6": "attributes_cache_v7", "neighbors_v6": "neighbors_v7", "node_descriptors_v6": "node_descriptors_v7", } ) async def _migrate_to_v8(self): """Schema v8 added the `devices_v8.last_seen` column.""" async with self.execute("SELECT * FROM devices_v7") as cursor: async for (ieee, nwk, status) in cursor: # Set the default `last_seen` to the unix epoch await self.execute( "INSERT INTO devices_v8 VALUES (?, ?, ?, ?)", (ieee, nwk, status, 0), ) # Copy the devices table first, it should have no conflicts await self._migrate_tables( { "endpoints_v7": "endpoints_v8", "in_clusters_v7": "in_clusters_v8", "out_clusters_v7": "out_clusters_v8", "groups_v7": "groups_v8", "group_members_v7": "group_members_v8", "relays_v7": "relays_v8", "attributes_cache_v7": "attributes_cache_v8", "neighbors_v7": "neighbors_v8", "node_descriptors_v7": "node_descriptors_v8", "unsupported_attributes_v7": "unsupported_attributes_v8", "devices_v7": None, } ) async def _migrate_to_v9(self): """Schema v9 changed the data type of the `devices_v8.last_seen` column.""" await self.execute( """INSERT INTO devices_v9 (ieee, nwk, status, last_seen) SELECT ieee, nwk, status, last_seen / 1000.0 FROM devices_v8""" ) await self._migrate_tables( { "endpoints_v8": "endpoints_v9", "in_clusters_v8": "in_clusters_v9", "out_clusters_v8": "out_clusters_v9", 
"groups_v8": "groups_v9", "group_members_v8": "group_members_v9", "relays_v8": "relays_v9", "attributes_cache_v8": "attributes_cache_v9", "neighbors_v8": "neighbors_v9", "node_descriptors_v8": "node_descriptors_v9", "unsupported_attributes_v8": "unsupported_attributes_v9", "devices_v8": None, } ) async def _migrate_to_v10(self): """Schema v10 added a new `network_backups_v10` table.""" await self._migrate_tables( { "devices_v9": "devices_v10", "endpoints_v9": "endpoints_v10", "in_clusters_v9": "in_clusters_v10", "out_clusters_v9": "out_clusters_v10", "groups_v9": "groups_v10", "group_members_v9": "group_members_v10", "relays_v9": "relays_v10", "attributes_cache_v9": "attributes_cache_v10", "neighbors_v9": "neighbors_v10", "node_descriptors_v9": "node_descriptors_v10", "unsupported_attributes_v9": "unsupported_attributes_v10", } ) async def _migrate_to_v11(self): """Schema v11 added a new `routes_v11` table.""" await self._migrate_tables( { "devices_v10": "devices_v11", "endpoints_v10": "endpoints_v11", "in_clusters_v10": "in_clusters_v11", "out_clusters_v10": "out_clusters_v11", "groups_v10": "groups_v11", "group_members_v10": "group_members_v11", "relays_v10": "relays_v11", "attributes_cache_v10": "attributes_cache_v11", "neighbors_v10": "neighbors_v11", "node_descriptors_v10": "node_descriptors_v11", "unsupported_attributes_v10": "unsupported_attributes_v11", "network_backups_v10": "network_backups_v11", } ) async def _migrate_to_v12(self): """Schema v12 added a `timestamp` column to attribute updates.""" await self._migrate_tables( { "devices_v11": "devices_v12", "endpoints_v11": "endpoints_v12", "in_clusters_v11": "in_clusters_v12", "neighbors_v11": "neighbors_v12", "routes_v11": "routes_v12", "node_descriptors_v11": "node_descriptors_v12", "out_clusters_v11": "out_clusters_v12", "groups_v11": "groups_v12", "group_members_v11": "group_members_v12", "relays_v11": "relays_v12", "unsupported_attributes_v11": "unsupported_attributes_v12", "network_backups_v11": 
"network_backups_v12", "attributes_cache_v11": None, } ) async with self.execute("SELECT * FROM attributes_cache_v11") as cursor: async for (ieee, endpoint_id, cluster_id, attrid, value) in cursor: # Set the default `last_updated` to the unix epoch await self.execute( "INSERT INTO attributes_cache_v12 VALUES (?, ?, ?, ?, ?, ?)", (ieee, endpoint_id, cluster_id, attrid, value, 0), ) zigpy-0.62.3/zigpy/appdb_schemas/000077500000000000000000000000001456054056700167135ustar00rootroot00000000000000zigpy-0.62.3/zigpy/appdb_schemas/__init__.py000066400000000000000000000006161456054056700210270ustar00rootroot00000000000000from __future__ import annotations import sys if sys.version_info >= (3, 9): import importlib.resources as importlib_resources else: import importlib_resources # Map each schema version to its SQL SCHEMAS = {} for file in importlib_resources.files(__name__).glob("schema_v*.sql"): n = int(file.name.replace("schema_v", "").replace(".sql", ""), 10) SCHEMAS[n] = file.read_text() zigpy-0.62.3/zigpy/appdb_schemas/schema_v0.sql000066400000000000000000000015341456054056700213040ustar00rootroot00000000000000PRAGMA user_version = 0; CREATE TABLE IF NOT EXISTS devices (ieee ieee, nwk, status); CREATE TABLE IF NOT EXISTS endpoints (ieee ieee, endpoint_id, profile_id, device_type device_type, status); CREATE TABLE IF NOT EXISTS clusters (ieee ieee, endpoint_id, cluster); CREATE TABLE IF NOT EXISTS output_clusters (ieee ieee, endpoint_id, cluster); CREATE TABLE IF NOT EXISTS attributes (ieee ieee, endpoint_id, cluster, attrid, value); CREATE UNIQUE INDEX IF NOT EXISTS ieee_idx ON devices(ieee); CREATE UNIQUE INDEX IF NOT EXISTS endpoint_idx ON endpoints(ieee, endpoint_id); CREATE UNIQUE INDEX IF NOT EXISTS cluster_idx ON clusters(ieee, endpoint_id, cluster); CREATE UNIQUE INDEX IF NOT EXISTS output_cluster_idx ON output_clusters(ieee, endpoint_id, cluster); CREATE UNIQUE INDEX IF NOT EXISTS attribute_idx ON attributes(ieee, endpoint_id, cluster, attrid); 
zigpy-0.62.3/zigpy/appdb_schemas/schema_v1.sql000066400000000000000000000032551456054056700213070ustar00rootroot00000000000000PRAGMA user_version = 1; CREATE TABLE IF NOT EXISTS devices (ieee ieee, nwk, status); CREATE TABLE IF NOT EXISTS endpoints (ieee ieee, endpoint_id, profile_id, device_type device_type, status); CREATE TABLE IF NOT EXISTS clusters (ieee ieee, endpoint_id, cluster); CREATE TABLE IF NOT EXISTS node_descriptors (ieee ieee, value, FOREIGN KEY(ieee) REFERENCES devices(ieee)); CREATE TABLE IF NOT EXISTS output_clusters (ieee ieee, endpoint_id, cluster); CREATE TABLE IF NOT EXISTS attributes (ieee ieee, endpoint_id, cluster, attrid, value); CREATE TABLE IF NOT EXISTS groups (group_id, name); CREATE TABLE IF NOT EXISTS group_members (group_id, ieee ieee, endpoint_id, FOREIGN KEY(group_id) REFERENCES groups(group_id), FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints(ieee, endpoint_id)); CREATE TABLE IF NOT EXISTS relays (ieee ieee, relays, FOREIGN KEY(ieee) REFERENCES devices(ieee) ON DELETE CASCADE); CREATE UNIQUE INDEX IF NOT EXISTS ieee_idx ON devices(ieee); CREATE UNIQUE INDEX IF NOT EXISTS endpoint_idx ON endpoints(ieee, endpoint_id); CREATE UNIQUE INDEX IF NOT EXISTS cluster_idx ON clusters(ieee, endpoint_id, cluster); CREATE UNIQUE INDEX IF NOT EXISTS node_descriptors_idx ON node_descriptors(ieee); CREATE UNIQUE INDEX IF NOT EXISTS output_cluster_idx ON output_clusters(ieee, endpoint_id, cluster); CREATE UNIQUE INDEX IF NOT EXISTS attribute_idx ON attributes(ieee, endpoint_id, cluster, attrid); CREATE UNIQUE INDEX IF NOT EXISTS group_idx ON groups(group_id); CREATE UNIQUE INDEX IF NOT EXISTS group_members_idx ON group_members(group_id, ieee, endpoint_id); CREATE UNIQUE INDEX IF NOT EXISTS relays_idx ON relays(ieee); zigpy-0.62.3/zigpy/appdb_schemas/schema_v10.sql000066400000000000000000000121061456054056700213620ustar00rootroot00000000000000PRAGMA user_version = 10; -- devices DROP TABLE IF EXISTS devices_v10; CREATE TABLE devices_v10 ( ieee 
ieee NOT NULL, nwk INTEGER NOT NULL, status INTEGER NOT NULL, last_seen REAL NOT NULL ); CREATE UNIQUE INDEX devices_idx_v10 ON devices_v10(ieee); -- endpoints DROP TABLE IF EXISTS endpoints_v10; CREATE TABLE endpoints_v10 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, profile_id INTEGER NOT NULL, device_type INTEGER NOT NULL, status INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v10(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX endpoint_idx_v10 ON endpoints_v10(ieee, endpoint_id); -- clusters DROP TABLE IF EXISTS in_clusters_v10; CREATE TABLE in_clusters_v10 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v10(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX in_clusters_idx_v10 ON in_clusters_v10(ieee, endpoint_id, cluster); -- neighbors DROP TABLE IF EXISTS neighbors_v10; CREATE TABLE neighbors_v10 ( device_ieee ieee NOT NULL, extended_pan_id ieee NOT NULL, ieee ieee NOT NULL, nwk INTEGER NOT NULL, device_type INTEGER NOT NULL, rx_on_when_idle INTEGER NOT NULL, relationship INTEGER NOT NULL, reserved1 INTEGER NOT NULL, permit_joining INTEGER NOT NULL, reserved2 INTEGER NOT NULL, depth INTEGER NOT NULL, lqi INTEGER NOT NULL, FOREIGN KEY(device_ieee) REFERENCES devices_v10(ieee) ON DELETE CASCADE ); CREATE INDEX neighbors_idx_v10 ON neighbors_v10(device_ieee); -- node descriptors DROP TABLE IF EXISTS node_descriptors_v10; CREATE TABLE node_descriptors_v10 ( ieee ieee NOT NULL, logical_type INTEGER NOT NULL, complex_descriptor_available INTEGER NOT NULL, user_descriptor_available INTEGER NOT NULL, reserved INTEGER NOT NULL, aps_flags INTEGER NOT NULL, frequency_band INTEGER NOT NULL, mac_capability_flags INTEGER NOT NULL, manufacturer_code INTEGER NOT NULL, maximum_buffer_size INTEGER NOT NULL, maximum_incoming_transfer_size INTEGER NOT NULL, server_mask INTEGER NOT NULL, maximum_outgoing_transfer_size INTEGER NOT NULL, descriptor_capability_field INTEGER NOT 
NULL, FOREIGN KEY(ieee) REFERENCES devices_v10(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX node_descriptors_idx_v10 ON node_descriptors_v10(ieee); -- output clusters DROP TABLE IF EXISTS out_clusters_v10; CREATE TABLE out_clusters_v10 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v10(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX out_clusters_idx_v10 ON out_clusters_v10(ieee, endpoint_id, cluster); -- attributes DROP TABLE IF EXISTS attributes_cache_v10; CREATE TABLE attributes_cache_v10 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, attrid INTEGER NOT NULL, value BLOB NOT NULL, -- Quirks can create "virtual" clusters and endpoints that won't be present in the -- DB but whose values still need to be cached FOREIGN KEY(ieee) REFERENCES devices_v10(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX attributes_idx_v10 ON attributes_cache_v10(ieee, endpoint_id, cluster, attrid); -- groups DROP TABLE IF EXISTS groups_v10; CREATE TABLE groups_v10 ( group_id INTEGER NOT NULL, name TEXT NOT NULL ); CREATE UNIQUE INDEX groups_idx_v10 ON groups_v10(group_id); -- group members DROP TABLE IF EXISTS group_members_v10; CREATE TABLE group_members_v10 ( group_id INTEGER NOT NULL, ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, FOREIGN KEY(group_id) REFERENCES groups_v10(group_id) ON DELETE CASCADE, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v10(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX group_members_idx_v10 ON group_members_v10(group_id, ieee, endpoint_id); -- relays DROP TABLE IF EXISTS relays_v10; CREATE TABLE relays_v10 ( ieee ieee NOT NULL, relays BLOB NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v10(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX relays_idx_v10 ON relays_v10(ieee); -- unsupported attributes DROP TABLE IF EXISTS unsupported_attributes_v10; CREATE TABLE unsupported_attributes_v10 ( ieee ieee NOT NULL, 
endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, attrid INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v10(ieee) ON DELETE CASCADE, FOREIGN KEY(ieee, endpoint_id, cluster) REFERENCES in_clusters_v10(ieee, endpoint_id, cluster) ON DELETE CASCADE ); CREATE UNIQUE INDEX unsupported_attributes_idx_v10 ON unsupported_attributes_v10(ieee, endpoint_id, cluster, attrid); -- network backups DROP TABLE IF EXISTS network_backups_v10; CREATE TABLE network_backups_v10 ( id INTEGER PRIMARY KEY AUTOINCREMENT, backup_json TEXT NOT NULL ); zigpy-0.62.3/zigpy/appdb_schemas/schema_v11.sql000066400000000000000000000127411456054056700213700ustar00rootroot00000000000000PRAGMA user_version = 11; -- devices DROP TABLE IF EXISTS devices_v11; CREATE TABLE devices_v11 ( ieee ieee NOT NULL, nwk INTEGER NOT NULL, status INTEGER NOT NULL, last_seen REAL NOT NULL ); CREATE UNIQUE INDEX devices_idx_v11 ON devices_v11(ieee); -- endpoints DROP TABLE IF EXISTS endpoints_v11; CREATE TABLE endpoints_v11 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, profile_id INTEGER NOT NULL, device_type INTEGER NOT NULL, status INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v11(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX endpoint_idx_v11 ON endpoints_v11(ieee, endpoint_id); -- clusters DROP TABLE IF EXISTS in_clusters_v11; CREATE TABLE in_clusters_v11 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v11(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX in_clusters_idx_v11 ON in_clusters_v11(ieee, endpoint_id, cluster); -- neighbors DROP TABLE IF EXISTS neighbors_v11; CREATE TABLE neighbors_v11 ( device_ieee ieee NOT NULL, extended_pan_id ieee NOT NULL, ieee ieee NOT NULL, nwk INTEGER NOT NULL, device_type INTEGER NOT NULL, rx_on_when_idle INTEGER NOT NULL, relationship INTEGER NOT NULL, reserved1 INTEGER NOT NULL, permit_joining INTEGER NOT NULL, reserved2 INTEGER NOT NULL, depth INTEGER NOT 
NULL, lqi INTEGER NOT NULL, FOREIGN KEY(device_ieee) REFERENCES devices_v11(ieee) ON DELETE CASCADE ); CREATE INDEX neighbors_idx_v11 ON neighbors_v11(device_ieee); -- routes DROP TABLE IF EXISTS routes_v11; CREATE TABLE routes_v11 ( device_ieee ieee NOT NULL, dst_nwk INTEGER NOT NULL, route_status INTEGER NOT NULL, memory_constrained INTEGER NOT NULL, many_to_one INTEGER NOT NULL, route_record_required INTEGER NOT NULL, reserved INTEGER NOT NULL, next_hop INTEGER NOT NULL ); CREATE INDEX routes_idx_v11 ON routes_v11(device_ieee); -- node descriptors DROP TABLE IF EXISTS node_descriptors_v11; CREATE TABLE node_descriptors_v11 ( ieee ieee NOT NULL, logical_type INTEGER NOT NULL, complex_descriptor_available INTEGER NOT NULL, user_descriptor_available INTEGER NOT NULL, reserved INTEGER NOT NULL, aps_flags INTEGER NOT NULL, frequency_band INTEGER NOT NULL, mac_capability_flags INTEGER NOT NULL, manufacturer_code INTEGER NOT NULL, maximum_buffer_size INTEGER NOT NULL, maximum_incoming_transfer_size INTEGER NOT NULL, server_mask INTEGER NOT NULL, maximum_outgoing_transfer_size INTEGER NOT NULL, descriptor_capability_field INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v11(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX node_descriptors_idx_v11 ON node_descriptors_v11(ieee); -- output clusters DROP TABLE IF EXISTS out_clusters_v11; CREATE TABLE out_clusters_v11 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v11(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX out_clusters_idx_v11 ON out_clusters_v11(ieee, endpoint_id, cluster); -- attributes DROP TABLE IF EXISTS attributes_cache_v11; CREATE TABLE attributes_cache_v11 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, attrid INTEGER NOT NULL, value BLOB NOT NULL, -- Quirks can create "virtual" clusters and endpoints that won't be present in the -- DB but whose values still need to be cached 
FOREIGN KEY(ieee) REFERENCES devices_v11(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX attributes_idx_v11 ON attributes_cache_v11(ieee, endpoint_id, cluster, attrid); -- groups DROP TABLE IF EXISTS groups_v11; CREATE TABLE groups_v11 ( group_id INTEGER NOT NULL, name TEXT NOT NULL ); CREATE UNIQUE INDEX groups_idx_v11 ON groups_v11(group_id); -- group members DROP TABLE IF EXISTS group_members_v11; CREATE TABLE group_members_v11 ( group_id INTEGER NOT NULL, ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, FOREIGN KEY(group_id) REFERENCES groups_v11(group_id) ON DELETE CASCADE, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v11(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX group_members_idx_v11 ON group_members_v11(group_id, ieee, endpoint_id); -- relays DROP TABLE IF EXISTS relays_v11; CREATE TABLE relays_v11 ( ieee ieee NOT NULL, relays BLOB NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v11(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX relays_idx_v11 ON relays_v11(ieee); -- unsupported attributes DROP TABLE IF EXISTS unsupported_attributes_v11; CREATE TABLE unsupported_attributes_v11 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, attrid INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v11(ieee) ON DELETE CASCADE, FOREIGN KEY(ieee, endpoint_id, cluster) REFERENCES in_clusters_v11(ieee, endpoint_id, cluster) ON DELETE CASCADE ); CREATE UNIQUE INDEX unsupported_attributes_idx_v11 ON unsupported_attributes_v11(ieee, endpoint_id, cluster, attrid); -- network backups DROP TABLE IF EXISTS network_backups_v11; CREATE TABLE network_backups_v11 ( id INTEGER PRIMARY KEY AUTOINCREMENT, backup_json TEXT NOT NULL ); zigpy-0.62.3/zigpy/appdb_schemas/schema_v12.sql000066400000000000000000000130011456054056700213570ustar00rootroot00000000000000PRAGMA user_version = 12; -- devices DROP TABLE IF EXISTS devices_v12; CREATE TABLE devices_v12 ( ieee ieee NOT NULL, nwk INTEGER NOT NULL, status INTEGER NOT NULL, last_seen REAL 
NOT NULL ); CREATE UNIQUE INDEX devices_idx_v12 ON devices_v12(ieee); -- endpoints DROP TABLE IF EXISTS endpoints_v12; CREATE TABLE endpoints_v12 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, profile_id INTEGER NOT NULL, device_type INTEGER NOT NULL, status INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v12(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX endpoint_idx_v12 ON endpoints_v12(ieee, endpoint_id); -- clusters DROP TABLE IF EXISTS in_clusters_v12; CREATE TABLE in_clusters_v12 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v12(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX in_clusters_idx_v12 ON in_clusters_v12(ieee, endpoint_id, cluster); -- neighbors DROP TABLE IF EXISTS neighbors_v12; CREATE TABLE neighbors_v12 ( device_ieee ieee NOT NULL, extended_pan_id ieee NOT NULL, ieee ieee NOT NULL, nwk INTEGER NOT NULL, device_type INTEGER NOT NULL, rx_on_when_idle INTEGER NOT NULL, relationship INTEGER NOT NULL, reserved1 INTEGER NOT NULL, permit_joining INTEGER NOT NULL, reserved2 INTEGER NOT NULL, depth INTEGER NOT NULL, lqi INTEGER NOT NULL, FOREIGN KEY(device_ieee) REFERENCES devices_v12(ieee) ON DELETE CASCADE ); CREATE INDEX neighbors_idx_v12 ON neighbors_v12(device_ieee); -- routes DROP TABLE IF EXISTS routes_v12; CREATE TABLE routes_v12 ( device_ieee ieee NOT NULL, dst_nwk INTEGER NOT NULL, route_status INTEGER NOT NULL, memory_constrained INTEGER NOT NULL, many_to_one INTEGER NOT NULL, route_record_required INTEGER NOT NULL, reserved INTEGER NOT NULL, next_hop INTEGER NOT NULL ); CREATE INDEX routes_idx_v12 ON routes_v12(device_ieee); -- node descriptors DROP TABLE IF EXISTS node_descriptors_v12; CREATE TABLE node_descriptors_v12 ( ieee ieee NOT NULL, logical_type INTEGER NOT NULL, complex_descriptor_available INTEGER NOT NULL, user_descriptor_available INTEGER NOT NULL, reserved INTEGER NOT NULL, aps_flags INTEGER NOT NULL, frequency_band 
INTEGER NOT NULL, mac_capability_flags INTEGER NOT NULL, manufacturer_code INTEGER NOT NULL, maximum_buffer_size INTEGER NOT NULL, maximum_incoming_transfer_size INTEGER NOT NULL, server_mask INTEGER NOT NULL, maximum_outgoing_transfer_size INTEGER NOT NULL, descriptor_capability_field INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v12(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX node_descriptors_idx_v12 ON node_descriptors_v12(ieee); -- output clusters DROP TABLE IF EXISTS out_clusters_v12; CREATE TABLE out_clusters_v12 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v12(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX out_clusters_idx_v12 ON out_clusters_v12(ieee, endpoint_id, cluster); -- attributes DROP TABLE IF EXISTS attributes_cache_v12; CREATE TABLE attributes_cache_v12 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, attrid INTEGER NOT NULL, value BLOB NOT NULL, last_updated REAL NOT NULL, -- Quirks can create "virtual" clusters and endpoints that won't be present in the -- DB but whose values still need to be cached FOREIGN KEY(ieee) REFERENCES devices_v12(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX attributes_idx_v12 ON attributes_cache_v12(ieee, endpoint_id, cluster, attrid); -- groups DROP TABLE IF EXISTS groups_v12; CREATE TABLE groups_v12 ( group_id INTEGER NOT NULL, name TEXT NOT NULL ); CREATE UNIQUE INDEX groups_idx_v12 ON groups_v12(group_id); -- group members DROP TABLE IF EXISTS group_members_v12; CREATE TABLE group_members_v12 ( group_id INTEGER NOT NULL, ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, FOREIGN KEY(group_id) REFERENCES groups_v12(group_id) ON DELETE CASCADE, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v12(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX group_members_idx_v12 ON group_members_v12(group_id, ieee, endpoint_id); -- relays DROP TABLE IF EXISTS relays_v12; CREATE 
TABLE relays_v12 ( ieee ieee NOT NULL, relays BLOB NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v12(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX relays_idx_v12 ON relays_v12(ieee); -- unsupported attributes DROP TABLE IF EXISTS unsupported_attributes_v12; CREATE TABLE unsupported_attributes_v12 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, attrid INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v12(ieee) ON DELETE CASCADE, FOREIGN KEY(ieee, endpoint_id, cluster) REFERENCES in_clusters_v12(ieee, endpoint_id, cluster) ON DELETE CASCADE ); CREATE UNIQUE INDEX unsupported_attributes_idx_v12 ON unsupported_attributes_v12(ieee, endpoint_id, cluster, attrid); -- network backups DROP TABLE IF EXISTS network_backups_v12; CREATE TABLE network_backups_v12 ( id INTEGER PRIMARY KEY AUTOINCREMENT, backup_json TEXT NOT NULL ); zigpy-0.62.3/zigpy/appdb_schemas/schema_v2.sql000066400000000000000000000040571456054056700213110ustar00rootroot00000000000000PRAGMA user_version = 2; CREATE TABLE IF NOT EXISTS devices (ieee ieee, nwk, status); CREATE TABLE IF NOT EXISTS endpoints (ieee ieee, endpoint_id, profile_id, device_type device_type, status, FOREIGN KEY(ieee) REFERENCES devices(ieee) ON DELETE CASCADE); CREATE TABLE IF NOT EXISTS clusters (ieee ieee, endpoint_id, cluster, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints(ieee, endpoint_id) ON DELETE CASCADE); CREATE TABLE IF NOT EXISTS node_descriptors (ieee ieee, value, FOREIGN KEY(ieee) REFERENCES devices(ieee) ON DELETE CASCADE); CREATE TABLE IF NOT EXISTS output_clusters (ieee ieee, endpoint_id, cluster, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints(ieee, endpoint_id) ON DELETE CASCADE); CREATE TABLE IF NOT EXISTS attributes (ieee ieee, endpoint_id, cluster, attrid, value, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints(ieee, endpoint_id) ON DELETE CASCADE); CREATE TABLE IF NOT EXISTS groups (group_id, name); CREATE TABLE IF NOT EXISTS group_members (group_id, ieee ieee, 
endpoint_id, FOREIGN KEY(group_id) REFERENCES groups(group_id) ON DELETE CASCADE, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints(ieee, endpoint_id) ON DELETE CASCADE); CREATE TABLE IF NOT EXISTS relays (ieee ieee, relays, FOREIGN KEY(ieee) REFERENCES devices(ieee) ON DELETE CASCADE); CREATE UNIQUE INDEX IF NOT EXISTS ieee_idx ON devices(ieee); CREATE UNIQUE INDEX IF NOT EXISTS endpoint_idx ON endpoints(ieee, endpoint_id); CREATE UNIQUE INDEX IF NOT EXISTS cluster_idx ON clusters(ieee, endpoint_id, cluster); CREATE UNIQUE INDEX IF NOT EXISTS node_descriptors_idx ON node_descriptors(ieee); CREATE UNIQUE INDEX IF NOT EXISTS output_cluster_idx ON output_clusters(ieee, endpoint_id, cluster); CREATE UNIQUE INDEX IF NOT EXISTS attribute_idx ON attributes(ieee, endpoint_id, cluster, attrid); CREATE UNIQUE INDEX IF NOT EXISTS group_idx ON groups(group_id); CREATE UNIQUE INDEX IF NOT EXISTS group_members_idx ON group_members(group_id, ieee, endpoint_id); CREATE UNIQUE INDEX IF NOT EXISTS relays_idx ON relays(ieee); zigpy-0.62.3/zigpy/appdb_schemas/schema_v3.sql000066400000000000000000000040501456054056700213030ustar00rootroot00000000000000PRAGMA user_version = 3; CREATE TABLE IF NOT EXISTS devices (ieee ieee, nwk, status); CREATE TABLE IF NOT EXISTS endpoints (ieee ieee, endpoint_id, profile_id, device_type device_type, status); CREATE TABLE IF NOT EXISTS clusters (ieee ieee, endpoint_id, cluster); CREATE TABLE IF NOT EXISTS node_descriptors (ieee ieee, value, FOREIGN KEY(ieee) REFERENCES devices(ieee)); CREATE TABLE IF NOT EXISTS output_clusters (ieee ieee, endpoint_id, cluster); CREATE TABLE IF NOT EXISTS attributes (ieee ieee, endpoint_id, cluster, attrid, value); CREATE TABLE IF NOT EXISTS groups (group_id, name); CREATE TABLE IF NOT EXISTS group_members (group_id, ieee ieee, endpoint_id, FOREIGN KEY(group_id) REFERENCES groups(group_id), FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints(ieee, endpoint_id)); CREATE TABLE IF NOT EXISTS relays (ieee ieee, relays, 
FOREIGN KEY(ieee) REFERENCES devices(ieee) ON DELETE CASCADE); CREATE TABLE IF NOT EXISTS neighbors (device_ieee ieee NOT NULL, extended_pan_id ieee NOT NULL,ieee ieee NOT NULL, nwk INTEGER NOT NULL, struct INTEGER NOT NULL, permit_joining INTEGER NOT NULL, depth INTEGER NOT NULL, lqi INTEGER NOT NULL, FOREIGN KEY(device_ieee) REFERENCES devices(ieee) ON DELETE CASCADE); CREATE UNIQUE INDEX IF NOT EXISTS ieee_idx ON devices(ieee); CREATE UNIQUE INDEX IF NOT EXISTS endpoint_idx ON endpoints(ieee, endpoint_id); CREATE UNIQUE INDEX IF NOT EXISTS cluster_idx ON clusters(ieee, endpoint_id, cluster); CREATE UNIQUE INDEX IF NOT EXISTS node_descriptors_idx ON node_descriptors(ieee); CREATE UNIQUE INDEX IF NOT EXISTS output_cluster_idx ON output_clusters(ieee, endpoint_id, cluster); CREATE UNIQUE INDEX IF NOT EXISTS attribute_idx ON attributes(ieee, endpoint_id, cluster, attrid); CREATE UNIQUE INDEX IF NOT EXISTS group_idx ON groups(group_id); CREATE UNIQUE INDEX IF NOT EXISTS group_members_idx ON group_members(group_id, ieee, endpoint_id); CREATE UNIQUE INDEX IF NOT EXISTS relays_idx ON relays(ieee); CREATE INDEX IF NOT EXISTS neighbors_idx ON neighbors(device_ieee); zigpy-0.62.3/zigpy/appdb_schemas/schema_v4.sql000066400000000000000000000067641456054056700213220ustar00rootroot00000000000000PRAGMA user_version = 4; -- devices CREATE TABLE IF NOT EXISTS devices ( ieee ieee, nwk, status ); CREATE UNIQUE INDEX IF NOT EXISTS ieee_idx ON devices(ieee); -- endpoints CREATE TABLE IF NOT EXISTS endpoints ( ieee ieee, endpoint_id, profile_id, device_type device_type, status, FOREIGN KEY(ieee) REFERENCES devices(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX IF NOT EXISTS endpoint_idx ON endpoints(ieee, endpoint_id); -- clusters CREATE TABLE IF NOT EXISTS clusters ( ieee ieee, endpoint_id, cluster, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX IF NOT EXISTS cluster_idx ON clusters(ieee, endpoint_id, cluster); -- 
neighbors DROP TABLE IF EXISTS neighbors_v4; CREATE TABLE neighbors_v4 ( device_ieee ieee NOT NULL, extended_pan_id ieee NOT NULL, ieee ieee NOT NULL, nwk INTEGER NOT NULL, device_type INTEGER NOT NULL, rx_on_when_idle INTEGER NOT NULL, relationship INTEGER NOT NULL, reserved1 INTEGER NOT NULL, permit_joining INTEGER NOT NULL, reserved2 INTEGER NOT NULL, depth INTEGER NOT NULL, lqi INTEGER NOT NULL ); CREATE INDEX neighbors_idx_v4 ON neighbors_v4(device_ieee); -- node descriptors DROP TABLE IF EXISTS node_descriptors_v4; CREATE TABLE node_descriptors_v4 ( ieee ieee, logical_type INTEGER NOT NULL, complex_descriptor_available INTEGER NOT NULL, user_descriptor_available INTEGER NOT NULL, reserved INTEGER NOT NULL, aps_flags INTEGER NOT NULL, frequency_band INTEGER NOT NULL, mac_capability_flags INTEGER NOT NULL, manufacturer_code INTEGER NOT NULL, maximum_buffer_size INTEGER NOT NULL, maximum_incoming_transfer_size INTEGER NOT NULL, server_mask INTEGER NOT NULL, maximum_outgoing_transfer_size INTEGER NOT NULL, descriptor_capability_field INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX node_descriptors_idx_v4 ON node_descriptors_v4(ieee); -- output clusters CREATE TABLE IF NOT EXISTS output_clusters ( ieee ieee, endpoint_id, cluster, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX IF NOT EXISTS output_cluster_idx ON output_clusters(ieee, endpoint_id, cluster); -- attributes CREATE TABLE IF NOT EXISTS attributes ( ieee ieee, endpoint_id, cluster, attrid, value, FOREIGN KEY(ieee) REFERENCES devices(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX IF NOT EXISTS attribute_idx ON attributes(ieee, endpoint_id, cluster, attrid); -- groups CREATE TABLE IF NOT EXISTS groups ( group_id, name ); CREATE UNIQUE INDEX IF NOT EXISTS group_idx ON groups(group_id); -- group members CREATE TABLE IF NOT EXISTS group_members ( group_id, ieee ieee, endpoint_id, FOREIGN 
KEY(group_id) REFERENCES groups(group_id) ON DELETE CASCADE, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX IF NOT EXISTS group_members_idx ON group_members(group_id, ieee, endpoint_id); -- relays CREATE TABLE IF NOT EXISTS relays ( ieee ieee, relays, FOREIGN KEY(ieee) REFERENCES devices(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX IF NOT EXISTS relays_idx ON relays(ieee); zigpy-0.62.3/zigpy/appdb_schemas/schema_v5.sql000066400000000000000000000104221456054056700213050ustar00rootroot00000000000000PRAGMA user_version = 5; -- devices DROP TABLE IF EXISTS devices_v5; CREATE TABLE devices_v5 ( ieee ieee NOT NULL, nwk INTEGER NOT NULL, status INTEGER NOT NULL ); CREATE UNIQUE INDEX devices_idx_v5 ON devices_v5(ieee); -- endpoints DROP TABLE IF EXISTS endpoints_v5; CREATE TABLE endpoints_v5 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, profile_id INTEGER NOT NULL, device_type INTEGER NOT NULL, status INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v5(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX endpoint_idx_v5 ON endpoints_v5(ieee, endpoint_id); -- clusters DROP TABLE IF EXISTS in_clusters_v5; CREATE TABLE in_clusters_v5 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v5(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX in_clusters_idx_v5 ON in_clusters_v5(ieee, endpoint_id, cluster); -- neighbors DROP TABLE IF EXISTS neighbors_v5; CREATE TABLE neighbors_v5 ( device_ieee ieee NOT NULL, extended_pan_id ieee NOT NULL, ieee ieee NOT NULL, nwk INTEGER NOT NULL, device_type INTEGER NOT NULL, rx_on_when_idle INTEGER NOT NULL, relationship INTEGER NOT NULL, reserved1 INTEGER NOT NULL, permit_joining INTEGER NOT NULL, reserved2 INTEGER NOT NULL, depth INTEGER NOT NULL, lqi INTEGER NOT NULL, FOREIGN KEY(device_ieee) REFERENCES devices_v5(ieee) ON DELETE CASCADE ); CREATE INDEX neighbors_idx_v5 ON 
neighbors_v5(device_ieee); -- node descriptors DROP TABLE IF EXISTS node_descriptors_v5; CREATE TABLE node_descriptors_v5 ( ieee ieee NOT NULL, logical_type INTEGER NOT NULL, complex_descriptor_available INTEGER NOT NULL, user_descriptor_available INTEGER NOT NULL, reserved INTEGER NOT NULL, aps_flags INTEGER NOT NULL, frequency_band INTEGER NOT NULL, mac_capability_flags INTEGER NOT NULL, manufacturer_code INTEGER NOT NULL, maximum_buffer_size INTEGER NOT NULL, maximum_incoming_transfer_size INTEGER NOT NULL, server_mask INTEGER NOT NULL, maximum_outgoing_transfer_size INTEGER NOT NULL, descriptor_capability_field INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v5(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX node_descriptors_idx_v5 ON node_descriptors_v5(ieee); -- output clusters DROP TABLE IF EXISTS out_clusters_v5; CREATE TABLE out_clusters_v5 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v5(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX out_clusters_idx_v5 ON out_clusters_v5(ieee, endpoint_id, cluster); -- attributes DROP TABLE IF EXISTS attributes_cache_v5; CREATE TABLE attributes_cache_v5 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, attrid INTEGER NOT NULL, value BLOB NOT NULL, -- Quirks can create "virtual" clusters that won't be present in the DB but whose -- values still need to be cached FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v5(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX attributes_idx_v5 ON attributes_cache_v5(ieee, endpoint_id, cluster, attrid); -- groups DROP TABLE IF EXISTS groups_v5; CREATE TABLE groups_v5 ( group_id INTEGER NOT NULL, name TEXT NOT NULL ); CREATE UNIQUE INDEX groups_idx_v5 ON groups_v5(group_id); -- group members DROP TABLE IF EXISTS group_members_v5; CREATE TABLE group_members_v5 ( group_id INTEGER NOT NULL, ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, FOREIGN 
KEY(group_id) REFERENCES groups_v5(group_id) ON DELETE CASCADE, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v5(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX group_members_idx_v5 ON group_members_v5(group_id, ieee, endpoint_id); -- relays DROP TABLE IF EXISTS relays_v5; CREATE TABLE relays_v5 ( ieee ieee NOT NULL, relays BLOB NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v5(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX relays_idx_v5 ON relays_v5(ieee); zigpy-0.62.3/zigpy/appdb_schemas/schema_v6.sql000066400000000000000000000104031456054056700213050ustar00rootroot00000000000000PRAGMA user_version = 6; -- devices DROP TABLE IF EXISTS devices_v6; CREATE TABLE devices_v6 ( ieee ieee NOT NULL, nwk INTEGER NOT NULL, status INTEGER NOT NULL ); CREATE UNIQUE INDEX devices_idx_v6 ON devices_v6(ieee); -- endpoints DROP TABLE IF EXISTS endpoints_v6; CREATE TABLE endpoints_v6 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, profile_id INTEGER NOT NULL, device_type INTEGER NOT NULL, status INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v6(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX endpoint_idx_v6 ON endpoints_v6(ieee, endpoint_id); -- clusters DROP TABLE IF EXISTS in_clusters_v6; CREATE TABLE in_clusters_v6 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v6(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX in_clusters_idx_v6 ON in_clusters_v6(ieee, endpoint_id, cluster); -- neighbors DROP TABLE IF EXISTS neighbors_v6; CREATE TABLE neighbors_v6 ( device_ieee ieee NOT NULL, extended_pan_id ieee NOT NULL, ieee ieee NOT NULL, nwk INTEGER NOT NULL, device_type INTEGER NOT NULL, rx_on_when_idle INTEGER NOT NULL, relationship INTEGER NOT NULL, reserved1 INTEGER NOT NULL, permit_joining INTEGER NOT NULL, reserved2 INTEGER NOT NULL, depth INTEGER NOT NULL, lqi INTEGER NOT NULL, FOREIGN KEY(device_ieee) REFERENCES devices_v6(ieee) ON DELETE CASCADE ); CREATE 
INDEX neighbors_idx_v6 ON neighbors_v6(device_ieee); -- node descriptors DROP TABLE IF EXISTS node_descriptors_v6; CREATE TABLE node_descriptors_v6 ( ieee ieee NOT NULL, logical_type INTEGER NOT NULL, complex_descriptor_available INTEGER NOT NULL, user_descriptor_available INTEGER NOT NULL, reserved INTEGER NOT NULL, aps_flags INTEGER NOT NULL, frequency_band INTEGER NOT NULL, mac_capability_flags INTEGER NOT NULL, manufacturer_code INTEGER NOT NULL, maximum_buffer_size INTEGER NOT NULL, maximum_incoming_transfer_size INTEGER NOT NULL, server_mask INTEGER NOT NULL, maximum_outgoing_transfer_size INTEGER NOT NULL, descriptor_capability_field INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v6(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX node_descriptors_idx_v6 ON node_descriptors_v6(ieee); -- output clusters DROP TABLE IF EXISTS out_clusters_v6; CREATE TABLE out_clusters_v6 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v6(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX out_clusters_idx_v6 ON out_clusters_v6(ieee, endpoint_id, cluster); -- attributes DROP TABLE IF EXISTS attributes_cache_v6; CREATE TABLE attributes_cache_v6 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, attrid INTEGER NOT NULL, value BLOB NOT NULL, -- Quirks can create "virtual" clusters and endpoints that won't be present in the -- DB but whose values still need to be cached FOREIGN KEY(ieee) REFERENCES devices_v6(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX attributes_idx_v6 ON attributes_cache_v6(ieee, endpoint_id, cluster, attrid); -- groups DROP TABLE IF EXISTS groups_v6; CREATE TABLE groups_v6 ( group_id INTEGER NOT NULL, name TEXT NOT NULL ); CREATE UNIQUE INDEX groups_idx_v6 ON groups_v6(group_id); -- group members DROP TABLE IF EXISTS group_members_v6; CREATE TABLE group_members_v6 ( group_id INTEGER NOT NULL, ieee ieee NOT NULL, endpoint_id INTEGER NOT 
NULL, FOREIGN KEY(group_id) REFERENCES groups_v6(group_id) ON DELETE CASCADE, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v6(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX group_members_idx_v6 ON group_members_v6(group_id, ieee, endpoint_id); -- relays DROP TABLE IF EXISTS relays_v6; CREATE TABLE relays_v6 ( ieee ieee NOT NULL, relays BLOB NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v6(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX relays_idx_v6 ON relays_v6(ieee);zigpy-0.62.3/zigpy/appdb_schemas/schema_v7.sql000066400000000000000000000115041456054056700213110ustar00rootroot00000000000000PRAGMA user_version = 7; -- devices DROP TABLE IF EXISTS devices_v7; CREATE TABLE devices_v7 ( ieee ieee NOT NULL, nwk INTEGER NOT NULL, status INTEGER NOT NULL ); CREATE UNIQUE INDEX devices_idx_v7 ON devices_v7(ieee); -- endpoints DROP TABLE IF EXISTS endpoints_v7; CREATE TABLE endpoints_v7 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, profile_id INTEGER NOT NULL, device_type INTEGER NOT NULL, status INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v7(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX endpoint_idx_v7 ON endpoints_v7(ieee, endpoint_id); -- clusters DROP TABLE IF EXISTS in_clusters_v7; CREATE TABLE in_clusters_v7 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v7(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX in_clusters_idx_v7 ON in_clusters_v7(ieee, endpoint_id, cluster); -- neighbors DROP TABLE IF EXISTS neighbors_v7; CREATE TABLE neighbors_v7 ( device_ieee ieee NOT NULL, extended_pan_id ieee NOT NULL, ieee ieee NOT NULL, nwk INTEGER NOT NULL, device_type INTEGER NOT NULL, rx_on_when_idle INTEGER NOT NULL, relationship INTEGER NOT NULL, reserved1 INTEGER NOT NULL, permit_joining INTEGER NOT NULL, reserved2 INTEGER NOT NULL, depth INTEGER NOT NULL, lqi INTEGER NOT NULL, FOREIGN KEY(device_ieee) REFERENCES devices_v7(ieee) ON DELETE 
CASCADE ); CREATE INDEX neighbors_idx_v7 ON neighbors_v7(device_ieee); -- node descriptors DROP TABLE IF EXISTS node_descriptors_v7; CREATE TABLE node_descriptors_v7 ( ieee ieee NOT NULL, logical_type INTEGER NOT NULL, complex_descriptor_available INTEGER NOT NULL, user_descriptor_available INTEGER NOT NULL, reserved INTEGER NOT NULL, aps_flags INTEGER NOT NULL, frequency_band INTEGER NOT NULL, mac_capability_flags INTEGER NOT NULL, manufacturer_code INTEGER NOT NULL, maximum_buffer_size INTEGER NOT NULL, maximum_incoming_transfer_size INTEGER NOT NULL, server_mask INTEGER NOT NULL, maximum_outgoing_transfer_size INTEGER NOT NULL, descriptor_capability_field INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v7(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX node_descriptors_idx_v7 ON node_descriptors_v7(ieee); -- output clusters DROP TABLE IF EXISTS out_clusters_v7; CREATE TABLE out_clusters_v7 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v7(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX out_clusters_idx_v7 ON out_clusters_v7(ieee, endpoint_id, cluster); -- attributes DROP TABLE IF EXISTS attributes_cache_v7; CREATE TABLE attributes_cache_v7 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, attrid INTEGER NOT NULL, value BLOB NOT NULL, -- Quirks can create "virtual" clusters and endpoints that won't be present in the -- DB but whose values still need to be cached FOREIGN KEY(ieee) REFERENCES devices_v7(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX attributes_idx_v7 ON attributes_cache_v7(ieee, endpoint_id, cluster, attrid); -- groups DROP TABLE IF EXISTS groups_v7; CREATE TABLE groups_v7 ( group_id INTEGER NOT NULL, name TEXT NOT NULL ); CREATE UNIQUE INDEX groups_idx_v7 ON groups_v7(group_id); -- group members DROP TABLE IF EXISTS group_members_v7; CREATE TABLE group_members_v7 ( group_id INTEGER NOT NULL, ieee ieee NOT NULL, 
endpoint_id INTEGER NOT NULL, FOREIGN KEY(group_id) REFERENCES groups_v7(group_id) ON DELETE CASCADE, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v7(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX group_members_idx_v7 ON group_members_v7(group_id, ieee, endpoint_id); -- relays DROP TABLE IF EXISTS relays_v7; CREATE TABLE relays_v7 ( ieee ieee NOT NULL, relays BLOB NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v7(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX relays_idx_v7 ON relays_v7(ieee); -- unsupported attributes DROP TABLE IF EXISTS unsupported_attributes_v7; CREATE TABLE unsupported_attributes_v7 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, attrid INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v7(ieee) ON DELETE CASCADE, FOREIGN KEY(ieee, endpoint_id, cluster) REFERENCES in_clusters_v7(ieee, endpoint_id, cluster) ON DELETE CASCADE ); CREATE UNIQUE INDEX unsupported_attributes_idx_v7 ON unsupported_attributes_v7(ieee, endpoint_id, cluster, attrid); zigpy-0.62.3/zigpy/appdb_schemas/schema_v8.sql000066400000000000000000000115531456054056700213160ustar00rootroot00000000000000PRAGMA user_version = 8; -- devices DROP TABLE IF EXISTS devices_v8; CREATE TABLE devices_v8 ( ieee ieee NOT NULL, nwk INTEGER NOT NULL, status INTEGER NOT NULL, last_seen unix_timestamp NOT NULL ); CREATE UNIQUE INDEX devices_idx_v8 ON devices_v8(ieee); -- endpoints DROP TABLE IF EXISTS endpoints_v8; CREATE TABLE endpoints_v8 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, profile_id INTEGER NOT NULL, device_type INTEGER NOT NULL, status INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v8(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX endpoint_idx_v8 ON endpoints_v8(ieee, endpoint_id); -- clusters DROP TABLE IF EXISTS in_clusters_v8; CREATE TABLE in_clusters_v8 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v8(ieee, endpoint_id) ON DELETE 
CASCADE ); CREATE UNIQUE INDEX in_clusters_idx_v8 ON in_clusters_v8(ieee, endpoint_id, cluster); -- neighbors DROP TABLE IF EXISTS neighbors_v8; CREATE TABLE neighbors_v8 ( device_ieee ieee NOT NULL, extended_pan_id ieee NOT NULL, ieee ieee NOT NULL, nwk INTEGER NOT NULL, device_type INTEGER NOT NULL, rx_on_when_idle INTEGER NOT NULL, relationship INTEGER NOT NULL, reserved1 INTEGER NOT NULL, permit_joining INTEGER NOT NULL, reserved2 INTEGER NOT NULL, depth INTEGER NOT NULL, lqi INTEGER NOT NULL, FOREIGN KEY(device_ieee) REFERENCES devices_v8(ieee) ON DELETE CASCADE ); CREATE INDEX neighbors_idx_v8 ON neighbors_v8(device_ieee); -- node descriptors DROP TABLE IF EXISTS node_descriptors_v8; CREATE TABLE node_descriptors_v8 ( ieee ieee NOT NULL, logical_type INTEGER NOT NULL, complex_descriptor_available INTEGER NOT NULL, user_descriptor_available INTEGER NOT NULL, reserved INTEGER NOT NULL, aps_flags INTEGER NOT NULL, frequency_band INTEGER NOT NULL, mac_capability_flags INTEGER NOT NULL, manufacturer_code INTEGER NOT NULL, maximum_buffer_size INTEGER NOT NULL, maximum_incoming_transfer_size INTEGER NOT NULL, server_mask INTEGER NOT NULL, maximum_outgoing_transfer_size INTEGER NOT NULL, descriptor_capability_field INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v8(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX node_descriptors_idx_v8 ON node_descriptors_v8(ieee); -- output clusters DROP TABLE IF EXISTS out_clusters_v8; CREATE TABLE out_clusters_v8 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v8(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX out_clusters_idx_v8 ON out_clusters_v8(ieee, endpoint_id, cluster); -- attributes DROP TABLE IF EXISTS attributes_cache_v8; CREATE TABLE attributes_cache_v8 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, attrid INTEGER NOT NULL, value BLOB NOT NULL, -- Quirks can create "virtual" clusters and 
endpoints that won't be present in the -- DB but whose values still need to be cached FOREIGN KEY(ieee) REFERENCES devices_v8(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX attributes_idx_v8 ON attributes_cache_v8(ieee, endpoint_id, cluster, attrid); -- groups DROP TABLE IF EXISTS groups_v8; CREATE TABLE groups_v8 ( group_id INTEGER NOT NULL, name TEXT NOT NULL ); CREATE UNIQUE INDEX groups_idx_v8 ON groups_v8(group_id); -- group members DROP TABLE IF EXISTS group_members_v8; CREATE TABLE group_members_v8 ( group_id INTEGER NOT NULL, ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, FOREIGN KEY(group_id) REFERENCES groups_v8(group_id) ON DELETE CASCADE, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v8(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX group_members_idx_v8 ON group_members_v8(group_id, ieee, endpoint_id); -- relays DROP TABLE IF EXISTS relays_v8; CREATE TABLE relays_v8 ( ieee ieee NOT NULL, relays BLOB NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v8(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX relays_idx_v8 ON relays_v8(ieee); -- unsupported attributes DROP TABLE IF EXISTS unsupported_attributes_v8; CREATE TABLE unsupported_attributes_v8 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, attrid INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v8(ieee) ON DELETE CASCADE, FOREIGN KEY(ieee, endpoint_id, cluster) REFERENCES in_clusters_v8(ieee, endpoint_id, cluster) ON DELETE CASCADE ); CREATE UNIQUE INDEX unsupported_attributes_idx_v8 ON unsupported_attributes_v8(ieee, endpoint_id, cluster, attrid); zigpy-0.62.3/zigpy/appdb_schemas/schema_v9.sql000066400000000000000000000115411456054056700213140ustar00rootroot00000000000000PRAGMA user_version = 9; -- devices DROP TABLE IF EXISTS devices_v9; CREATE TABLE devices_v9 ( ieee ieee NOT NULL, nwk INTEGER NOT NULL, status INTEGER NOT NULL, last_seen REAL NOT NULL ); CREATE UNIQUE INDEX devices_idx_v9 ON devices_v9(ieee); -- endpoints DROP TABLE IF EXISTS 
endpoints_v9; CREATE TABLE endpoints_v9 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, profile_id INTEGER NOT NULL, device_type INTEGER NOT NULL, status INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v9(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX endpoint_idx_v9 ON endpoints_v9(ieee, endpoint_id); -- clusters DROP TABLE IF EXISTS in_clusters_v9; CREATE TABLE in_clusters_v9 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v9(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX in_clusters_idx_v9 ON in_clusters_v9(ieee, endpoint_id, cluster); -- neighbors DROP TABLE IF EXISTS neighbors_v9; CREATE TABLE neighbors_v9 ( device_ieee ieee NOT NULL, extended_pan_id ieee NOT NULL, ieee ieee NOT NULL, nwk INTEGER NOT NULL, device_type INTEGER NOT NULL, rx_on_when_idle INTEGER NOT NULL, relationship INTEGER NOT NULL, reserved1 INTEGER NOT NULL, permit_joining INTEGER NOT NULL, reserved2 INTEGER NOT NULL, depth INTEGER NOT NULL, lqi INTEGER NOT NULL, FOREIGN KEY(device_ieee) REFERENCES devices_v9(ieee) ON DELETE CASCADE ); CREATE INDEX neighbors_idx_v9 ON neighbors_v9(device_ieee); -- node descriptors DROP TABLE IF EXISTS node_descriptors_v9; CREATE TABLE node_descriptors_v9 ( ieee ieee NOT NULL, logical_type INTEGER NOT NULL, complex_descriptor_available INTEGER NOT NULL, user_descriptor_available INTEGER NOT NULL, reserved INTEGER NOT NULL, aps_flags INTEGER NOT NULL, frequency_band INTEGER NOT NULL, mac_capability_flags INTEGER NOT NULL, manufacturer_code INTEGER NOT NULL, maximum_buffer_size INTEGER NOT NULL, maximum_incoming_transfer_size INTEGER NOT NULL, server_mask INTEGER NOT NULL, maximum_outgoing_transfer_size INTEGER NOT NULL, descriptor_capability_field INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v9(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX node_descriptors_idx_v9 ON node_descriptors_v9(ieee); -- output clusters DROP TABLE IF EXISTS 
out_clusters_v9; CREATE TABLE out_clusters_v9 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v9(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX out_clusters_idx_v9 ON out_clusters_v9(ieee, endpoint_id, cluster); -- attributes DROP TABLE IF EXISTS attributes_cache_v9; CREATE TABLE attributes_cache_v9 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, attrid INTEGER NOT NULL, value BLOB NOT NULL, -- Quirks can create "virtual" clusters and endpoints that won't be present in the -- DB but whose values still need to be cached FOREIGN KEY(ieee) REFERENCES devices_v9(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX attributes_idx_v9 ON attributes_cache_v9(ieee, endpoint_id, cluster, attrid); -- groups DROP TABLE IF EXISTS groups_v9; CREATE TABLE groups_v9 ( group_id INTEGER NOT NULL, name TEXT NOT NULL ); CREATE UNIQUE INDEX groups_idx_v9 ON groups_v9(group_id); -- group members DROP TABLE IF EXISTS group_members_v9; CREATE TABLE group_members_v9 ( group_id INTEGER NOT NULL, ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, FOREIGN KEY(group_id) REFERENCES groups_v9(group_id) ON DELETE CASCADE, FOREIGN KEY(ieee, endpoint_id) REFERENCES endpoints_v9(ieee, endpoint_id) ON DELETE CASCADE ); CREATE UNIQUE INDEX group_members_idx_v9 ON group_members_v9(group_id, ieee, endpoint_id); -- relays DROP TABLE IF EXISTS relays_v9; CREATE TABLE relays_v9 ( ieee ieee NOT NULL, relays BLOB NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v9(ieee) ON DELETE CASCADE ); CREATE UNIQUE INDEX relays_idx_v9 ON relays_v9(ieee); -- unsupported attributes DROP TABLE IF EXISTS unsupported_attributes_v9; CREATE TABLE unsupported_attributes_v9 ( ieee ieee NOT NULL, endpoint_id INTEGER NOT NULL, cluster INTEGER NOT NULL, attrid INTEGER NOT NULL, FOREIGN KEY(ieee) REFERENCES devices_v9(ieee) ON DELETE CASCADE, FOREIGN KEY(ieee, endpoint_id, cluster) REFERENCES in_clusters_v9(ieee, 
endpoint_id, cluster) ON DELETE CASCADE ); CREATE UNIQUE INDEX unsupported_attributes_idx_v9 ON unsupported_attributes_v9(ieee, endpoint_id, cluster, attrid); zigpy-0.62.3/zigpy/application.py000066400000000000000000001360621456054056700170070ustar00rootroot00000000000000from __future__ import annotations import abc import asyncio import collections import contextlib import errno import logging import os import random import sys import time import typing from typing import Any, Coroutine, TypeVar import warnings if sys.version_info[:2] < (3, 11): from async_timeout import timeout as asyncio_timeout # pragma: no cover else: from asyncio import timeout as asyncio_timeout # pragma: no cover import zigpy.appdb import zigpy.backups import zigpy.config as conf import zigpy.const as const import zigpy.device import zigpy.endpoint import zigpy.exceptions import zigpy.group import zigpy.listeners import zigpy.ota import zigpy.profiles import zigpy.quirks import zigpy.state import zigpy.topology import zigpy.types as t import zigpy.typing import zigpy.util import zigpy.zcl import zigpy.zdo import zigpy.zdo.types as zdo_types DEFAULT_ENDPOINT_ID = 1 LOGGER = logging.getLogger(__name__) TRANSIENT_CONNECTION_ERRORS = { errno.ENETUNREACH, } ENERGY_SCAN_WARN_THRESHOLD = 0.75 * 255 _R = TypeVar("_R") CHANNEL_CHANGE_BROADCAST_DELAY_S = 1.0 CHANNEL_CHANGE_SETTINGS_RELOAD_DELAY_S = 1.0 class ControllerApplication(zigpy.util.ListenableMixin, abc.ABC): SCHEMA = conf.CONFIG_SCHEMA _watchdog_period: int = 30 _probe_configs: list[dict[str, Any]] = [] def __init__(self, config: dict) -> None: self.devices: dict[t.EUI64, zigpy.device.Device] = {} self.state: zigpy.state.State = zigpy.state.State() self._listeners = {} self._config = self.SCHEMA(config) self._dblistener = None self._groups = zigpy.group.Groups(self) self._listeners = {} self._ota = zigpy.ota.OTA(self) self._send_sequence = 0 self._tasks: set[asyncio.Future[Any]] = set() self._watchdog_task: asyncio.Task | None = None 
self._concurrent_requests_semaphore = zigpy.util.DynamicBoundedSemaphore( self._config[conf.CONF_MAX_CONCURRENT_REQUESTS] ) self.backups: zigpy.backups.BackupManager = zigpy.backups.BackupManager(self) self.topology: zigpy.topology.Topology = zigpy.topology.Topology(self) self._req_listeners: collections.defaultdict[ zigpy.device.Device, collections.deque[zigpy.listeners.BaseRequestListener], ] = collections.defaultdict(lambda: collections.deque([])) def create_task( self, target: Coroutine[Any, Any, _R], name: str | None = None ) -> asyncio.Task[_R]: """Create a task and store a reference to it until the task completes. target: target to call. """ task = asyncio.get_running_loop().create_task(target, name=name) self._tasks.add(task) task.add_done_callback(self._tasks.remove) return task async def _load_db(self) -> None: """Restore save state.""" database_file = self.config[conf.CONF_DATABASE] if not database_file: return self._dblistener = await zigpy.appdb.PersistingListener.new(database_file, self) await self._dblistener.load() self._add_db_listeners() def _add_db_listeners(self): if self._dblistener is None: return self.add_listener(self._dblistener) self.groups.add_listener(self._dblistener) self.backups.add_listener(self._dblistener) self.topology.add_listener(self._dblistener) def _remove_db_listeners(self): if self._dblistener is None: return self.topology.remove_listener(self._dblistener) self.backups.remove_listener(self._dblistener) self.groups.remove_listener(self._dblistener) self.remove_listener(self._dblistener) async def initialize(self, *, auto_form: bool = False) -> None: """Starts the network on a connected radio, optionally forming one with random settings if necessary. 
""" # Make sure the first thing we do is feed the watchdog if self.config[conf.CONF_WATCHDOG_ENABLED]: await self.watchdog_feed() self._watchdog_task = asyncio.create_task(self._watchdog_loop()) last_backup = self.backups.most_recent_backup() try: await self.load_network_info(load_devices=False) except zigpy.exceptions.NetworkNotFormed: LOGGER.info("Network is not formed") if not auto_form: raise if last_backup is None: # Form a new network if we have no backup await self.form_network() else: # Otherwise, restore the most recent backup LOGGER.info("Restoring the most recent network backup") await self.backups.restore_backup(last_backup) LOGGER.debug("Network info: %s", self.state.network_info) LOGGER.debug("Node info: %s", self.state.node_info) new_state = self.backups.from_network_state() if ( self.config[conf.CONF_NWK_VALIDATE_SETTINGS] and last_backup is not None and not new_state.is_compatible_with(last_backup) ): raise zigpy.exceptions.NetworkSettingsInconsistent( f"Radio network settings are not compatible with most recent backup!\n" f"Current settings: {new_state!r}\n" f"Last backup: {last_backup!r}", old_state=last_backup, new_state=new_state, ) await self.start_network() self._persist_coordinator_model_strings_in_db() # Some radios erroneously permit joins on startup try: await self.permit(0) except zigpy.exceptions.DeliveryError as e: if e.status != t.MACStatus.MAC_CHANNEL_ACCESS_FAILURE: raise # Some radios (like the Conbee) can fail to deliver the startup broadcast # due to interference LOGGER.warning("Failed to send startup broadcast: %s", e) LOGGER.warning(const.INTERFERENCE_MESSAGE) if self.config[conf.CONF_STARTUP_ENERGY_SCAN]: # Each scan period is 15.36ms. Scan for at least 200ms (2^4 + 1 periods) to # pick up WiFi beacon frames. 
results = await self.energy_scan( channels=t.Channels.ALL_CHANNELS, duration_exp=4, count=1 ) LOGGER.debug("Startup energy scan: %s", results) if results[self.state.network_info.channel] > ENERGY_SCAN_WARN_THRESHOLD: LOGGER.warning( "Zigbee channel %s utilization is %0.2f%%!", self.state.network_info.channel, 100 * results[self.state.network_info.channel] / 255, ) LOGGER.warning(const.INTERFERENCE_MESSAGE) if self.config[conf.CONF_NWK_BACKUP_ENABLED]: self.backups.start_periodic_backups( # Config specifies the period in minutes, not seconds period=(60 * self.config[conf.CONF_NWK_BACKUP_PERIOD]) ) if self.config[conf.CONF_TOPO_SCAN_ENABLED]: # Config specifies the period in minutes, not seconds self.topology.start_periodic_scans( period=(60 * self.config[zigpy.config.CONF_TOPO_SCAN_PERIOD]) ) # Only initialize OTA after we've fully loaded await self.ota.initialize() async def startup(self, *, auto_form: bool = False) -> None: """Starts a network, optionally forming one with random settings if necessary.""" try: await self.connect() await self.initialize(auto_form=auto_form) except Exception as e: await self.shutdown(db=False) if isinstance(e, ConnectionError) or ( isinstance(e, OSError) and e.errno in TRANSIENT_CONNECTION_ERRORS ): raise zigpy.exceptions.TransientConnectionError() from e raise @classmethod async def new( cls, config: dict, auto_form: bool = False, start_radio: bool = True ) -> ControllerApplication: """Create new instance of application controller.""" app = cls(config) await app._load_db() if start_radio: await app.startup(auto_form=auto_form) return app async def energy_scan( self, channels: t.Channels, duration_exp: int, count: int ) -> dict[int, float]: """Runs an energy detection scan and returns the per-channel scan results.""" try: rsp = await self._device.zdo.Mgmt_NWK_Update_req( zigpy.zdo.types.NwkUpdate( ScanChannels=channels, ScanDuration=duration_exp, ScanCount=count, ) ) except (asyncio.TimeoutError, zigpy.exceptions.DeliveryError): 
LOGGER.warning("Coordinator does not support energy scanning") scanned_channels = channels energy_values = [0] * scanned_channels else: _, scanned_channels, _, _, energy_values = rsp return dict(zip(scanned_channels, energy_values)) async def _move_network_to_channel( self, new_channel: int, new_nwk_update_id: int ) -> None: """Broadcasts the channel migration update request.""" # Default implementation for radios that migrate via a loopback ZDO request await self._device.zdo.Mgmt_NWK_Update_req( zigpy.zdo.types.NwkUpdate( ScanChannels=zigpy.types.Channels.from_channel_list([new_channel]), ScanDuration=zigpy.zdo.types.NwkUpdate.CHANNEL_CHANGE_REQ, nwkUpdateId=new_nwk_update_id, ) ) async def move_network_to_channel( self, new_channel: int, *, num_broadcasts: int = 5 ) -> None: """Moves the network to a new channel.""" if self.state.network_info.channel == new_channel: return new_nwk_update_id = (self.state.network_info.nwk_update_id + 1) % 0xFF for attempt in range(num_broadcasts): LOGGER.info( "Broadcasting migration to channel %s (%s of %s)", new_channel, attempt + 1, num_broadcasts, ) await zigpy.zdo.broadcast( app=self, command=zigpy.zdo.types.ZDOCmd.Mgmt_NWK_Update_req, grpid=None, radius=30, # Explicitly set the maximum radius broadcast_address=zigpy.types.BroadcastAddress.ALL_DEVICES, NwkUpdate=zigpy.zdo.types.NwkUpdate( ScanChannels=zigpy.types.Channels.from_channel_list([new_channel]), ScanDuration=zigpy.zdo.types.NwkUpdate.CHANNEL_CHANGE_REQ, nwkUpdateId=new_nwk_update_id, ), ) await asyncio.sleep(CHANNEL_CHANGE_BROADCAST_DELAY_S) # Move the coordinator itself, if supported await self._move_network_to_channel( new_channel=new_channel, new_nwk_update_id=new_nwk_update_id ) # Wait for settings to update while self.state.network_info.channel != new_channel: LOGGER.info("Waiting for channel change to take effect") await self.load_network_info(load_devices=False) await asyncio.sleep(CHANNEL_CHANGE_SETTINGS_RELOAD_DELAY_S) LOGGER.info("Successfully migrated to 
channel %d", new_channel) async def form_network(self, *, fast: bool = False) -> None: """Writes random network settings to the coordinator.""" # First, make the settings consistent and randomly generate missing values channel = self.config[conf.CONF_NWK][conf.CONF_NWK_CHANNEL] channels = self.config[conf.CONF_NWK][conf.CONF_NWK_CHANNELS] pan_id = self.config[conf.CONF_NWK][conf.CONF_NWK_PAN_ID] extended_pan_id = self.config[conf.CONF_NWK][conf.CONF_NWK_EXTENDED_PAN_ID] network_key = self.config[conf.CONF_NWK][conf.CONF_NWK_KEY] tc_address = self.config[conf.CONF_NWK][conf.CONF_NWK_TC_ADDRESS] stack_specific = {} if fast: # Indicate to the radio library that the network is ephemeral stack_specific["form_quickly"] = True if pan_id is None: pan_id = random.SystemRandom().randint(0x0001, 0xFFFE + 1) if channel is None and fast: # Don't run an energy scan if this is an ephemeral network channel = next(iter(channels)) elif channel is None and not fast: # We can't run an energy scan without a running network on most radios try: await self.start_network() except zigpy.exceptions.NetworkNotFormed: await self.form_network(fast=True) await self.start_network() channel_energy = await self.energy_scan( channels=t.Channels.ALL_CHANNELS, duration_exp=4, count=1 ) channel = zigpy.util.pick_optimal_channel(channel_energy, channels=channels) if extended_pan_id is None: # TODO: exclude `FF:FF:FF:FF:FF:FF:FF:FF` and possibly more reserved EPIDs extended_pan_id = t.ExtendedPanId(os.urandom(8)) if network_key is None: network_key = t.KeyData(os.urandom(16)) if tc_address is None: tc_address = t.EUI64.UNKNOWN network_info = zigpy.state.NetworkInfo( extended_pan_id=extended_pan_id, pan_id=pan_id, nwk_update_id=self.config[conf.CONF_NWK][conf.CONF_NWK_UPDATE_ID], nwk_manager_id=0x0000, channel=channel, channel_mask=t.Channels.from_channel_list([channel]), security_level=5, network_key=zigpy.state.Key( key=network_key, tx_counter=0, rx_counter=0, 
seq=self.config[conf.CONF_NWK][conf.CONF_NWK_KEY_SEQ], ), tc_link_key=zigpy.state.Key( key=self.config[conf.CONF_NWK][conf.CONF_NWK_TC_LINK_KEY], tx_counter=0, rx_counter=0, seq=0, partner_ieee=tc_address, ), children=[], key_table=[], nwk_addresses={}, stack_specific=stack_specific, ) node_info = zigpy.state.NodeInfo( nwk=0x0000, ieee=t.EUI64.UNKNOWN, # Use the device IEEE address logical_type=zdo_types.LogicalType.Coordinator, ) LOGGER.debug("Forming a new network") await self.backups.restore_backup( backup=zigpy.backups.NetworkBackup( network_info=network_info, node_info=node_info, ), counter_increment=0, allow_incomplete=True, create_new=(not fast), ) async def shutdown(self, *, db: bool = True) -> None: """Shutdown controller.""" if self._watchdog_task is not None: self._watchdog_task.cancel() self.backups.stop_periodic_backups() self.topology.stop_periodic_scans() try: await self.disconnect() except Exception: LOGGER.warning("Failed to disconnect from radio", exc_info=True) if db and self._dblistener: self._remove_db_listeners() try: await self._dblistener.shutdown() except Exception: LOGGER.warning("Failed to disconnect from database", exc_info=True) def add_device(self, ieee: t.EUI64, nwk: t.NWK) -> zigpy.device.Device: """Creates a zigpy `Device` object with the provided IEEE and NWK addresses.""" assert isinstance(ieee, t.EUI64) # TODO: Shut down existing device dev = zigpy.device.Device(self, ieee, nwk) self.devices[ieee] = dev return dev def device_initialized(self, device: zigpy.device.Device) -> None: """Used by a device to signal that it is initialized""" LOGGER.debug("Device is initialized %s", device) self.listener_event("raw_device_initialized", device) device = zigpy.quirks.get_device(device) self.devices[device.ieee] = device if self._dblistener is not None: device.add_context_listener(self._dblistener) self.listener_event("device_initialized", device) async def remove( self, ieee: t.EUI64, remove_children: bool = True, rejoin: bool = False ) -> 
None: """Try to remove a device from the network. :param ieee: address of the device to be removed """ assert isinstance(ieee, t.EUI64) dev = self.devices.get(ieee) if not dev: LOGGER.debug("Device not found for removal: %s", ieee) return dev.cancel_initialization() LOGGER.info("Removing device 0x%04x (%s)", dev.nwk, ieee) self.create_task( self._remove_device(dev, remove_children=remove_children, rejoin=rejoin), f"remove_device-nwk={dev.nwk!r}-ieee={ieee!r}", ) if dev.node_desc is not None and dev.node_desc.is_end_device: parents = [] for parent in self.devices.values(): for zdo_neighbor in self.topology.neighbors[parent.ieee]: try: neighbor = self.get_device(ieee=zdo_neighbor.ieee) except KeyError: continue if neighbor is dev: parents.append(parent) for parent in parents: LOGGER.debug( "Sending leave request for %s to %s parent", dev.ieee, parent.ieee ) opts = parent.zdo.LeaveOptions.RemoveChildren if rejoin: opts |= parent.zdo.LeaveOptions.Rejoin parent.zdo.create_catching_task( parent.zdo.Mgmt_Leave_req(dev.ieee, opts) ) self.listener_event("device_removed", dev) async def _remove_device( self, device: zigpy.device.Device, remove_children: bool = True, rejoin: bool = False, ) -> None: """Send a remove request then pop the device.""" try: async with asyncio_timeout( 30 if device.node_desc is not None and device.node_desc.is_end_device else 7 ): await device.zdo.leave(remove_children=remove_children, rejoin=rejoin) except (zigpy.exceptions.DeliveryError, asyncio.TimeoutError) as ex: LOGGER.debug("Sending 'zdo_leave_req' failed: %s", ex) self.devices.pop(device.ieee, None) def deserialize( self, sender: zigpy.device.Device, endpoint_id: t.uint8_t, cluster_id: t.uint16_t, data: bytes, ) -> tuple[Any, bytes]: return sender.deserialize(endpoint_id, cluster_id, data) def handle_join( self, nwk: t.NWK, ieee: t.EUI64, parent_nwk: t.NWK, *, handle_rejoin: bool = True, ) -> None: """Called when a device joins or announces itself on the network.""" ieee = t.EUI64(ieee) 
try: dev = self.get_device(ieee=ieee) except KeyError: dev = self.add_device(ieee, nwk) LOGGER.info("New device 0x%04x (%s) joined the network", nwk, ieee) new_join = True else: if handle_rejoin: LOGGER.info("Device 0x%04x (%s) joined the network", nwk, ieee) new_join = False if dev.nwk != nwk: LOGGER.debug("Device %s changed id (0x%04x => 0x%04x)", ieee, dev.nwk, nwk) dev.nwk = nwk new_join = True # Not all stacks send a ZDO command when a device joins so the last_seen should # be updated dev.update_last_seen() if new_join: self.listener_event("device_joined", dev) dev.schedule_initialize() elif not dev.is_initialized: # Re-initialize partially-initialized devices but don't emit "device_joined" dev.schedule_initialize() elif handle_rejoin: # Rescan groups for devices that are not newly joining and initialized dev.schedule_group_membership_scan() def handle_leave(self, nwk: t.NWK, ieee: t.EUI64): """Called when a device has left the network.""" LOGGER.info("Device 0x%04x (%s) left the network", nwk, ieee) try: dev = self.get_device(ieee=ieee) except KeyError: return else: self.listener_event("device_left", dev) def handle_relays(self, nwk: t.NWK, relays: list[t.NWK]) -> None: """Called when a list of relaying devices is received.""" try: device = self.get_device(nwk=nwk) except KeyError: LOGGER.warning("Received relays from an unknown device: %s", nwk) self.create_task( self._discover_unknown_device(nwk), f"discover_unknown_device_from_relays-nwk={nwk!r}", ) else: # `relays` is a property with a setter that emits an event device.relays = relays @classmethod async def probe(cls, device_config: dict[str, Any]) -> bool | dict[str, Any]: """Probes the device specified by `device_config` and returns valid device settings if the radio supports the device. If the device is not supported, `False` is returned. 
""" device_configs = [conf.SCHEMA_DEVICE(device_config)] for overrides in cls._probe_configs: new_config = conf.SCHEMA_DEVICE({**device_config, **overrides}) if new_config not in device_configs: device_configs.append(new_config) for device_config in device_configs: app = cls(cls.SCHEMA({conf.CONF_DEVICE: device_config})) try: await app.connect() except Exception: LOGGER.debug( "Failed to probe with config %s", device_config, exc_info=True ) else: return device_config finally: await app.disconnect() return False @abc.abstractmethod async def connect(self): """Connect to the radio hardware and verify that it is compatible with the library. This method should be stateless if the connection attempt fails. """ raise NotImplementedError() # pragma: no cover async def watchdog_feed(self) -> None: """ Reset the firmware watchdog timer. """ LOGGER.debug("Feeding watchdog") await self._watchdog_feed() async def _watchdog_feed(self) -> None: """ Reset the firmware watchdog timer. Implemented by the radio library. """ async def _watchdog_loop(self) -> None: """ Watchdog loop to periodically test if the stack is still running. 
""" LOGGER.debug("Starting watchdog loop") while True: await asyncio.sleep(self._watchdog_period) try: await self.watchdog_feed() except Exception as e: LOGGER.warning("Watchdog failure", exc_info=e) # Treat the watchdog failure as a disconnect self.connection_lost(e) break LOGGER.debug("Stopping watchdog loop") def connection_lost(self, exc: Exception) -> None: """Connection lost callback.""" LOGGER.debug("Connection to the radio has been lost: %r", exc) self.listener_event("connection_lost", exc) @abc.abstractmethod async def disconnect(self): """Disconnects from the radio hardware and shuts down the network.""" raise NotImplementedError() # pragma: no cover @abc.abstractmethod async def start_network(self): """Starts a Zigbee network with settings currently stored in the radio hardware.""" raise NotImplementedError() # pragma: no cover @abc.abstractmethod async def force_remove(self, dev: zigpy.device.Device): """Instructs the radio to remove a device with a lower-level leave command. Not all radios implement this. """ raise NotImplementedError() # pragma: no cover @abc.abstractmethod async def add_endpoint(self, descriptor: zdo_types.SimpleDescriptor): """Registers a new endpoint on the controlled device. Not all radios will implement this. """ raise NotImplementedError() # pragma: no cover async def register_endpoints(self) -> None: """Registers all necessary endpoints. The exact order in which this method is called depends on the radio module. 
""" await self.add_endpoint( zdo_types.SimpleDescriptor( endpoint=1, profile=zigpy.profiles.zha.PROFILE_ID, device_type=zigpy.profiles.zha.DeviceType.IAS_CONTROL, device_version=0b0000, input_clusters=[ zigpy.zcl.clusters.general.Basic.cluster_id, zigpy.zcl.clusters.general.OnOff.cluster_id, zigpy.zcl.clusters.general.Time.cluster_id, zigpy.zcl.clusters.general.Ota.cluster_id, zigpy.zcl.clusters.security.IasAce.cluster_id, ], output_clusters=[ zigpy.zcl.clusters.general.PowerConfiguration.cluster_id, zigpy.zcl.clusters.general.PollControl.cluster_id, zigpy.zcl.clusters.security.IasZone.cluster_id, zigpy.zcl.clusters.security.IasWd.cluster_id, ], ) ) await self.add_endpoint( zdo_types.SimpleDescriptor( endpoint=2, profile=zigpy.profiles.zll.PROFILE_ID, device_type=zigpy.profiles.zll.DeviceType.CONTROLLER, device_version=0b0000, input_clusters=[zigpy.zcl.clusters.general.Basic.cluster_id], output_clusters=[], ) ) for endpoint in self.config[conf.CONF_ADDITIONAL_ENDPOINTS]: await self.add_endpoint(endpoint) @contextlib.asynccontextmanager async def _limit_concurrency(self): """Async context manager to limit global coordinator request concurrency.""" start_time = time.monotonic() was_locked = self._concurrent_requests_semaphore.locked() if was_locked: LOGGER.debug( "Max concurrency (%s) reached, delaying request (%s enqueued)", self._concurrent_requests_semaphore.max_value, self._concurrent_requests_semaphore.num_waiting, ) async with self._concurrent_requests_semaphore: if was_locked: LOGGER.debug( "Previously delayed request is now running, delayed by %0.2fs", time.monotonic() - start_time, ) yield @abc.abstractmethod async def send_packet(self, packet: t.ZigbeePacket) -> None: """Send a Zigbee packet using the appropriate addressing mode and provided options.""" raise NotImplementedError() # pragma: no cover def build_source_route_to(self, dest: zigpy.device.Device) -> list[t.NWK] | None: """Compute a source route to the destination device.""" if dest.relays is 
None: return None # TODO: utilize topology scanner information return dest.relays[::-1] async def request( self, device: zigpy.device.Device, profile: t.uint16_t, cluster: t.uint16_t, src_ep: t.uint8_t, dst_ep: t.uint8_t, sequence: t.uint8_t, data: bytes, *, expect_reply: bool = True, use_ieee: bool = False, extended_timeout: bool = False, ) -> tuple[zigpy.zcl.foundation.Status, str]: """Submit and send data out as an unicast transmission. :param device: destination device :param profile: Zigbee Profile ID to use for outgoing message :param cluster: cluster id where the message is being sent :param src_ep: source endpoint id :param dst_ep: destination endpoint id :param sequence: transaction sequence number of the message :param data: Zigbee message payload :param expect_reply: True if this is essentially a request :param use_ieee: use EUI64 for destination addressing :param extended_timeout: instruct the radio to use slower APS retries """ if use_ieee: src = t.AddrModeAddress( addr_mode=t.AddrMode.IEEE, address=self.state.node_info.ieee ) dst = t.AddrModeAddress(addr_mode=t.AddrMode.IEEE, address=device.ieee) else: src = t.AddrModeAddress( addr_mode=t.AddrMode.NWK, address=self.state.node_info.nwk ) dst = t.AddrModeAddress(addr_mode=t.AddrMode.NWK, address=device.nwk) if self.config[conf.CONF_SOURCE_ROUTING]: source_route = self.build_source_route_to(dest=device) else: source_route = None tx_options = t.TransmitOptions.NONE if not expect_reply: tx_options |= t.TransmitOptions.ACK await self.send_packet( t.ZigbeePacket( src=src, src_ep=src_ep, dst=dst, dst_ep=dst_ep, tsn=sequence, profile_id=profile, cluster_id=cluster, data=t.SerializableBytes(data), extended_timeout=extended_timeout, source_route=source_route, tx_options=tx_options, ) ) return (zigpy.zcl.foundation.Status.SUCCESS, "") async def mrequest( self, group_id: t.uint16_t, profile: t.uint8_t, cluster: t.uint16_t, src_ep: t.uint8_t, sequence: t.uint8_t, data: bytes, *, hops: int = 0, non_member_radius: 
int = 3, ): """Submit and send data out as a multicast transmission. :param group_id: destination multicast address :param profile: Zigbee Profile ID to use for outgoing message :param cluster: cluster id where the message is being sent :param src_ep: source endpoint id :param sequence: transaction sequence number of the message :param data: Zigbee message payload :param hops: the message will be delivered to all nodes within this number of hops of the sender. A value of zero is converted to MAX_HOPS :param non_member_radius: the number of hops that the message will be forwarded by devices that are not members of the group. A value of 7 or greater is treated as infinite """ await self.send_packet( t.ZigbeePacket( src=t.AddrModeAddress( addr_mode=t.AddrMode.NWK, address=self.state.node_info.nwk ), src_ep=src_ep, dst=t.AddrModeAddress(addr_mode=t.AddrMode.Group, address=group_id), tsn=sequence, profile_id=profile, cluster_id=cluster, data=t.SerializableBytes(data), tx_options=t.TransmitOptions.NONE, radius=hops, non_member_radius=non_member_radius, ) ) return (zigpy.zcl.foundation.Status.SUCCESS, "") async def broadcast( self, profile: t.uint16_t, cluster: t.uint16_t, src_ep: t.uint8_t, dst_ep: t.uint8_t, grpid: t.uint16_t, radius: int, sequence: t.uint8_t, data: bytes, broadcast_address: t.BroadcastAddress = t.BroadcastAddress.RX_ON_WHEN_IDLE, ) -> tuple[zigpy.zcl.foundation.Status, str]: """Submit and send data out as an unicast transmission. :param profile: Zigbee Profile ID to use for outgoing message :param cluster: cluster id where the message is being sent :param src_ep: source endpoint id :param dst_ep: destination endpoint id :param: grpid: group id to address the broadcast to :param radius: max radius of the broadcast :param sequence: transaction sequence number of the message :param data: zigbee message payload :param timeout: how long to wait for transmission ACK :param broadcast_address: broadcast address. 
""" await self.send_packet( t.ZigbeePacket( src=t.AddrModeAddress( addr_mode=t.AddrMode.NWK, address=self.state.node_info.nwk ), src_ep=src_ep, dst=t.AddrModeAddress( addr_mode=t.AddrMode.Broadcast, address=broadcast_address ), dst_ep=dst_ep, tsn=sequence, profile_id=profile, cluster_id=cluster, data=t.SerializableBytes(data), tx_options=t.TransmitOptions.NONE, radius=radius, ) ) return (zigpy.zcl.foundation.Status.SUCCESS, "") async def _discover_unknown_device(self, nwk: t.NWK) -> None: """Discover the IEEE address of a device with an unknown NWK.""" return await zigpy.zdo.broadcast( app=self, command=zdo_types.ZDOCmd.IEEE_addr_req, grpid=None, radius=0, NWKAddrOfInterest=nwk, RequestType=zdo_types.AddrRequestType.Single, StartIndex=0, ) def _maybe_parse_zdo(self, packet: t.ZigbeePacket) -> None: """Attempt to parse an incoming packet as ZDO, to extract useful notifications.""" # The current zigpy device may not exist if we receive a packet early try: zdo = self._device.zdo except KeyError: zdo = zigpy.zdo.ZDO(None) try: zdo_hdr, zdo_args = zdo.deserialize( cluster_id=packet.cluster_id, data=packet.data.serialize() ) except ValueError: LOGGER.debug("Could not parse ZDO message from packet") return # Interpret useful global ZDO responses and notifications if zdo_hdr.command_id == zdo_types.ZDOCmd.Device_annce: nwk, ieee, _ = zdo_args self.handle_join(nwk=nwk, ieee=ieee, parent_nwk=None) elif zdo_hdr.command_id in ( zdo_types.ZDOCmd.NWK_addr_rsp, zdo_types.ZDOCmd.IEEE_addr_rsp, ): status, ieee, nwk, _, _, _ = zdo_args if status == zdo_types.Status.SUCCESS: LOGGER.debug("Discovered IEEE address for NWK=%s: %s", nwk, ieee) self.handle_join( nwk=nwk, ieee=ieee, parent_nwk=None, handle_rejoin=False ) def packet_received(self, packet: t.ZigbeePacket) -> None: """Notify zigpy of a received Zigbee packet.""" LOGGER.debug("Received a packet: %r", packet) assert packet.src is not None assert packet.dst is not None # Peek into ZDO packets to handle possible ZDO notifications 
if zigpy.zdo.ZDO_ENDPOINT in (packet.src_ep, packet.dst_ep): self._maybe_parse_zdo(packet) try: device = self.get_device_with_address(packet.src) except KeyError: LOGGER.warning("Unknown device %r", packet.src) if packet.src.addr_mode == t.AddrMode.NWK: # Manually send a ZDO IEEE address request to discover the device self.create_task( self._discover_unknown_device(packet.src.address), f"discover_unknown_device_from_packet-nwk={packet.src.address!r}", ) return self.listener_event( "handle_message", device, packet.profile_id, packet.cluster_id, packet.src_ep, packet.dst_ep, packet.data.serialize(), ) if device.is_initialized: return device.packet_received(packet) LOGGER.debug( "Received frame on uninitialized device %s" " from ep %s to ep %s, cluster %s: %r", device, packet.src_ep, packet.dst_ep, packet.cluster_id, packet.data, ) if ( packet.dst_ep == 0 or device.all_endpoints_init or ( device.has_non_zdo_endpoints and packet.cluster_id == zigpy.zcl.clusters.general.Basic.cluster_id ) ): # Allow the following responses: # - any ZDO # - ZCL if endpoints are initialized # - ZCL from Basic packet.cluster_id if endpoints are initializing if not device.initializing: device.schedule_initialize() return device.packet_received(packet) # Give quirks a chance to fast-initialize the device (at the moment only Xiaomi) zigpy.quirks.handle_message_from_uninitialized_sender( device, packet.profile_id, packet.cluster_id, packet.src_ep, packet.dst_ep, packet.data.serialize(), ) # Reload the device device object, in it was replaced by the quirk device = self.get_device(ieee=device.ieee) # If the quirk did not fast-initialize the device, start initialization if not device.initializing and not device.is_initialized: device.schedule_initialize() def handle_message( self, sender: zigpy.device.Device, profile: int, cluster: int, src_ep: int, dst_ep: int, message: bytes, *, dst_addressing: zigpy.typing.AddressingMode | None = None, ): """Deprecated compatibility function. 
Use `packet_received` instead.""" warnings.warn( "`handle_message` is deprecated, use `packet_received`", DeprecationWarning ) if dst_addressing is None: dst_addressing = t.AddrMode.NWK self.packet_received( t.ZigbeePacket( profile_id=profile, cluster_id=cluster, src_ep=src_ep, dst_ep=dst_ep, data=t.SerializableBytes(message), src=t.AddrModeAddress( addr_mode=dst_addressing, address={ t.AddrMode.NWK: sender.nwk, t.AddrMode.IEEE: sender.ieee, }[dst_addressing], ), dst=t.AddrModeAddress( addr_mode=t.AddrMode.NWK, address=self.state.node_info.nwk, ), ) ) def get_device_with_address( self, address: t.AddrModeAddress ) -> zigpy.device.Device: """Gets a `Device` object using the provided address mode address.""" if address.addr_mode == t.AddrMode.NWK: return self.get_device(nwk=address.address) elif address.addr_mode == t.AddrMode.IEEE: return self.get_device(ieee=address.address) else: raise ValueError(f"Invalid address: {address!r}") @contextlib.contextmanager def callback_for_response( self, src: zigpy.device.Device | zigpy.listeners.ANY_DEVICE, filters: list[zigpy.listeners.MatcherType], callback: typing.Callable[ [ zigpy.zcl.foundation.ZCLHeader, zigpy.zcl.foundation.CommandSchema, ], typing.Any, ], ) -> typing.Any: """Context manager to create a callback that is passed Zigbee responses.""" listener = zigpy.listeners.CallbackListener( matchers=tuple(filters), callback=callback, ) self._req_listeners[src].append(listener) try: yield finally: self._req_listeners[src].remove(listener) @contextlib.contextmanager def wait_for_response( self, src: zigpy.device.Device | zigpy.listeners.ANY_DEVICE, filters: list[zigpy.listeners.MatcherType], ) -> typing.Any: """Context manager to wait for a Zigbee response.""" listener = zigpy.listeners.FutureListener( matchers=tuple(filters), future=asyncio.get_running_loop().create_future(), ) self._req_listeners[src].append(listener) try: yield listener.future finally: self._req_listeners[src].remove(listener) @abc.abstractmethod async 
def permit_ncp(self, time_s: int = 60) -> None: """Permit joining on NCP. Not all radios will require this method. """ raise NotImplementedError() # pragma: no cover async def permit_with_key(self, node: t.EUI64, code: bytes, time_s: int = 60): """Permit a node to join with the provided install code bytes.""" warnings.warn( "`permit_with_key` is deprecated, use `permit_with_link_key`", DeprecationWarning, ) key = zigpy.util.convert_install_code(code) if key is None: raise ValueError(f"Invalid install code: {code!r}") await self.permit_with_link_key(node=node, link_key=key, time_s=time_s) @abc.abstractmethod async def permit_with_link_key( self, node: t.EUI64, link_key: t.KeyData, time_s: int = 60 ) -> None: """Permit a node to join with the provided link key.""" raise NotImplementedError() # pragma: no cover @abc.abstractmethod async def write_network_info( self, *, network_info: zigpy.state.NetworkInfo, node_info: zigpy.state.NodeInfo, ) -> None: """Writes network and node state to the radio hardware. Any information not supported by the radio should be logged as a warning. """ raise NotImplementedError() # pragma: no cover @abc.abstractmethod async def load_network_info(self, *, load_devices: bool = False) -> None: """Loads network and node information from the radio hardware. :param load_devices: if `False`, supplementary network information that may take a while to load should be skipped. For example, device NWK addresses and link keys. 
""" raise NotImplementedError() # pragma: no cover @abc.abstractmethod async def reset_network_info(self) -> None: """Leaves the current network.""" raise NotImplementedError() # pragma: no cover async def permit(self, time_s: int = 60, node: t.EUI64 | str | None = None) -> None: """Permit joining on a specific node or all router nodes.""" assert 0 <= time_s <= 254 if node is not None: if not isinstance(node, t.EUI64): node = t.EUI64([t.uint8_t(p) for p in node]) if node != self.state.node_info.ieee: try: dev = self.get_device(ieee=node) r = await dev.zdo.permit(time_s) LOGGER.debug("Sent 'mgmt_permit_joining_req' to %s: %s", node, r) except KeyError: LOGGER.warning("Device '%s' not found", node) except zigpy.exceptions.DeliveryError as ex: LOGGER.warning("Couldn't open '%s' for joining: %s", node, ex) else: await self.permit_ncp(time_s) return await zigpy.zdo.broadcast( self, # app zdo_types.ZDOCmd.Mgmt_Permit_Joining_req, # command 0x0000, # grpid 0x00, # radius time_s, 0, broadcast_address=t.BroadcastAddress.ALL_ROUTERS_AND_COORDINATOR, ) await self.permit_ncp(time_s) def get_sequence(self) -> t.uint8_t: self._send_sequence = (self._send_sequence + 1) % 256 return self._send_sequence def get_device( self, ieee: t.EUI64 = None, nwk: t.NWK | int = None ) -> zigpy.device.Device: """Looks up a device in the `devices` dictionary based either on its NWK or IEEE address. 
""" if ieee is not None: return self.devices[ieee] # If there two coordinators are loaded from the database, we want the active one if nwk == self.state.node_info.nwk: return self.devices[self.state.node_info.ieee] # TODO: Make this not terrible # Unlike its IEEE address, a device's NWK address can change at runtime so this # is not as simple as building a second mapping for dev in self.devices.values(): if dev.nwk == nwk: return dev raise KeyError(f"Device not found: nwk={nwk!r}, ieee={ieee!r}") def get_endpoint_id(self, cluster_id: int, is_server_cluster: bool = False) -> int: """Returns coordinator endpoint id for specified cluster id.""" return DEFAULT_ENDPOINT_ID def get_dst_address(self, cluster: zigpy.zcl.Cluster) -> zdo_types.MultiAddress: """Helper to get a dst address for bind/unbind operations. Allows radios to provide correct information especially for radios which listen on specific endpoints only. :param cluster: cluster instance to be bound to coordinator :returns: returns a "destination address" """ dstaddr = zdo_types.MultiAddress() dstaddr.addrmode = 3 dstaddr.ieee = self.state.node_info.ieee dstaddr.endpoint = self.get_endpoint_id(cluster.cluster_id, cluster.is_server) return dstaddr def update_config(self, partial_config: dict[str, Any]) -> None: """Update existing config.""" self.config = {**self.config, **partial_config} @property def config(self) -> dict: """Return current configuration.""" return self._config @config.setter def config(self, new_config) -> None: """Configuration setter.""" self._config = self.SCHEMA(new_config) @property def groups(self) -> zigpy.group.Groups: return self._groups @property def ota(self) -> zigpy.ota.OTA: return self._ota @property def _device(self) -> zigpy.device.Device: """The device being controlled.""" return self.get_device(ieee=self.state.node_info.ieee) def _persist_coordinator_model_strings_in_db(self) -> None: cluster = self._device.endpoints[1].add_input_cluster( 
zigpy.zcl.clusters.general.Basic.cluster_id ) cluster.update_attribute( attrid=zigpy.zcl.clusters.general.Basic.AttributeDefs.model.id, value=self._device.model, ) cluster.update_attribute( attrid=zigpy.zcl.clusters.general.Basic.AttributeDefs.manufacturer.id, value=self._device.manufacturer, ) self.device_initialized(self._device) zigpy-0.62.3/zigpy/backups.py000066400000000000000000000403121456054056700161240ustar00rootroot00000000000000"""Classes to interact with zigpy network backups, including JSON serialization.""" from __future__ import annotations import asyncio import copy import dataclasses from datetime import datetime, timezone import logging from typing import TYPE_CHECKING, Any import zigpy.config as conf import zigpy.state import zigpy.types as t from zigpy.util import ListenableMixin if TYPE_CHECKING: import zigpy.application LOGGER = logging.getLogger(__name__) BACKUP_FORMAT_VERSION = 1 @dataclasses.dataclass class NetworkBackup(t.BaseDataclassMixin): version: int = dataclasses.field(default=BACKUP_FORMAT_VERSION) backup_time: datetime = dataclasses.field( default_factory=lambda: datetime.now(timezone.utc) ) network_info: zigpy.state.NetworkInfo = dataclasses.field( default_factory=zigpy.state.NetworkInfo ) node_info: zigpy.state.NodeInfo = dataclasses.field( default_factory=zigpy.state.NodeInfo ) def is_compatible_with(self, backup: NetworkBackup) -> bool: """Two backups are compatible if, ignoring frame counters, the same external device will be able to join either network. 
""" return ( self.node_info.nwk == backup.node_info.nwk and self.node_info.logical_type == backup.node_info.logical_type and self.node_info.ieee == backup.node_info.ieee and self.network_info.extended_pan_id == backup.network_info.extended_pan_id and self.network_info.pan_id == backup.network_info.pan_id and self.network_info.nwk_update_id == backup.network_info.nwk_update_id and self.network_info.nwk_manager_id == backup.network_info.nwk_manager_id and self.network_info.channel == backup.network_info.channel and self.network_info.security_level == backup.network_info.security_level and self.network_info.tc_link_key.key == backup.network_info.tc_link_key.key and self.network_info.network_key.key == backup.network_info.network_key.key ) def supersedes(self, backup: NetworkBackup) -> bool: """Checks if this network backup is more recent than another backup.""" return ( self.is_compatible_with(backup) and ( self.network_info.network_key.tx_counter > backup.network_info.network_key.tx_counter ) and self.network_info.nwk_update_id >= backup.network_info.nwk_update_id ) def is_complete(self) -> bool: """Checks if this backup captures enough network state to recreate the network.""" return ( self.node_info.ieee != t.EUI64.UNKNOWN and self.network_info.extended_pan_id != t.EUI64.UNKNOWN and self.network_info.pan_id not in (0x0000, 0xFFFF) and self.network_info.channel in range(11, 26 + 1) and self.network_info.network_key.key != t.KeyData.UNKNOWN ) def as_dict(self) -> dict[str, Any]: return { "version": self.version, "backup_time": self.backup_time.isoformat(), "network_info": self.network_info.as_dict(), "node_info": self.node_info.as_dict(), } @classmethod def from_dict(cls, obj: dict[str, Any]) -> NetworkBackup: if "metadata" in obj: return cls.from_open_coordinator_json(obj) elif "network_info" in obj: version = obj.get("version", 0) # Version 1 introduced the `model`, `manufacturer`, and `version` fields if version == 0: obj = copy.deepcopy(obj) 
obj["node_info"]["model"] = None obj["node_info"]["manufacturer"] = None obj["node_info"]["version"] = None version = 1 assert version == BACKUP_FORMAT_VERSION return cls( version=BACKUP_FORMAT_VERSION, backup_time=datetime.fromisoformat(obj["backup_time"]), network_info=zigpy.state.NetworkInfo.from_dict(obj["network_info"]), node_info=zigpy.state.NodeInfo.from_dict(obj["node_info"]), ) else: raise ValueError(f"Invalid network backup object: {obj!r}") def as_open_coordinator_json(self) -> dict[str, Any]: return _network_backup_to_open_coordinator_backup(self) @classmethod def from_open_coordinator_json(cls, obj: dict[str, Any]) -> NetworkBackup: return _open_coordinator_backup_to_network_backup(obj) class BackupManager(ListenableMixin): def __init__(self, app: zigpy.application.ControllerApplication): super().__init__() self.app: zigpy.application.ControllerApplication = app self.backups: list[NetworkBackup] = [] self._backup_task: asyncio.Task | None = None def most_recent_backup(self) -> NetworkBackup | None: """Most recent network backup""" return self.backups[-1] if self.backups else None def from_network_state(self) -> NetworkBackup: """Create a backup object from the current network's state.""" return NetworkBackup( network_info=self.app.state.network_info, node_info=self.app.state.node_info, ) async def create_backup(self, *, load_devices: bool = False) -> NetworkBackup: await self.app.load_network_info(load_devices=load_devices) backup = self.from_network_state() self.add_backup(backup) return backup async def restore_backup( self, backup: NetworkBackup, *, counter_increment: int = 10000, allow_incomplete: bool = False, create_new: bool = True, ) -> None: LOGGER.debug("Restoring backup %s", backup) if not backup.is_complete() and not allow_incomplete: raise ValueError("Backup is incomplete, it is not possible to restore") key = backup.network_info.network_key new_backup = NetworkBackup( network_info=backup.network_info.replace( 
def _network_backup_to_open_coordinator_backup(backup: NetworkBackup) -> dict[str, Any]:
    """Converts a `NetworkBackup` to an Open Coordinator Backup-compatible dictionary."""
    node_info = backup.node_info
    network_info = backup.network_info

    # Build the `devices` array by merging three sources of information:
    # known NWK addresses, the coordinator's children, and the link key table.
    devices = {}

    for ieee, nwk in network_info.nwk_addresses.items():
        devices[ieee] = {
            # Serialized bytes are reversed ([::-1]) before hex-encoding
            "ieee_address": ieee.serialize()[::-1].hex(),
            "nwk_address": nwk.serialize()[::-1].hex(),
            "is_child": False,
        }

    for ieee in network_info.children:
        if ieee not in devices:
            # Child with no known NWK address
            devices[ieee] = {
                "ieee_address": ieee.serialize()[::-1].hex(),
                "nwk_address": None,
                "is_child": True,
            }
        else:
            # Already listed via `nwk_addresses`: just mark it as a child
            devices[ieee]["is_child"] = True

    for key in network_info.key_table:
        if key.partner_ieee not in devices:
            # Link key partner that is neither a child nor has a known NWK address
            devices[key.partner_ieee] = {
                "ieee_address": key.partner_ieee.serialize()[::-1].hex(),
                "nwk_address": None,
                "is_child": False,
            }

        devices[key.partner_ieee]["link_key"] = {
            "key": key.key.serialize().hex(),
            "tx_counter": key.tx_counter,
            "rx_counter": key.rx_counter,
        }

    return {
        "metadata": {
            "version": 1,
            "format": "zigpy/open-coordinator-backup",
            "source": network_info.source,
            # zigpy-specific state that has no first-class spot in the spec
            "internal": {
                "creation_time": backup.backup_time.isoformat(),
                "node": {
                    "ieee": node_info.ieee.serialize()[::-1].hex(),
                    "nwk": node_info.nwk.serialize()[::-1].hex(),
                    "type": zigpy.state.LOGICAL_TYPE_TO_JSON[node_info.logical_type],
                    "model": node_info.model,
                    "manufacturer": node_info.manufacturer,
                    "version": node_info.version,
                },
                "network": {
                    "tc_link_key": {
                        "key": network_info.tc_link_key.key.serialize().hex(),
                        "frame_counter": network_info.tc_link_key.tx_counter,
                    },
                    "tc_address": network_info.tc_link_key.partner_ieee.serialize()[
                        ::-1
                    ].hex(),
                    # NOTE(review): `nwk_manager` is byte-reversed here but is read
                    # back *without* reversal in the deserializer — confirm upstream
                    "nwk_manager": network_info.nwk_manager_id.serialize()[::-1].hex(),
                },
                # Link key sequence numbers, keyed by the partner's hex IEEE
                "link_key_seqs": {
                    key.partner_ieee.serialize()[::-1].hex(): key.seq
                    for key in network_info.key_table
                },
                **network_info.metadata,
            },
        },
        "stack_specific": network_info.stack_specific,
        "coordinator_ieee": node_info.ieee.serialize()[::-1].hex(),
        "pan_id": network_info.pan_id.serialize()[::-1].hex(),
        "extended_pan_id": network_info.extended_pan_id.serialize()[::-1].hex(),
        "nwk_update_id": network_info.nwk_update_id,
        "security_level": network_info.security_level,
        "channel": network_info.channel,
        "channel_mask": list(network_info.channel_mask),
        "network_key": {
            "key": network_info.network_key.key.serialize().hex(),
            # `or 0` maps a missing/None counter to 0
            "sequence_number": network_info.network_key.seq or 0,
            "frame_counter": network_info.network_key.tx_counter or 0,
        },
        # Sort for a deterministic, diffable document
        "devices": sorted(devices.values(), key=lambda d: d["ieee_address"]),
    }
def _open_coordinator_backup_to_network_backup(obj: dict[str, Any]) -> NetworkBackup:
    """Creates a `NetworkBackup` from an Open Coordinator Backup dictionary."""
    internal = obj["metadata"].get("internal", {})

    node_info = zigpy.state.NodeInfo()
    node_meta = internal.get("node", {})

    # Hex strings in the document are byte-reversed before deserialization
    if "nwk" in node_meta:
        node_info.nwk, _ = t.NWK.deserialize(bytes.fromhex(node_meta["nwk"])[::-1])
    else:
        node_info.nwk = t.NWK(0x0000)

    node_info.logical_type = zigpy.state.JSON_TO_LOGICAL_TYPE[
        node_meta.get("type", "coordinator")
    ]

    # Should be identical to `metadata.internal.node.ieee`
    node_info.ieee, _ = t.EUI64.deserialize(
        bytes.fromhex(obj["coordinator_ieee"])[::-1]
    )
    node_info.model = node_meta.get("model")
    node_info.manufacturer = node_meta.get("manufacturer")
    node_info.version = node_meta.get("version")

    network_info = zigpy.state.NetworkInfo()
    network_info.source = obj["metadata"]["source"]
    # Keep any unrecognized internal metadata so it round-trips
    network_info.metadata = {
        k: v
        for k, v in internal.items()
        if k not in ("node", "network", "link_key_seqs", "creation_time")
    }
    network_info.pan_id, _ = t.NWK.deserialize(bytes.fromhex(obj["pan_id"])[::-1])
    network_info.extended_pan_id, _ = t.EUI64.deserialize(
        bytes.fromhex(obj["extended_pan_id"])[::-1]
    )
    network_info.nwk_update_id = obj["nwk_update_id"]

    network_meta = internal.get("network", {})

    if "nwk_manager" in network_meta:
        # NOTE(review): the serializer reverses the bytes ([::-1]) when writing
        # `nwk_manager`, but no reversal happens here — the round trip byte-swaps
        # the NWK manager id whenever its two bytes differ; confirm upstream
        network_info.nwk_manager_id, _ = t.NWK.deserialize(
            bytes.fromhex(network_meta["nwk_manager"])
        )
    else:
        network_info.nwk_manager_id = t.NWK(0x0000)

    network_info.channel = obj["channel"]
    network_info.channel_mask = t.Channels.from_channel_list(obj["channel_mask"])
    network_info.security_level = obj["security_level"]

    # Only overwrite the default when the document actually carries stack data
    if obj.get("stack_specific"):
        network_info.stack_specific = obj.get("stack_specific")

    network_info.tc_link_key = zigpy.state.Key()

    if "tc_link_key" in network_meta:
        network_info.tc_link_key.key, _ = t.KeyData.deserialize(
            bytes.fromhex(network_meta["tc_link_key"]["key"])
        )
        network_info.tc_link_key.tx_counter = network_meta["tc_link_key"].get(
            "frame_counter", 0
        )
        network_info.tc_link_key.partner_ieee, _ = t.EUI64.deserialize(
            bytes.fromhex(network_meta["tc_address"])[::-1]
        )
    else:
        # Fall back to the well-known default TC link key, partnered with us
        network_info.tc_link_key.key = conf.CONF_NWK_TC_LINK_KEY_DEFAULT
        network_info.tc_link_key.partner_ieee = node_info.ieee

    network_info.network_key = zigpy.state.Key()
    network_info.network_key.key, _ = t.KeyData.deserialize(
        bytes.fromhex(obj["network_key"]["key"])
    )
    network_info.network_key.tx_counter = obj["network_key"]["frame_counter"]
    network_info.network_key.seq = obj["network_key"]["sequence_number"]

    network_info.children = []
    network_info.nwk_addresses = {}

    for device in obj["devices"]:
        if device["nwk_address"] is not None:
            nwk, _ = t.NWK.deserialize(bytes.fromhex(device["nwk_address"])[::-1])
        else:
            nwk = None

        ieee, _ = t.EUI64.deserialize(bytes.fromhex(device["ieee_address"])[::-1])

        # The `is_child` key is currently optional
        if device.get("is_child", True):
            network_info.children.append(ieee)

        if nwk is not None:
            network_info.nwk_addresses[ieee] = nwk

        if "link_key" in device:
            key = zigpy.state.Key()
            key.key, _ = t.KeyData.deserialize(bytes.fromhex(device["link_key"]["key"]))
            key.tx_counter = device["link_key"]["tx_counter"]
            key.rx_counter = device["link_key"]["rx_counter"]
            key.partner_ieee = ieee

            # Sequence numbers live in zigpy-internal metadata; default to 0
            try:
                key.seq = obj["metadata"]["internal"]["link_key_seqs"][
                    device["ieee_address"]
                ]
            except KeyError:
                key.seq = 0

            network_info.key_table.append(key)

        # XXX: Devices that are not children, have no NWK address, and have no
        # link key are effectively ignored, since there is no place to write them

    if "date" in internal:
        # Z2M format
        creation_time = internal["date"].replace("Z", "+00:00")
    else:
        # Zigpy format
        creation_time = internal.get("creation_time", "1970-01-01T00:00:00+00:00")

    return NetworkBackup(
        version=BACKUP_FORMAT_VERSION,
        backup_time=datetime.fromisoformat(creation_time),
        network_info=network_info,
        node_info=node_info,
    )
"pan_id" CONF_NWK_KEY = "key" CONF_NWK_KEY_SEQ = "key_sequence_number" CONF_NWK_TC_ADDRESS = "tc_address" CONF_NWK_TC_LINK_KEY = "tc_link_key" CONF_NWK_UPDATE_ID = "update_id" CONF_NWK_BACKUP_ENABLED = "backup_enabled" CONF_NWK_BACKUP_PERIOD = "backup_period" CONF_NWK_VALIDATE_SETTINGS = "validate_network_settings" CONF_OTA = "ota" CONF_OTA_ALLOW_FILE_PROVIDERS = "allow_file_providers" CONF_OTA_DIR = "otau_directory" CONF_OTA_IKEA = "ikea_provider" CONF_OTA_IKEA_URL = "ikea_update_url" CONF_OTA_INOVELLI = "inovelli_provider" CONF_OTA_LEDVANCE = "ledvance_provider" CONF_OTA_SALUS = "salus_provider" CONF_OTA_SONOFF = "sonoff_provider" CONF_OTA_SONOFF_URL = "sonoff_update_url" CONF_OTA_THIRDREALITY = "thirdreality_provider" CONF_OTA_REMOTE_PROVIDERS = "remote_providers" CONF_OTA_PROVIDER_URL = "url" CONF_OTA_PROVIDER_MANUF_IDS = "manufacturer_ids" CONF_SOURCE_ROUTING = "source_routing" CONF_STARTUP_ENERGY_SCAN = "startup_energy_scan" CONF_TOPO_SCAN_PERIOD = "topology_scan_period" CONF_TOPO_SCAN_ENABLED = "topology_scan_enabled" CONF_TOPO_SKIP_COORDINATOR = "topology_scan_skip_coordinator" CONF_WATCHDOG_ENABLED = "watchdog_enabled" CONF_OTA_ALLOW_FILE_PROVIDERS_STRING = ( "I understand I can *destroy* my devices by enabling OTA updates from files." " Some OTA updates can be mistakenly applied to the wrong device, breaking it." " I am consciously using this at my own risk." 
) SCHEMA_DEVICE = vol.Schema( { vol.Required(CONF_DEVICE_PATH): str, vol.Optional(CONF_DEVICE_BAUDRATE, default=CONF_DEVICE_BAUDRATE_DEFAULT): int, vol.Optional( CONF_DEVICE_FLOW_CONTROL, default=CONF_DEVICE_FLOW_CONTROL_DEFAULT ): vol.In(["hardware", "software", None]), } ) SCHEMA_NETWORK = vol.Schema( { vol.Optional(CONF_NWK_CHANNEL, default=CONF_NWK_CHANNEL_DEFAULT): vol.Any( None, vol.All(cv_hex, vol.Range(min=11, max=26)) ), vol.Optional(CONF_NWK_CHANNELS, default=CONF_NWK_CHANNELS_DEFAULT): vol.Any( t.Channels, vol.All(list, t.Channels.from_channel_list) ), vol.Optional( CONF_NWK_EXTENDED_PAN_ID, default=CONF_NWK_EXTENDED_PAN_ID_DEFAULT ): vol.Any(None, t.ExtendedPanId, t.ExtendedPanId.convert), vol.Optional(CONF_NWK_KEY, default=CONF_NWK_KEY_DEFAULT): vol.Any(None, cv_key), vol.Optional(CONF_NWK_KEY_SEQ, default=CONF_NWK_KEY_SEQ_DEFAULT): vol.Range( min=0, max=255 ), vol.Optional(CONF_NWK_PAN_ID, default=CONF_NWK_PAN_ID_DEFAULT): vol.Any( None, t.PanId, vol.All(cv_hex, vol.Coerce(t.PanId)) ), vol.Optional(CONF_NWK_TC_ADDRESS, default=CONF_NWK_TC_ADDRESS_DEFAULT): vol.Any( None, t.EUI64, t.EUI64.convert ), vol.Optional( CONF_NWK_TC_LINK_KEY, default=CONF_NWK_TC_LINK_KEY_DEFAULT ): cv_key, vol.Optional(CONF_NWK_UPDATE_ID, default=CONF_NWK_UPDATE_ID_DEFAULT): vol.All( cv_hex, vol.Range(min=0, max=255) ), } ) SCHEMA_OTA_PROVIDER = vol.Schema( { vol.Required(CONF_OTA_PROVIDER_URL): str, vol.Optional(CONF_OTA_PROVIDER_MANUF_IDS, default=[]): [cv_hex], } ) SCHEMA_OTA = { vol.Optional(CONF_OTA_DIR, default=CONF_OTA_OTAU_DIR_DEFAULT): vol.Any(None, str), vol.Optional( CONF_OTA_ALLOW_FILE_PROVIDERS, default=CONF_OTA_ALLOW_FILE_PROVIDERS_DEFAULT ): vol.All(cv_exact_object(CONF_OTA_ALLOW_FILE_PROVIDERS_STRING)), vol.Optional(CONF_OTA_IKEA, default=CONF_OTA_IKEA_DEFAULT): cv_boolean, vol.Optional(CONF_OTA_INOVELLI, default=CONF_OTA_INOVELLI_DEFAULT): cv_boolean, vol.Optional(CONF_OTA_LEDVANCE, default=CONF_OTA_LEDVANCE_DEFAULT): cv_boolean, vol.Optional(CONF_OTA_SALUS, 
default=CONF_OTA_SALUS_DEFAULT): cv_boolean, vol.Optional(CONF_OTA_SONOFF, default=CONF_OTA_SONOFF_DEFAULT): cv_boolean, vol.Optional(CONF_OTA_SONOFF_URL): vol.Url(), vol.Optional( CONF_OTA_THIRDREALITY, default=CONF_OTA_THIRDREALITY_DEFAULT ): cv_boolean, vol.Optional(CONF_OTA_REMOTE_PROVIDERS, default=[]): [SCHEMA_OTA_PROVIDER], # Deprecated keys vol.Optional(CONF_OTA_IKEA_URL): vol.All( cv_deprecated("The `ikea_update_url` key is deprecated and should be removed"), vol.Url(), ), } ZIGPY_SCHEMA = vol.Schema( { vol.Optional(CONF_DATABASE, default=None): vol.Any(None, str), vol.Optional(CONF_NWK, default={}): SCHEMA_NETWORK, vol.Optional(CONF_OTA, default={}): SCHEMA_OTA, vol.Optional( CONF_TOPO_SCAN_PERIOD, default=CONF_TOPO_SCAN_PERIOD_DEFAULT ): vol.All(int, vol.Range(min=20)), vol.Optional( CONF_TOPO_SCAN_ENABLED, default=CONF_TOPO_SCAN_ENABLED_DEFAULT ): cv_boolean, vol.Optional( CONF_TOPO_SKIP_COORDINATOR, default=CONF_TOPO_SKIP_COORDINATOR_DEFAULT ): cv_boolean, vol.Optional( CONF_NWK_BACKUP_ENABLED, default=CONF_NWK_BACKUP_ENABLED_DEFAULT ): cv_boolean, vol.Optional( CONF_NWK_BACKUP_PERIOD, default=CONF_NWK_BACKUP_PERIOD_DEFAULT ): vol.All(cv_hex, vol.Range(min=1)), vol.Optional( CONF_NWK_VALIDATE_SETTINGS, default=CONF_NWK_VALIDATE_SETTINGS_DEFAULT ): cv_boolean, vol.Optional(CONF_ADDITIONAL_ENDPOINTS, default=[]): [cv_simple_descriptor], vol.Optional( CONF_MAX_CONCURRENT_REQUESTS, default=CONF_MAX_CONCURRENT_REQUESTS_DEFAULT ): vol.All(int, vol.Range(min=0)), vol.Optional(CONF_SOURCE_ROUTING, default=CONF_SOURCE_ROUTING_DEFAULT): ( cv_boolean ), vol.Optional( CONF_STARTUP_ENERGY_SCAN, default=CONF_STARTUP_ENERGY_SCAN_DEFAULT ): cv_boolean, vol.Optional( CONF_WATCHDOG_ENABLED, default=CONF_WATCHDOG_ENABLED_DEFAULT ): cv_boolean, }, extra=vol.ALLOW_EXTRA, ) CONFIG_SCHEMA = ZIGPY_SCHEMA.extend( {vol.Required(CONF_DEVICE): SCHEMA_DEVICE}, extra=vol.ALLOW_EXTRA ) 
zigpy-0.62.3/zigpy/config/defaults.py000066400000000000000000000022231456054056700175470ustar00rootroot00000000000000from __future__ import annotations import zigpy.types as t CONF_DEVICE_BAUDRATE_DEFAULT = 115200 CONF_DEVICE_FLOW_CONTROL_DEFAULT = None CONF_STARTUP_ENERGY_SCAN_DEFAULT = True CONF_MAX_CONCURRENT_REQUESTS_DEFAULT = 8 CONF_NWK_BACKUP_ENABLED_DEFAULT = True CONF_NWK_BACKUP_PERIOD_DEFAULT = 24 * 60 # 24 hours CONF_NWK_CHANNEL_DEFAULT = None CONF_NWK_CHANNELS_DEFAULT = [11, 15, 20, 25] CONF_NWK_EXTENDED_PAN_ID_DEFAULT = None CONF_NWK_PAN_ID_DEFAULT = None CONF_NWK_KEY_DEFAULT = None CONF_NWK_KEY_SEQ_DEFAULT = 0x00 CONF_NWK_TC_ADDRESS_DEFAULT = None CONF_NWK_TC_LINK_KEY_DEFAULT = t.KeyData(b"ZigBeeAlliance09") CONF_NWK_UPDATE_ID_DEFAULT = 0x00 CONF_NWK_VALIDATE_SETTINGS_DEFAULT = False CONF_OTA_ALLOW_FILE_PROVIDERS_DEFAULT = False CONF_OTA_IKEA_DEFAULT = False CONF_OTA_INOVELLI_DEFAULT = True CONF_OTA_LEDVANCE_DEFAULT = True CONF_OTA_OTAU_DIR_DEFAULT = None CONF_OTA_SALUS_DEFAULT = True CONF_OTA_SONOFF_DEFAULT = True CONF_OTA_THIRDREALITY_DEFAULT = True CONF_SOURCE_ROUTING_DEFAULT = False CONF_TOPO_SCAN_PERIOD_DEFAULT = 4 * 60 # 4 hours CONF_TOPO_SCAN_ENABLED_DEFAULT = True CONF_TOPO_SKIP_COORDINATOR_DEFAULT = False CONF_WATCHDOG_ENABLED_DEFAULT = True zigpy-0.62.3/zigpy/config/validators.py000066400000000000000000000051241456054056700201130ustar00rootroot00000000000000from __future__ import annotations import typing import warnings import voluptuous as vol import zigpy.types as t import zigpy.zdo.types as zdo_t def cv_boolean(value: bool | int | str) -> bool: """Validate and coerce a boolean value.""" if isinstance(value, bool): return value if isinstance(value, str): value = value.lower().strip() if value in ("1", "true", "yes", "on", "enable"): return True if value in ("0", "false", "no", "off", "disable"): return False elif isinstance(value, int): return bool(value) raise vol.Invalid(f"invalid boolean '{value}' value") def cv_hex(value: int | str) -> 
int: """Convert string with possible hex number into int.""" if isinstance(value, int): return value if not isinstance(value, str): raise vol.Invalid(f"{value} is not a valid hex number") try: if value.startswith("0x"): value = int(value, base=16) else: value = int(value) except ValueError: raise vol.Invalid(f"Could not convert '{value}' to number") return value def cv_key(key: list[int]) -> t.KeyData: """Validate a key.""" if not isinstance(key, list) or not all(isinstance(v, int) for v in key): raise vol.Invalid("key must be a list of integers") if len(key) != 16: raise vol.Invalid("key length must be 16") if not all(0 <= e <= 255 for e in key): raise vol.Invalid("Key bytes must be within (0..255) range") return t.KeyData(key) def cv_simple_descriptor(obj: dict[str, typing.Any]) -> zdo_t.SimpleDescriptor: """Validates a ZDO simple descriptor.""" if isinstance(obj, zdo_t.SimpleDescriptor): return obj elif not isinstance(obj, dict): raise vol.Invalid("Not a dictionary") descriptor = zdo_t.SimpleDescriptor(**obj) if not descriptor.is_valid: raise vol.Invalid(f"Invalid simple descriptor {descriptor!r}") return descriptor def cv_deprecated(message: str) -> typing.Callable[[typing.Any], typing.Any]: """Factory function for creating a deprecation warning validator.""" def wrapper(obj: typing.Any) -> typing.Any: warnings.warn(message, DeprecationWarning, stacklevel=2) return obj return wrapper def cv_exact_object(expected_value: str) -> typing.Callable[[typing.Any], bool]: """Factory function for creating an exact object comparison validator.""" def wrapper(obj: typing.Any) -> typing.Any: if obj != expected_value: return False return expected_value return wrapper zigpy-0.62.3/zigpy/const.py000066400000000000000000000012031456054056700156160ustar00rootroot00000000000000from __future__ import annotations """Zigpy Constants.""" SIG_ENDPOINTS = "endpoints" SIG_EP_INPUT = "input_clusters" SIG_EP_OUTPUT = "output_clusters" SIG_EP_PROFILE = "profile_id" SIG_EP_TYPE = 
"device_type" SIG_MANUFACTURER = "manufacturer" SIG_MODEL = "model" SIG_MODELS_INFO = "models_info" SIG_NODE_DESC = "node_desc" SIG_SKIP_CONFIG = "skip_configuration" INTERFERENCE_MESSAGE = ( "If you are having problems joining new devices, are missing sensor" " updates, or have issues keeping devices joined, ensure your" " coordinator is away from interference sources such as USB 3.0" " devices, SSDs, WiFi routers, etc." ) zigpy-0.62.3/zigpy/datastructures.py000066400000000000000000000127361456054056700175620ustar00rootroot00000000000000"""Primitive data structures.""" from __future__ import annotations import asyncio import contextlib import functools import heapq import types import typing class WrappedContextManager: def __init__( self, context_manager: contextlib.AbstractAsyncContextManager, on_enter: typing.Callable[[], typing.Awaitable[None]], ) -> None: self.on_enter = on_enter self.context_manager = context_manager async def __aenter__(self) -> None: await self.on_enter() return self.context_manager async def __aexit__( self, exc_type: type[BaseException] | None, exc: BaseException | None, traceback: types.TracebackType | None, ) -> None: await self.context_manager.__aexit__(exc_type, exc, traceback) class PriorityDynamicBoundedSemaphore(asyncio.Semaphore): """`asyncio.BoundedSemaphore` with public interface to change the max value.""" def __init__(self, value: int = 0) -> None: self._value: int = value self._max_value: int = value self._comparison_counter: int = 0 self._waiters: list[tuple[int, int, asyncio.Future]] = [] self._wakeup_scheduled: bool = False @property @functools.lru_cache(maxsize=None) def _loop(self) -> asyncio.BaseEventLoop: return asyncio.get_running_loop() def _wake_up_next(self) -> None: while self._waiters: _, _, waiter = heapq.heappop(self._waiters) if not waiter.done(): waiter.set_result(None) self._wakeup_scheduled = True return @property def value(self) -> int: return self._value @property def max_value(self) -> int: return 
self._max_value @max_value.setter def max_value(self, new_value: int) -> None: """Update the semaphore's max value.""" if new_value < 0: raise ValueError(f"Semaphore value must be >= 0: {new_value!r}") delta = new_value - self._max_value self._value += delta self._max_value += delta # Wake up any pending waiters for _ in range(min(len(self._waiters), max(0, delta))): self._wake_up_next() @property def num_waiting(self) -> int: return len(self._waiters) def locked(self) -> bool: """Returns True if semaphore cannot be acquired immediately.""" return self._value <= 0 async def acquire(self, priority: int = 0) -> typing.Literal[True]: """Acquire a semaphore. If the internal counter is larger than zero on entry, decrement it by one and return True immediately. If it is zero on entry, block, waiting until some other coroutine has called release() to make it larger than 0, and then return True. """ # _wakeup_scheduled is set if *another* task is scheduled to wakeup # but its acquire() is not resumed yet while self._wakeup_scheduled or self._value <= 0: # To ensure that our objects don't have to be themselves comparable, we # maintain a global count and increment it on every insert. This way, # the tuple `(-priority, count, item)` will never have to compare `item`. self._comparison_counter += 1 fut = self._loop.create_future() obj = (-priority, self._comparison_counter, fut) heapq.heappush(self._waiters, obj) try: await fut # reset _wakeup_scheduled *after* waiting for a future self._wakeup_scheduled = False except asyncio.CancelledError: self._wake_up_next() raise assert self._value > 0 self._value -= 1 return True def release(self) -> None: """Release a semaphore, incrementing the internal counter by one. When it was zero on entry and another coroutine is waiting for it to become larger than zero again, wake up that coroutine. 
""" if self._value >= self._max_value: raise ValueError("Semaphore released too many times") self._value += 1 self._wake_up_next() def __call__(self, priority: int = 0): """Allows specifying the priority by calling the context manager. This allows both `async with sem:` and `async with sem(priority=5):`. """ return WrappedContextManager( context_manager=self, on_enter=lambda: self.acquire(priority), ) async def __aenter__(self) -> None: await self.acquire() return None async def __aexit__( self, exc_type: type[BaseException] | None, exc: BaseException | None, traceback: types.TracebackType | None, ) -> None: self.release() def __repr__(self) -> str: if self.locked(): extra = f"locked, max value:{self._max_value}, waiters:{len(self._waiters)}" else: extra = f"unlocked, value:{self._value}, max value:{self._max_value}" return f"<{self.__class__.__name__} [{extra}]>" class PriorityLock(PriorityDynamicBoundedSemaphore): def __init__(self): super().__init__(value=1) @PriorityDynamicBoundedSemaphore.max_value.setter def max_value(self, new_value: int) -> None: """Update the locks's max value.""" raise ValueError("Max value of lock cannot be updated") # Backwards compatibility DynamicBoundedSemaphore = PriorityDynamicBoundedSemaphore zigpy-0.62.3/zigpy/device.py000066400000000000000000000521021456054056700157330ustar00rootroot00000000000000from __future__ import annotations import asyncio from datetime import datetime, timezone import enum import itertools import logging import sys import typing import warnings from zigpy.ota.manager import update_firmware if sys.version_info[:2] < (3, 11): from async_timeout import timeout as asyncio_timeout # pragma: no cover else: from asyncio import timeout as asyncio_timeout # pragma: no cover from zigpy.const import ( SIG_ENDPOINTS, SIG_EP_INPUT, SIG_EP_OUTPUT, SIG_EP_PROFILE, SIG_EP_TYPE, SIG_MANUFACTURER, SIG_MODEL, SIG_NODE_DESC, ) import zigpy.endpoint import zigpy.exceptions import zigpy.listeners import zigpy.types as t from 
zigpy.typing import AddressingMode import zigpy.util import zigpy.zcl.foundation as foundation import zigpy.zdo as zdo import zigpy.zdo.types as zdo_t if typing.TYPE_CHECKING: from zigpy.application import ControllerApplication from zigpy.ota.image import BaseOTAImage APS_REPLY_TIMEOUT = 5 APS_REPLY_TIMEOUT_EXTENDED = 28 LOGGER = logging.getLogger(__name__) class Status(enum.IntEnum): """The status of a Device. Maintained for backwards compatibility.""" # No initialization done NEW = 0 # ZDO endpoint discovery done ZDO_INIT = 1 # Endpoints initialized ENDPOINTS_INIT = 2 class Device(zigpy.util.LocalLogMixin, zigpy.util.ListenableMixin): """A device on the network""" manufacturer_id_override = None def __init__(self, application: ControllerApplication, ieee: t.EUI64, nwk: t.NWK): self._application: ControllerApplication = application self._ieee: t.EUI64 = ieee self.nwk: t.NWK = t.NWK(nwk) self.zdo: zdo.ZDO = zdo.ZDO(self) self.endpoints: dict[int, zdo.ZDO | zigpy.endpoint.Endpoint] = {0: self.zdo} self.lqi: int | None = None self.rssi: int | None = None self.ota_in_progress: bool = False self._last_seen: datetime | None = None self._initialize_task: asyncio.Task | None = None self._group_scan_task: asyncio.Task | None = None self._listeners = {} self._manufacturer: str | None = None self._model: str | None = None self.node_desc: zdo_t.NodeDescriptor | None = None self._pending: zigpy.util.Requests[t.uint8_t] = zigpy.util.Requests() self._relays: t.Relays | None = None self._skip_configuration: bool = False self._send_sequence: int = 0 # Retained for backwards compatibility, will be removed in a future release self.status = Status.NEW def get_sequence(self) -> t.uint8_t: self._send_sequence = (self._send_sequence + 1) % 256 return self._send_sequence @property def name(self) -> str: return f"0x{self.nwk:04X}" def update_last_seen(self) -> None: """Update the `last_seen` attribute to the current time and emit an event.""" warnings.warn( "Calling `update_last_seen` 
directly is deprecated", DeprecationWarning ) self.last_seen = datetime.now(timezone.utc) @property def last_seen(self) -> float | None: return self._last_seen.timestamp() if self._last_seen is not None else None @last_seen.setter def last_seen(self, value: datetime | int | float): if isinstance(value, (int, float)): value = datetime.fromtimestamp(value, timezone.utc) self._last_seen = value self.listener_event("device_last_seen_updated", self._last_seen) @property def non_zdo_endpoints(self) -> list[zigpy.endpoint.Endpoint]: return [ ep for epid, ep in self.endpoints.items() if not (isinstance(ep, zdo.ZDO)) ] @property def has_non_zdo_endpoints(self) -> bool: return bool(self.non_zdo_endpoints) @property def all_endpoints_init(self) -> bool: return self.has_non_zdo_endpoints and all( ep.status != zigpy.endpoint.Status.NEW for ep in self.non_zdo_endpoints ) @property def is_initialized(self) -> bool: return self.node_desc is not None and self.all_endpoints_init def schedule_group_membership_scan(self) -> asyncio.Task: """Rescan device group's membership.""" if self._group_scan_task and not self._group_scan_task.done(): self.debug("Cancelling old group rescan") self._group_scan_task.cancel() self._group_scan_task = asyncio.create_task(self.group_membership_scan()) return self._group_scan_task async def group_membership_scan(self) -> None: """Sync up group membership.""" for ep in self.non_zdo_endpoints: await ep.group_membership_scan() @property def initializing(self) -> bool: """Return True if device is being initialized.""" return self._initialize_task is not None and not self._initialize_task.done() def cancel_initialization(self) -> None: """Cancel initialization call.""" if self.initializing: self.debug("Canceling old initialize call") self._initialize_task.cancel() # type:ignore[union-attr] def schedule_initialize(self) -> asyncio.Task | None: # Already-initialized devices don't need to be re-initialized if self.is_initialized: self.debug("Skipping 
initialization, device is fully initialized") self._application.device_initialized(self) return None self.debug("Scheduling initialization") self.cancel_initialization() self._initialize_task = asyncio.create_task(self.initialize()) return self._initialize_task async def get_node_descriptor(self) -> zdo_t.NodeDescriptor: self.info("Requesting 'Node Descriptor'") status, _, node_desc = await self.zdo.Node_Desc_req(self.nwk) if status != zdo_t.Status.SUCCESS: raise zigpy.exceptions.InvalidResponse( f"Requesting Node Descriptor failed: {status}" ) self.node_desc = node_desc self.info("Got Node Descriptor: %s", node_desc) return node_desc async def initialize(self) -> None: try: await self._initialize() except (asyncio.TimeoutError, zigpy.exceptions.ZigbeeException): self.application.listener_event("device_init_failure", self) except Exception: LOGGER.warning( "Device %r failed to initialize due to unexpected error", self, exc_info=True, ) self.application.listener_event("device_init_failure", self) @zigpy.util.retryable_request(tries=5, delay=0.5) async def _initialize(self) -> None: """Attempts multiple times to discover all basic information about a device: namely its node descriptor, all endpoints and clusters, and the model and manufacturer attributes from any Basic cluster exposing those attributes. 
""" # Some devices are improperly initialized and are missing a node descriptor if self.node_desc is None: await self.get_node_descriptor() # Devices should have endpoints other than ZDO if self.has_non_zdo_endpoints: self.info("Already have endpoints: %s", self.endpoints) else: self.info("Discovering endpoints") status, _, endpoints = await self.zdo.Active_EP_req(self.nwk) if status != zdo_t.Status.SUCCESS: raise zigpy.exceptions.InvalidResponse( f"Endpoint request failed: {status}" ) self.info("Discovered endpoints: %s", endpoints) for endpoint_id in endpoints: if endpoint_id != 0: self.add_endpoint(endpoint_id) self.status = Status.ZDO_INIT # Initialize all of the discovered endpoints if self.all_endpoints_init: self.info( "All endpoints are already initialized: %s", self.non_zdo_endpoints ) else: self.info("Initializing endpoints %s", self.non_zdo_endpoints) for ep in self.non_zdo_endpoints: await ep.initialize() # Query model info if self.model is not None and self.manufacturer is not None: self.info("Already have model and manufacturer info") else: for ep in self.non_zdo_endpoints: if self.model is None or self.manufacturer is None: model, manufacturer = await ep.get_model_info() self.info( "Read model %r and manufacturer %r from %s", model, manufacturer, ep, ) if model is not None: self.model = model if manufacturer is not None: self.manufacturer = manufacturer self.status = Status.ENDPOINTS_INIT self.info("Discovered basic device information for %s", self) # Signal to the application that the device is ready self._application.device_initialized(self) def add_endpoint(self, endpoint_id) -> zigpy.endpoint.Endpoint: ep = zigpy.endpoint.Endpoint(self, endpoint_id) self.endpoints[endpoint_id] = ep return ep async def add_to_group(self, grp_id: int, name: str = None) -> None: for ep in self.non_zdo_endpoints: await ep.add_to_group(grp_id, name) async def remove_from_group(self, grp_id: int) -> None: for ep in self.non_zdo_endpoints: await 
ep.remove_from_group(grp_id) async def request( self, profile, cluster, src_ep, dst_ep, sequence, data, expect_reply=True, timeout=APS_REPLY_TIMEOUT, use_ieee=False, ): extended_timeout = False if expect_reply and (self.node_desc is None or self.node_desc.is_end_device): self.debug("Extending timeout for 0x%02x request", sequence) timeout = APS_REPLY_TIMEOUT_EXTENDED extended_timeout = True # Use a lambda so we don't leave the coroutine unawaited in case of an exception send_request = lambda: self._application.request( # noqa: E731 self, profile, cluster, src_ep, dst_ep, sequence, data, expect_reply=expect_reply, use_ieee=use_ieee, extended_timeout=extended_timeout, ) if not expect_reply: await send_request() return None # Only create a pending request if we are expecting a reply with self._pending.new(sequence) as req: await send_request() async with asyncio_timeout(timeout): return await req.result def handle_message( self, profile: int, cluster: int, src_ep: int, dst_ep: int, message: bytes, *, dst_addressing: AddressingMode | None = None, ): """Deprecated compatibility function. 
Use `packet_received` instead.""" warnings.warn( "`handle_message` is deprecated, use `packet_received`", DeprecationWarning ) if dst_addressing is None: dst_addressing = t.AddrMode.NWK self.packet_received( t.ZigbeePacket( profile_id=profile, cluster_id=cluster, src_ep=src_ep, dst_ep=dst_ep, data=t.SerializableBytes(message), dst=t.AddrModeAddress( addr_mode=dst_addressing, address={ t.AddrMode.NWK: self.nwk, t.AddrMode.IEEE: self.ieee, }[dst_addressing], ), ) ) def deserialize(self, endpoint_id, cluster_id, data): """Deprecated compatibility function.""" warnings.warn( "`deserialize` is deprecated, avoid rewriting packet structures this way", DeprecationWarning, ) return self.endpoints[endpoint_id].deserialize(cluster_id, data) def packet_received(self, packet: t.ZigbeePacket) -> None: # Set radio details that can be read from any type of packet self.last_seen = packet.timestamp if packet.lqi is not None: self.lqi = packet.lqi if packet.rssi is not None: self.rssi = packet.rssi # Filter out packets that refer to unknown endpoints or clusters if packet.src_ep not in self.endpoints: self.debug( "Ignoring message on unknown endpoint %s (expected one of %s)", packet.src_ep, self.endpoints, ) return endpoint = self.endpoints[packet.src_ep] # Ignore packets that do not match the endpoint's clusters. # TODO: this isn't actually necessary, we can parse most packets by cluster ID. if ( packet.dst_ep != zdo.ZDO_ENDPOINT and packet.cluster_id not in endpoint.in_clusters and packet.cluster_id not in endpoint.out_clusters ): self.debug( "Ignoring message on unknown cluster %s for endpoint %s", packet.cluster_id, endpoint, ) return # Parse the ZCL/ZDO header first. This should never fail. 
data = packet.data.serialize() if packet.dst_ep == zdo.ZDO_ENDPOINT: hdr, _ = zdo_t.ZDOHeader.deserialize(packet.cluster_id, data) else: hdr, _ = foundation.ZCLHeader.deserialize(data) try: if ( type(self).deserialize is not Device.deserialize or getattr(self.deserialize, "__func__", None) is not Device.deserialize ): # XXX: support for custom deserialization will be removed hdr, args = self.deserialize(packet.src_ep, packet.cluster_id, data) else: # Next, parse the ZCL/ZDO payload # FIXME: ZCL deserialization mutates the header! hdr, args = endpoint.deserialize(packet.cluster_id, data) except Exception as exc: error = zigpy.exceptions.ParsingError() error.__cause__ = exc self.debug("Failed to parse packet %r", packet, exc_info=error) else: error = None # Resolve the future if this is a response to a request if hdr.tsn in self._pending and ( hdr.direction == foundation.Direction.Server_to_Client if isinstance(hdr, foundation.ZCLHeader) else hdr.is_reply ): future = self._pending[hdr.tsn] try: if error is not None: future.result.set_exception(error) else: future.result.set_result(args) except asyncio.InvalidStateError: self.debug( ( "Invalid state on future for 0x%02x seq " "-- probably duplicate response" ), hdr.tsn, ) return if error is not None: return # Pass the request off to a listener, if one is registered for listener in itertools.chain( self._application._req_listeners[zigpy.listeners.ANY_DEVICE], self._application._req_listeners[self], ): # Resolve only until the first future listener if listener.resolve(hdr, args) and isinstance( listener, zigpy.listeners.FutureListener ): break # Finally, pass it off to the endpoint message handler. This will be removed. 
endpoint.handle_message( packet.profile_id, packet.cluster_id, hdr, args, dst_addressing=packet.dst.addr_mode if packet.dst is not None else None, ) async def reply( self, profile, cluster, src_ep, dst_ep, sequence, data, use_ieee=False ): return await self.request( profile, cluster, src_ep, dst_ep, sequence, data, expect_reply=False, use_ieee=use_ieee, ) async def update_firmware( self, firmware_image: BaseOTAImage, progress_callback: callable = None, force: bool = False, ) -> foundation.Status: """Update device firmware.""" if self.ota_in_progress: self.debug("OTA already in progress") return self.ota_in_progress = True try: result = await update_firmware( self, firmware_image, progress_callback, force ) except Exception as exc: self.ota_in_progress = False self.debug("OTA failed!", exc_info=exc) raise exc self.ota_in_progress = False return result def radio_details(self, lqi=None, rssi=None) -> None: if lqi is not None: self.lqi = lqi if rssi is not None: self.rssi = rssi def log(self, lvl, msg, *args, **kwargs) -> None: msg = "[0x%04x] " + msg args = (self.nwk,) + args LOGGER.log(lvl, msg, *args, **kwargs) @property def application(self) -> ControllerApplication: return self._application @property def ieee(self) -> t.EUI64: return self._ieee @property def manufacturer(self) -> str | None: return self._manufacturer @manufacturer.setter def manufacturer(self, value) -> None: if isinstance(value, str): self._manufacturer = value @property def manufacturer_id(self) -> int | None: """Return manufacturer id.""" if self.manufacturer_id_override: return self.manufacturer_id_override elif self.node_desc is not None: return self.node_desc.manufacturer_code else: return None @property def model(self) -> str | None: return self._model @model.setter def model(self, value) -> None: if isinstance(value, str): self._model = value @property def skip_configuration(self) -> bool: return self._skip_configuration @skip_configuration.setter def skip_configuration(self, 
should_skip_configuration) -> None: if isinstance(should_skip_configuration, bool): self._skip_configuration = should_skip_configuration else: self._skip_configuration = False @property def relays(self) -> t.Relays | None: """Relay list.""" return self._relays @relays.setter def relays(self, relays: t.Relays | None) -> None: if relays is None: pass elif not isinstance(relays, t.Relays): relays = t.Relays(relays) self._relays = relays self.listener_event("device_relays_updated", relays) def __getitem__(self, key): return self.endpoints[key] def get_signature(self) -> dict[str, typing.Any]: # return the device signature by providing essential device information # - Model Identifier ( Attribute 0x0005 of Basic Cluster 0x0000 ) # - Manufacturer Name ( Attribute 0x0004 of Basic Cluster 0x0000 ) # - Endpoint list # - Profile Id, Device Id, Cluster Out, Cluster In signature: dict[str, typing.Any] = {} if self._manufacturer is not None: signature[SIG_MANUFACTURER] = self.manufacturer if self._model is not None: signature[SIG_MODEL] = self._model if self.node_desc is not None: signature[SIG_NODE_DESC] = self.node_desc.as_dict() for endpoint_id, endpoint in self.endpoints.items(): if endpoint_id == 0: # ZDO continue signature.setdefault(SIG_ENDPOINTS, {}) in_clusters = list(endpoint.in_clusters) out_clusters = list(endpoint.out_clusters) signature[SIG_ENDPOINTS][endpoint_id] = { SIG_EP_PROFILE: endpoint.profile_id, SIG_EP_TYPE: endpoint.device_type, SIG_EP_INPUT: in_clusters, SIG_EP_OUTPUT: out_clusters, } return signature def __repr__(self) -> str: return ( f"<" f"{type(self).__name__}" f" model={self.model!r}" f" manuf={self.manufacturer!r}" f" nwk={t.NWK(self.nwk)}" f" ieee={self.ieee}" f" is_initialized={self.is_initialized}" f">" ) async def broadcast( app, profile, cluster, src_ep, dst_ep, grpid, radius, sequence, data, broadcast_address=t.BroadcastAddress.RX_ON_WHEN_IDLE, ): return await app.broadcast( profile, cluster, src_ep, dst_ep, grpid, radius, sequence, data, 
broadcast_address=broadcast_address, ) zigpy-0.62.3/zigpy/endpoint.py000066400000000000000000000267441456054056700163310ustar00rootroot00000000000000from __future__ import annotations import asyncio import enum import logging from typing import Any import zigpy.exceptions import zigpy.profiles import zigpy.types as t from zigpy.typing import AddressingMode, DeviceType import zigpy.util import zigpy.zcl from zigpy.zcl.foundation import ( GENERAL_COMMANDS, CommandSchema, GeneralCommand, Status as ZCLStatus, ZCLHeader, ) from zigpy.zdo.types import Status as ZDOStatus LOGGER = logging.getLogger(__name__) class Status(enum.IntEnum): """The status of an Endpoint""" # No initialization is done NEW = 0 # Endpoint information (device type, clusters, etc) init done ZDO_INIT = 1 # Endpoint Inactive ENDPOINT_INACTIVE = 3 class Endpoint(zigpy.util.LocalLogMixin, zigpy.util.ListenableMixin): """An endpoint on a device on the network""" def __init__(self, device: DeviceType, endpoint_id: int) -> None: self._device: DeviceType = device self._endpoint_id: int = endpoint_id self._listeners: dict = {} self.status: Status = Status.NEW self.profile_id: int | None = None self.device_type: zigpy.profiles.zha.DeviceType | None = None self.in_clusters: dict = {} self.out_clusters: dict = {} self._cluster_attr: dict = {} self._member_of: dict = {} self._manufacturer: str | None = None self._model: str | None = None async def initialize(self) -> None: self.info("Discovering endpoint information") if self.profile_id is not None or self.status == Status.ENDPOINT_INACTIVE: self.info("Endpoint descriptor already queried") else: status, _, sd = await self._device.zdo.Simple_Desc_req( self._device.nwk, self._endpoint_id ) if status == ZDOStatus.NOT_ACTIVE: # These endpoints are essentially junk but this lets the device join self.status = Status.ENDPOINT_INACTIVE return elif status != ZDOStatus.SUCCESS: raise zigpy.exceptions.InvalidResponse( "Failed to retrieve service descriptor: %s", status ) 
self.info("Discovered endpoint information: %s", sd) self.profile_id = sd.profile self.device_type = sd.device_type if self.profile_id == zigpy.profiles.zha.PROFILE_ID: self.device_type = zigpy.profiles.zha.DeviceType(self.device_type) elif self.profile_id == zigpy.profiles.zll.PROFILE_ID: self.device_type = zigpy.profiles.zll.DeviceType(self.device_type) for cluster in sd.input_clusters: self.add_input_cluster(cluster) for cluster in sd.output_clusters: self.add_output_cluster(cluster) self.status = Status.ZDO_INIT def add_input_cluster( self, cluster_id: int, cluster: zigpy.zcl.Cluster | None = None ) -> zigpy.zcl.Cluster: """Adds an endpoint's input cluster (a server cluster supported by the device) """ if cluster is None: if cluster_id in self.in_clusters: return self.in_clusters[cluster_id] cluster = zigpy.zcl.Cluster.from_id(self, cluster_id, is_server=True) self.in_clusters[cluster_id] = cluster if cluster.ep_attribute is not None: self._cluster_attr[cluster.ep_attribute] = cluster if self._device.application._dblistener is not None: listener = zigpy.zcl.ClusterPersistingListener( self._device.application._dblistener, cluster ) cluster.add_listener(listener) return cluster def add_output_cluster( self, cluster_id: int, cluster: zigpy.zcl.Cluster | None = None ) -> zigpy.zcl.Cluster: """Adds an endpoint's output cluster (a client cluster supported by the device) """ if cluster is None: if cluster_id in self.out_clusters: return self.out_clusters[cluster_id] cluster = zigpy.zcl.Cluster.from_id(self, cluster_id, is_server=False) self.out_clusters[cluster_id] = cluster return cluster async def add_to_group(self, grp_id: int, name: str | None = None) -> ZCLStatus: try: res = await self.groups.add(grp_id, name) except AttributeError: self.debug("Cannot add 0x%04x group, no groups cluster", grp_id) return ZCLStatus.FAILURE if res[0] not in (ZCLStatus.SUCCESS, ZCLStatus.DUPLICATE_EXISTS): self.debug("Couldn't add to 0x%04x group: %s", grp_id, res[0]) return res[0] 
group = self.device.application.groups.add_group(grp_id, name) group.add_member(self) return res[0] async def remove_from_group(self, grp_id: int) -> ZCLStatus: try: res = await self.groups.remove(grp_id) except AttributeError: self.debug("Cannot remove 0x%04x group, no groups cluster", grp_id) return ZCLStatus.FAILURE if res[0] not in (ZCLStatus.SUCCESS, ZCLStatus.NOT_FOUND): self.debug("Couldn't remove to 0x%04x group: %s", grp_id, res[0]) return res[0] if grp_id in self.device.application.groups: self.device.application.groups[grp_id].remove_member(self) return res[0] async def group_membership_scan(self) -> None: """Sync up group membership.""" try: res = await self.groups.get_membership(groups=[]) except AttributeError: return except (asyncio.TimeoutError, zigpy.exceptions.ZigbeeException): self.debug("Failed to sync-up group membership") return if isinstance(res, GENERAL_COMMANDS[GeneralCommand.Default_Response].schema): self.debug("Device does not support group commands: %s", res) return groups = set(res[1]) self.device.application.groups.update_group_membership(self, groups) async def get_model_info(self) -> tuple[str | None, str | None]: if zigpy.zcl.clusters.general.Basic.cluster_id not in self.in_clusters: return None, None # Some devices can't handle multiple attributes in the same read request for names in (["manufacturer", "model"], ["manufacturer"], ["model"]): try: success, failure = await self.basic.read_attributes( names, allow_cache=True ) except asyncio.TimeoutError: # Only swallow the `TimeoutError` on the double attribute read if len(names) == 2: continue raise if "model" in success: self._model = success["model"] if "manufacturer" in success: self._manufacturer = success["manufacturer"] return self._model, self._manufacturer def deserialize( self, cluster_id: t.ClusterId, data: bytes ) -> tuple[ZCLHeader, CommandSchema]: """Deserialize data for ZCL""" if cluster_id not in self.in_clusters and cluster_id not in self.out_clusters: raise 
KeyError(f"No cluster ID 0x{cluster_id:04x} on {self.unique_id}") cluster = self.in_clusters.get(cluster_id, self.out_clusters.get(cluster_id)) return cluster.deserialize(data) def handle_message( self, profile: int, cluster: int, hdr: ZCLHeader, args: list, *, dst_addressing: AddressingMode | None = None, ) -> None: if cluster in self.in_clusters: handler = self.in_clusters[cluster].handle_message elif cluster in self.out_clusters: handler = self.out_clusters[cluster].handle_message else: self.debug("Message on unknown cluster 0x%04x", cluster) self.listener_event("unknown_cluster_message", hdr.command_id, args) return handler(hdr, args, dst_addressing=dst_addressing) async def request( self, cluster: t.ClusterId, sequence: t.uint8_t, data: bytes, expect_reply: bool = True, command_id: GeneralCommand | t.uint8_t = 0x00, ): if self.profile_id == zigpy.profiles.zll.PROFILE_ID and not ( cluster == zigpy.zcl.clusters.lightlink.LightLink.cluster_id and command_id < 0x40 ): profile_id = zigpy.profiles.zha.PROFILE_ID else: profile_id = self.profile_id return await self.device.request( profile_id, cluster, self._endpoint_id, self._endpoint_id, sequence, data, expect_reply=expect_reply, ) async def reply( self, cluster: t.ClusterId, sequence: t.uint8_t, data: bytes, command_id: GeneralCommand | t.uint8_t = 0x00, ) -> None: if self.profile_id == zigpy.profiles.zll.PROFILE_ID and not ( cluster == zigpy.zcl.clusters.lightlink.LightLink.cluster_id and command_id < 0x40 ): profile_id = zigpy.profiles.zha.PROFILE_ID else: profile_id = self.profile_id return await self.device.reply( profile_id, cluster, self._endpoint_id, self._endpoint_id, sequence, data ) def log(self, lvl: int, msg: str, *args: Any, **kwargs: Any) -> None: msg = "[0x%04x:%s] " + msg args = (self._device.nwk, self._endpoint_id) + args LOGGER.log(lvl, msg, *args, **kwargs) @property def device(self) -> DeviceType: return self._device @property def endpoint_id(self) -> int: return self._endpoint_id @property def 
manufacturer(self) -> str: if self._manufacturer is not None: return self._manufacturer return self.device.manufacturer @manufacturer.setter def manufacturer(self, value) -> None: self.warning( "Overriding manufacturer from quirks is not supported and " "will be removed in the next zigpy version" ) self._manufacturer = value @property def manufacturer_id(self) -> int | None: """Return device's manufacturer id code.""" return self.device.manufacturer_id @property def member_of(self) -> dict: return self._member_of @property def model(self) -> str: if self._model is not None: return self._model return self.device.model @model.setter def model(self, value) -> None: self.warning( "Overriding model from quirks is not supported and " "will be removed in the next version" ) self._model = value @property def unique_id(self) -> tuple[t.EUI64, int]: return self.device.ieee, self.endpoint_id def __getattr__(self, name: str) -> zigpy.zcl.Cluster: try: return self._cluster_attr[name] except KeyError: raise AttributeError def __repr__(self) -> str: def cluster_repr(clusters): return ", ".join( [f"{c.ep_attribute}:0x{c.cluster_id:04X}" for c in clusters] ) return ( f"<{type(self).__name__}" f" id={self.endpoint_id}" f" in=[{cluster_repr(self.in_clusters.values())}]" f" out=[{cluster_repr(self.out_clusters.values())}]" f" status={self.status!r}" f">" ) zigpy-0.62.3/zigpy/exceptions.py000066400000000000000000000033561456054056700166640ustar00rootroot00000000000000from __future__ import annotations import typing if typing.TYPE_CHECKING: import zigpy.backups class ZigbeeException(Exception): """Base exception class""" class ParsingError(ZigbeeException): """Failed to parse a frame""" class ControllerException(ZigbeeException): """Application controller failed in some way.""" class APIException(ZigbeeException): """Radio API failed in some way.""" class DeliveryError(ZigbeeException): """Message delivery failed in some way""" def __init__(self, message: str, status: int | None = 
None): super().__init__(message) self.status = status class SendError(DeliveryError): """Message could not be enqueued.""" class InvalidResponse(ZigbeeException): """A ZDO or ZCL response has an unsuccessful status code""" class RadioException(Exception): """Base exception class for radio exceptions""" class TransientConnectionError(RadioException): """Connection to the radio failed but will likely succeed in the near future""" class NetworkNotFormed(RadioException): """A network cannot be started because the radio has no stored network info""" class FormationFailure(RadioException): """Network settings could not be written to the radio""" class NetworkSettingsInconsistent(ZigbeeException): """Loaded network settings are different from what is in the database""" def __init__( self, message: str, new_state: zigpy.backups.NetworkBackup, old_state: zigpy.backups.NetworkBackup, ) -> None: super().__init__(message) self.new_state = new_state self.old_state = old_state class CorruptDatabase(ZigbeeException): """The SQLite database is corrupt or otherwise inconsistent""" zigpy-0.62.3/zigpy/group.py000066400000000000000000000212311456054056700156270ustar00rootroot00000000000000from __future__ import annotations import logging from typing import TYPE_CHECKING, Any from zigpy import types as t from zigpy.endpoint import Endpoint import zigpy.profiles.zha as zha_profile from zigpy.util import ListenableMixin, LocalLogMixin import zigpy.zcl from zigpy.zcl import foundation if TYPE_CHECKING: from zigpy.application import ControllerApplication LOGGER = logging.getLogger(__name__) class Group(ListenableMixin, dict): def __init__( self, group_id: int, name: str | None = None, groups: Groups | None = None, *args: Any, **kwargs: Any, ): super().__init__(*args, **kwargs) self._groups: Groups = groups self._group_id: t.Group = t.Group(group_id) self._name: str = name self._endpoint: GroupEndpoint = GroupEndpoint(self) self._send_sequence = 0 if groups is not None: 
self.add_listener(groups) def get_sequence(self) -> t.uint8_t: self._send_sequence = (self._send_sequence + 1) % 256 return self._send_sequence def add_member(self, ep: Endpoint, suppress_event: bool = False) -> Group: if not isinstance(ep, Endpoint): raise ValueError(f"{ep} is not {Endpoint.__class__.__name__} class") if ep.unique_id in self: return self[ep.unique_id] self[ep.unique_id] = ep ep.member_of[self.group_id] = self if not suppress_event: self.listener_event("member_added", self, ep) return self def remove_member(self, ep: Endpoint, suppress_event: bool = False) -> Group: self.pop(ep.unique_id, None) ep.member_of.pop(self.group_id, None) if not suppress_event: self.listener_event("member_removed", self, ep) return self async def request(self, profile, cluster, sequence, data, *args, **kwargs): """Send multicast request.""" await self.application.send_packet( t.ZigbeePacket( src_ep=self.application.get_endpoint_id( cluster, is_server_cluster=False ), dst=t.AddrModeAddress( addr_mode=t.AddrMode.Group, address=self.group_id ), tsn=sequence, profile_id=profile, cluster_id=cluster, data=t.SerializableBytes(data), radius=0, non_member_radius=3, ) ) return foundation.GENERAL_COMMANDS[ foundation.GeneralCommand.Default_Response ].schema( status=foundation.Status.SUCCESS, command_id=data[2], ) def __repr__(self) -> str: return "<{} group_id={} name='{}' members={}>".format( self.__class__.__name__, self.group_id, self.name, super().__repr__() ) @property def application(self) -> ControllerApplication: """Expose application to FakeEndpoint/GroupCluster.""" return self.groups.application @property def groups(self) -> Groups: return self._groups @property def group_id(self) -> t.Group: return self._group_id @property def members(self) -> Group: return self @property def name(self) -> str: if self._name is None: return f"No name group {self.group_id}" return self._name @property def endpoint(self) -> GroupEndpoint: return self._endpoint class Groups(ListenableMixin, 
dict): def __init__(self, app: ControllerApplication, *args: Any, **kwargs: Any): self._application: ControllerApplication = app self._listeners: dict = {} super().__init__(*args, **kwargs) def add_group( self, group_id: int, name: str | None = None, suppress_event: bool = False ) -> Group: if group_id in self: return self[group_id] LOGGER.debug("Adding group: %s, %s", group_id, name) group = Group(group_id, name, self) self[group_id] = group if not suppress_event: self.listener_event("group_added", group) return group def member_added(self, group: Group, ep: Endpoint) -> None: self.listener_event("group_member_added", group, ep) def member_removed(self, group: Group, ep: Endpoint) -> None: self.listener_event("group_member_removed", group, ep) def pop(self, item, *args: Any) -> Group | None: if isinstance(item, Group): group = super().pop(item.group_id, *args) if isinstance(group, Group): for member in (*group.values(),): group.remove_member(member) self.listener_event("group_removed", group) return group group = super().pop(item, *args) if isinstance(group, Group): for member in (*group.values(),): group.remove_member(member) self.listener_event("group_removed", group) return group remove_group = pop def update_group_membership(self, ep: Endpoint, groups: set[int]) -> None: """Sync up device group membership.""" old_groups = { group.group_id for group in self.values() if ep.unique_id in group.members } for grp_id in old_groups - groups: self[grp_id].remove_member(ep) for grp_id in groups - old_groups: group = self.add_group(grp_id) group.add_member(ep) @property def application(self) -> ControllerApplication: """Return application controller.""" return self._application class GroupCluster(zigpy.zcl.Cluster): """Virtual cluster for group requests.""" @classmethod def from_id( cls, group_endpoint: GroupEndpoint, cluster_id: int, is_server=True ) -> zigpy.zcl.Cluster: """Instantiate from ZCL cluster by cluster id.""" if is_server is not True: raise ValueError("Only 
server clusters are supported for group requests") if cluster_id in cls._registry: return cls._registry[cluster_id](group_endpoint, is_server=True) group_endpoint.debug( "0x%04x cluster id is not supported for group requests", cluster_id ) raise KeyError(f"Unsupported 0x{cluster_id:04x} cluster id for groups") @classmethod def from_attr( cls, group_endpoint: GroupEndpoint, ep_name: str ) -> zigpy.zcl.Cluster: """Instantiate by Cluster name.""" for cluster in cls._registry.values(): if cluster.ep_attribute == ep_name: return cluster(group_endpoint, is_server=True) raise AttributeError(f"Unsupported {ep_name} group cluster") class GroupEndpoint(LocalLogMixin): """Group request handlers. wrapper for virtual clusters. """ def __init__(self, group: Group): """Instantiate GroupRequest.""" self._group: Group = group self._clusters: dict = {} self._cluster_by_attr: dict = {} @property def endpoint_id(self) -> None: return None @property def clusters(self) -> dict: """Group clusters. most of the times, group requests are addressed from client -> server clusters. """ return self._clusters @property def device(self) -> Group: """Group is our fake zigpy device""" return self._group def request(self, cluster, sequence, data, *args, **kwargs): """Send multicast request.""" return self.device.request(zha_profile.PROFILE_ID, cluster, sequence, data) def reply(self, cluster, sequence, data, *args, **kwargs): """Send multicast reply. 
do we really need this one :shrug: """ return self.request(cluster, sequence, data, *args, **kwargs) def log(self, lvl: int, msg: str, *args: Any, **kwargs: Any) -> None: msg = "[0x%04x] " + msg args = (self._group.group_id,) + args LOGGER.log(lvl, msg, *args, **kwargs) def __getitem__(self, item: int): """Return or instantiate a group cluster.""" try: return self.clusters[item] except KeyError: self.debug("trying to create new group %s cluster id", item) cluster = GroupCluster.from_id(self, item) self.clusters[item] = cluster return cluster def __getattr__(self, name: str): """Return or instantiate a group cluster by cluster name.""" try: return self._cluster_by_attr[name] except KeyError: self.debug("trying to create a new group '%s' cluster", name) cluster = GroupCluster.from_attr(self, name) self._cluster_by_attr[name] = cluster return cluster zigpy-0.62.3/zigpy/listeners.py000066400000000000000000000075451456054056700165170ustar00rootroot00000000000000from __future__ import annotations import asyncio import dataclasses import inspect import logging import typing from zigpy.util import Singleton from zigpy.zcl import foundation import zigpy.zdo.types as zdo_t LOGGER = logging.getLogger(__name__) ANY_DEVICE = Singleton("ANY_DEVICE") @dataclasses.dataclass(frozen=True) class BaseRequestListener: matchers: tuple[MatcherType] def resolve( self, hdr: foundation.ZCLHeader | zdo_t.ZDOHeader, command: foundation.CommandSchema, ) -> bool: """Attempts to resolve the listener with a given response. Can be called with any command as an argument, including ones we don't match. 
""" for matcher in self.matchers: match = None is_matcher_cmd = isinstance(matcher, foundation.CommandSchema) if is_matcher_cmd and isinstance(command, foundation.CommandSchema): match = command.matches(matcher) elif is_matcher_cmd and isinstance(hdr, zdo_t.ZDOHeader): # FIXME: ZDO does not use command schemas and cannot be matched pass elif callable(matcher): match = matcher(hdr, command) else: LOGGER.warning( "Matcher %r and command %r %r are incompatible", matcher, hdr, command, ) if match: return self._resolve(hdr, command) return False def _resolve( self, hdr: foundation.ZCLHeader | zdo_t.ZDOHeader, command: foundation.CommandSchema, ) -> bool: """Implemented by subclasses to handle matched commands. Return value indicates whether or not the listener has actually resolved, which can sometimes be unavoidable. """ raise NotImplementedError() # pragma: no cover def cancel(self): """Implement by subclasses to cancel the listener. Return value indicates whether or not the listener is cancelable. 
""" raise NotImplementedError() # pragma: no cover @dataclasses.dataclass(frozen=True) class FutureListener(BaseRequestListener): future: asyncio.Future def _resolve( self, hdr: foundation.ZCLHeader | zdo_t.ZDOHeader, command: foundation.CommandSchema, ) -> bool: if self.future.done(): return False self.future.set_result((hdr, command)) return True def cancel(self): self.future.cancel() return True @dataclasses.dataclass(frozen=True) class CallbackListener(BaseRequestListener): callback: typing.Callable[ [foundation.ZCLHeader | zdo_t.ZDOHeader, foundation.CommandSchema], typing.Any ] _tasks: set[asyncio.Task] = dataclasses.field(default_factory=set) def _resolve( self, hdr: foundation.ZCLHeader | zdo_t.ZDOHeader, command: foundation.CommandSchema, ) -> bool: try: potential_awaitable = self.callback(hdr, command) if inspect.isawaitable(potential_awaitable): task: asyncio.Task = asyncio.get_running_loop().create_task( potential_awaitable, name="CallbackListener" ) self._tasks.add(task) task.add_done_callback(self._tasks.remove) except Exception: LOGGER.warning( "Caught an exception while executing callback", exc_info=True ) # Callbacks are always resolved return True def cancel(self): # You can't cancel a callback return False MatcherFuncType = typing.Callable[ [ typing.Union[foundation.ZCLHeader, zdo_t.ZDOHeader], foundation.CommandSchema, ], bool, ] MatcherType = typing.Union[MatcherFuncType, foundation.CommandSchema] zigpy-0.62.3/zigpy/ota/000077500000000000000000000000001456054056700147055ustar00rootroot00000000000000zigpy-0.62.3/zigpy/ota/OTA_URLs.md000066400000000000000000000172621456054056700165670ustar00rootroot00000000000000# Zigbee OTA source provider sources for these and others Collection of external Zigbee OTA firmware images from official and unofficial OTA provider sources. 
### Inovelli OTA Firmware provider Manufacturer ID = 4655 Inovelli Zigbee OTA firmware images for zigpy are made publicly available by Inovelli (first-party) at the following URLs: https://files.inovelli.com/firmware/firmware-zha.json https://files.inovelli.com/firmware ### Sonoff OTA Firmware provider Manufacturer ID = 4742 Sonoff Zigbee OTA firmware images are made publicly available by Sonoff (first-party) at the following URLs: https://zigbee-ota.sonoff.tech/releases/upgrade.json ### Koenkk zigbee-OTA repository Koenkk zigbee-OTA repository host third-party OTA firmware images and external URLs for many third-party Zigbee OTA firmware images. https://github.com/Koenkk/zigbee-OTA/tree/master/images https://raw.githubusercontent.com/Koenkk/zigbee-OTA/master/index.json ### Dresden Elektronik Manufacturer ID = 4405 Dresden Elektronik Zigbee OTA firmware images are made publicly available by Dresden Elektronik (first-party) at the following URLs: https://deconz.dresden-elektronik.de/otau/ Dresden Elektronik also provide third-party OTA firmware images and external URLs for many third-party Zigbee OTA firmware images here: https://github.com/dresden-elektronik/deconz-rest-plugin/wiki/OTA-Image-Types---Firmware-versions Dresden Elektronik themselvers implement updates of third-party Zigbee firmware images via their deCONZ STD OTAU plugin: https://github.com/dresden-elektronik/deconz-ota-plugin ### EUROTRONICS EUROTRONICS Zigbee OTA firmware images are made publicly available by EUROTRONIC Technology (first-party) at the following URL: https://github.com/EUROTRONIC-Technology/Spirit-ZigBee/releases/download/ ### IKEA Trådfri Manufacturer ID = 4476 IKEA Tradfi Zigbee OTA firmware images are made publicly available by IKEA (first-party) at the following URL: Download-URL: http://fw.ota.homesmart.ikea.net/feed/version_info.json Release changelogs https://ww8.ikea.com/ikeahomesmart/releasenotes/releasenotes.html ### LEDVANCE/Sylvania and OSRAM Lightify Manufacturer ID = 
4364 LEDVANCE/Sylvania and OSRAM Lightify Zigbee OTA firmware images are made publicly available by LEDVANCE (first-party) at the following URL: https://update.ledvance.com/firmware-overview https://api.update.ledvance.com/v1/zigbee/firmwares/download https://consumer.sylvania.com/our-products/smart/sylvania-smart-zigbee-products-menu/index.jsp ### Legrand/Netatmo Manufacturer ID = 4129 Legrand/Netatmo Zigbee OTA firmware images are made publicly available by Legrand (first-party) at the following URL: https://developer.legrand.com/documentation/operating-manual/ https://developer.legrand.com/documentation/firmwares-download/ ### LiXee LiXee Zigbee OTA firmware images are made publicly available by Fairecasoimeme / ZiGate (first-party) at the following URL: https://github.com/fairecasoimeme/Zlinky_TIC/releases ### SALUS/Computime Manufacturer ID = 4216 SALUS/Computime Zigbee OTA firmware images are made publicly available by SALUS (first-party) at the following URL: https://eu.salusconnect.io/demo/default/status/firmware ### Sengled Manufacturer ID = 4448 Sengled Zigbee OTA firmware images are made publicly available by Sengled (first-party) at the following URLs but does now seem to allow listing: http://us-fm.cloud.sengled.com:8000/sengled/zigbee/firmware/ Note that Sengled do not seem to provide their firmware for use with other ZigBee gateways than the Sengled Smart Hub. The communication between their hub/gateway/bridge appliance and the server hosting the firmware files is encrypted, so we cannot directly get listing of all the files available. To find the URL for firmware files, you need to sniff the traffic from the Hue bridge to the Internet, as it downloads the files, (since the bridge will only download firmware files for connected devices with outdated firmware sniffing traffic is not repeatable once the device has been updated). 
The official URLs for Philips Hue (Signify) Zigbee OTA firmware images are therefore documented by community and third-parties such as Koenkk and Dresden Elektronik: https://raw.githubusercontent.com/Koenkk/zigbee-OTA/master/index.json https://github.com/dresden-elektronik/deconz-rest-plugin/wiki/OTA-Image-Types---Firmware-versions#sengled ### Philips Hue (Signify) Manufacturer ID = 4107 Philips Hue OTA firmware images are available for different Hue devices for several official sources that do not all use the same APIs: https://firmware.meethue.com/v1/checkUpdate https://firmware.meethue.com/storage/ http://fds.dc1.philips.com/firmware/ Philips Hue (Signify) Zigbee OTA firmware images direct URLs are available by Koenkk zigbee-OTA repository (third-party) at following URL: https://raw.githubusercontent.com/Koenkk/zigbee-OTA/master/index.json Note that Philips/Signify do not provide their firmware for use with other ZigBee gateways than the Philips Hue bridge. The communication between their hub/gateway/bridge appliance and the server hosting the firmware files is encrypted, so we cannot directly get listing of all the files available. To find the URL for firmware files, you need to sniff the traffic from the Hue bridge to the Internet, as it downloads the files, (since the bridge will only download firmware files for connected devices with outdated firmware sniffing traffic is not repeatable once the device has been updated). 
The official URLs for Philips Hue (Signify) Zigbee OTA firmware images are therefore documented by community and third-parties such as Koenkk and Dresden Elektronik: https://raw.githubusercontent.com/Koenkk/zigbee-OTA/master/index.json https://github.com/dresden-elektronik/deconz-rest-plugin/wiki/OTA-Image-Types---Firmware-versions#philips-hue https://github.com/dresden-elektronik/deconz-ota-plugin/blob/master/README.md#hue-firmware ### Lutron Manufacturer ID = 4420 Lutron Zigbee OTA firmware images for Lutron Aurora Smart Dimmer Z3-1BRL-WH-L0 is made publicly available by Philips (first-party as ODM) at the following URL: http://fds.dc1.philips.com/firmware/ZGB_1144_0000/3040/Superman_v3_04_Release_3040.ota ### Ubisys Manufacturer ID = 4338 Ubisys Zigbee OTA firmware images are made publicly available by Ubisys (first-party) at the following URLs: https://www.ubisys.de/en/support/firmware/ https://www.ubisys.de/wp-content/uploads/ ### Third Reality (3reality) Manufacturer IDs = 4659, 4877 ThirdReality (3reality) Zigbee OTA firmware images are made publicly available by Third Reality, Inc. (first-party) at the following URL: https://tr-zha.s3.amazonaws.com/firmware.json ### Danfoss Manufacturer ID = 4678 Danfoss Zigbee OTA firmware images for Danfoss Ally devices are made publicly available by Danfoss (first-party) at the following URL: https://files.danfoss.com/download/Heating/Ally/Danfoss%20Ally More information about updateting Danfoss Ally smart heating products available at: https://www.danfoss.com/en/products/dhs/smart-heating/smart-heating/danfoss-ally/danfoss-ally-support/#tab-approvals ### Busch-Jaeger Manufacturer ID = 4398 The ZLL switches from Busch-Jaeger does have upgradable firmware but unfortunately they do not publish the OTOU image files directly via an public OTA provider server. 
However the firmware can be download and extracted from an Windows Upgrade Tool provided by Busch-Jaeger with the following steps: - Download the Upgrade Tool from https://www.busch-jaeger.de/bje/software/Zigbee_Software/BJE_ZLL_Update_Tool_Setup_V1_2_0_Windows_Version.exe - Extract the contents of the *.exe file with 7zip (7z x BJE_ZLL_Update_Tool_Setup_V1_2_0_Windows_Version.exe). - Navigate to the device/ folder and get the firmware images. zigpy-0.62.3/zigpy/ota/__init__.py000066400000000000000000000135551456054056700170270ustar00rootroot00000000000000"""OTA support for Zigbee devices.""" from __future__ import annotations import datetime import logging import attr from zigpy.config import ( CONF_OTA, CONF_OTA_ALLOW_FILE_PROVIDERS, CONF_OTA_DIR, CONF_OTA_IKEA, CONF_OTA_INOVELLI, CONF_OTA_LEDVANCE, CONF_OTA_PROVIDER_MANUF_IDS, CONF_OTA_PROVIDER_URL, CONF_OTA_REMOTE_PROVIDERS, CONF_OTA_SALUS, CONF_OTA_SONOFF, CONF_OTA_THIRDREALITY, ) from zigpy.ota.image import BaseOTAImage, ImageKey, OTAImageHeader import zigpy.ota.provider from zigpy.ota.validators import check_invalid import zigpy.types as t from zigpy.typing import ControllerApplicationType import zigpy.util LOGGER = logging.getLogger(__name__) TIMEDELTA_0 = datetime.timedelta() DELAY_EXPIRATION = datetime.timedelta(hours=2) @attr.s class CachedImage: MAXIMUM_DATA_SIZE = 40 DEFAULT_EXPIRATION = datetime.timedelta(hours=18) image = attr.ib(default=None) expires_on = attr.ib(default=None) cached_data = attr.ib(default=None) @classmethod def new(cls, img: BaseOTAImage) -> CachedImage: expiration = datetime.datetime.now() + cls.DEFAULT_EXPIRATION return cls(img, expiration) @property def expired(self) -> bool: if self.expires_on is None: return False return self.expires_on - datetime.datetime.now() < TIMEDELTA_0 @property def key(self) -> ImageKey: return self.image.header.key @property def header(self) -> OTAImageHeader: return self.image.header @property def version(self) -> int: return 
self.image.header.file_version def should_update( self, manufacturer_id: t.uint16_t, img_type: t.uint16_t, ver: t.uint32_t, hw_ver: None = None, ) -> bool: """Check if it should upgrade""" if self.key != ImageKey(manufacturer_id, img_type): return False if ver >= self.version: return False if ( hw_ver is not None and self.image.header.hardware_versions_present and not ( self.image.header.minimum_hardware_version <= hw_ver <= self.image.header.maximum_hardware_version ) ): return False return True def get_image_block(self, offset: t.t.uint32_t, size: t.uint8_t) -> bytes: if ( self.expires_on is not None and self.expires_on - datetime.datetime.now() < DELAY_EXPIRATION ): self.expires_on += DELAY_EXPIRATION if self.cached_data is None: self.cached_data = self.image.serialize() if offset > len(self.cached_data): raise ValueError("Offset exceeds image size") return self.cached_data[offset : offset + min(self.MAXIMUM_DATA_SIZE, size)] def serialize(self) -> bytes: """Serialize the image.""" if self.cached_data is None: self.cached_data = self.image.serialize() return self.cached_data class OTA(zigpy.util.ListenableMixin): """OTA Manager.""" def __init__(self, app: ControllerApplicationType, *args, **kwargs) -> None: self._app: ControllerApplicationType = app self._image_cache: dict[ImageKey, CachedImage] = {} self._not_initialized = True self._listeners = {} ota_config = app.config[CONF_OTA] if ota_config[CONF_OTA_DIR]: if ota_config[CONF_OTA_ALLOW_FILE_PROVIDERS]: self.add_listener(zigpy.ota.provider.FileStore()) else: LOGGER.info("OTA file providers are currently disabled") if ota_config[CONF_OTA_IKEA]: self.add_listener(zigpy.ota.provider.Trådfri()) if ota_config[CONF_OTA_INOVELLI]: self.add_listener(zigpy.ota.provider.Inovelli()) if ota_config[CONF_OTA_LEDVANCE]: self.add_listener(zigpy.ota.provider.Ledvance()) if ota_config[CONF_OTA_SALUS]: self.add_listener(zigpy.ota.provider.Salus()) if ota_config[CONF_OTA_SONOFF]: self.add_listener(zigpy.ota.provider.Sonoff()) if 
ota_config[CONF_OTA_THIRDREALITY]: self.add_listener(zigpy.ota.provider.ThirdReality()) for provider_config in ota_config[CONF_OTA_REMOTE_PROVIDERS]: self.add_listener( zigpy.ota.provider.RemoteProvider( url=provider_config[CONF_OTA_PROVIDER_URL], manufacturer_ids=provider_config[CONF_OTA_PROVIDER_MANUF_IDS], ) ) async def initialize(self) -> None: await self.async_event("initialize_provider", self._app.config[CONF_OTA]) self._not_initialized = False async def get_ota_image( self, manufacturer_id: t.uint16_t, image_type: t.uint16_t, model: str | None = None, ) -> CachedImage | None: if manufacturer_id in ( zigpy.ota.provider.Salus.MANUFACTURER_ID, ): # Salus/computime do not pass a useful image_type # in the message from the device. So construct key based on model name. key = ImageKey(manufacturer_id, model) else: key = ImageKey(manufacturer_id, image_type) if key in self._image_cache and not self._image_cache[key].expired: return self._image_cache[key] images = await self.async_event("get_image", key) valid_images = [] for image in images: if image is None or check_invalid(image): continue valid_images.append(image) if not valid_images: return None cached = CachedImage.new( max(valid_images, key=lambda img: img.header.file_version) ) self._image_cache[key] = cached return cached @property def not_initialized(self): return self._not_initialized zigpy-0.62.3/zigpy/ota/image.py000066400000000000000000000204761456054056700163520ustar00rootroot00000000000000"""OTA Firmware handling.""" from __future__ import annotations import hashlib import logging import attr import zigpy.types as t LOGGER = logging.getLogger(__name__) @attr.s(frozen=True) class ImageKey: manufacturer_id = attr.ib(default=None) image_type = attr.ib(default=None) class HWVersion(t.uint16_t): @property def version(self): return self >> 8 @property def revision(self): return self & 0x00FF def __repr__(self): return "<{} version={} revision={}>".format( self.__class__.__name__, self.version, 
self.revision ) class HeaderString(str): _size = 32 @classmethod def deserialize(cls, data: bytes) -> tuple[HeaderString, bytes]: if len(data) < cls._size: raise ValueError(f"Data is too short. Should be at least {cls._size}") raw = data[: cls._size].split(b"\x00")[0] return cls(raw.decode("utf8", errors="replace")), data[cls._size :] def serialize(self) -> bytes: return self.encode("utf8").ljust(self._size, b"\x00") class FieldControl(t.bitmap16): SECURITY_CREDENTIAL_VERSION_PRESENT = 0b001 DEVICE_SPECIFIC_FILE_PRESENT = 0b010 HARDWARE_VERSIONS_PRESENT = 0b100 class OTAImageHeader(t.Struct): MAGIC_VALUE = 0x0BEEF11E OTA_HEADER = MAGIC_VALUE.to_bytes(4, "little") upgrade_file_id: t.uint32_t header_version: t.uint16_t header_length: t.uint16_t field_control: FieldControl manufacturer_id: t.uint16_t image_type: t.uint16_t file_version: t.uint32_t stack_version: t.uint16_t header_string: HeaderString image_size: t.uint32_t security_credential_version: t.uint8_t = t.StructField( requires=lambda s: s.security_credential_version_present ) upgrade_file_destination: t.EUI64 = t.StructField( requires=lambda s: s.device_specific_file ) minimum_hardware_version: HWVersion = t.StructField( requires=lambda s: s.hardware_versions_present ) maximum_hardware_version: HWVersion = t.StructField( requires=lambda s: s.hardware_versions_present ) @property def security_credential_version_present(self) -> bool: if self.field_control is None: return None return bool( self.field_control & FieldControl.SECURITY_CREDENTIAL_VERSION_PRESENT ) @property def device_specific_file(self) -> bool: if self.field_control is None: return None return bool(self.field_control & FieldControl.DEVICE_SPECIFIC_FILE_PRESENT) @property def hardware_versions_present(self) -> bool: if self.field_control is None: return None return bool(self.field_control & FieldControl.HARDWARE_VERSIONS_PRESENT) @property def key(self) -> ImageKey: return ImageKey(self.manufacturer_id, self.image_type) @classmethod def 
deserialize(cls, data: bytes) -> tuple[OTAImageHeader, bytes]: hdr, data = super().deserialize(data) if hdr.upgrade_file_id != cls.MAGIC_VALUE: raise ValueError( f"Wrong magic number for OTA Image: {hdr.upgrade_file_id!r}" ) return hdr, data class ElementTagId(t.enum16): UPGRADE_IMAGE = 0x0000 ECDSA_SIGNATURE_CRYPTO_SUITE_1 = 0x0001 ECDSA_SIGNING_CERTIFICATE_CRYPTO_SUITE_1 = 0x0002 IMAGE_INTEGRITY_CODE = 0x0003 PICTURE_DATA = 0x0004 ECDSA_SIGNATURE_CRYPTO_SUITE_2 = 0x0005 ECDSA_SIGNING_CERTIFICATE_CRYPTO_SUITE_2 = 0x0006 class LVBytes32(t.LVBytes): _prefix_length = 4 class SubElement(t.Struct): tag_id: ElementTagId data: LVBytes32 class BaseOTAImage: """Base OTA image container type. Not all images are valid Zigbee OTA images but are nonetheless accepted by devices. Only requirement is that the image contains a valid OTAImageHeader property and can be serialized/deserialized. """ header: OTAImageHeader @classmethod def deserialize(cls, data) -> tuple[BaseOTAImage, bytes]: raise NotImplementedError() # pragma: no cover def serialize(self): raise NotImplementedError() # pragma: no cover class OTAImage(t.Struct, BaseOTAImage): """Zigbee OTA image according to 11.4 of the ZCL specification.""" header: OTAImageHeader subelements: t.List[SubElement] @classmethod def deserialize(cls, data: bytes) -> tuple[OTAImage, bytes]: hdr, data = OTAImageHeader.deserialize(data) elements_len = hdr.image_size - hdr.header_length if elements_len > len(data): raise ValueError(f"Data is too short for {cls}") image = cls(header=hdr, subelements=[]) element_data, data = data[:elements_len], data[elements_len:] while element_data: element, element_data = SubElement.deserialize(element_data) image.subelements.append(element) return image, data def serialize(self) -> bytes: res = super().serialize() assert len(res) == self.header.image_size return res @attr.s class HueSBLOTAImage(BaseOTAImage): """Unique OTA image format for certain Hue devices. 
Starts with a valid header but does not contain any valid subelements beyond that point. """ SUBELEMENTS_MAGIC = b"\x2A\x00\x01" header = attr.ib(default=None) data = attr.ib(default=None) def serialize(self) -> bytes: return self.header.serialize() + self.data @classmethod def deserialize(cls, data: bytes) -> tuple[HueSBLOTAImage, bytes]: header, remaining_data = OTAImageHeader.deserialize(data) firmware = remaining_data[: header.image_size - len(header.serialize())] if len(data) < header.image_size: raise ValueError( f"Data is too short to contain image: {len(data)} < {header.image_size}" ) if not firmware.startswith(cls.SUBELEMENTS_MAGIC): raise ValueError( f"Firmware does not start with expected magic bytes: {firmware[:10]!r}" ) if header.manufacturer_id != 4107: raise ValueError( f"Only Hue images are expected. Got: {header.manufacturer_id}" ) return cls(header=header, data=firmware), data[header.image_size :] def parse_ota_image(data: bytes) -> tuple[BaseOTAImage, bytes]: """Attempts to extract any known OTA image type from data. 
Does not validate firmware.""" if len(data) > 4 and int.from_bytes(data[0:4], "little") + 21 == len(data): # Legrand OTA images are prefixed with their unwrapped size and include a 1 + 16 # byte suffix return OTAImage.deserialize(data[4:-17]) elif ( len(data) > 152 # Avoid the SHA512 hash until we're pretty sure this is a Third Reality image and int.from_bytes(data[68:72], "little") + 64 == len(data) and data.startswith(hashlib.sha512(data[64:]).digest()) ): # Third Reality OTA images contain a 152 byte header with multiple SHA512 hashes # and the image length return OTAImage.deserialize(data[152:]) elif data.startswith(b"NGIS"): # IKEA container needs to be unwrapped if len(data) <= 24: raise ValueError( f"Data too short to contain IKEA container header: {len(data)}" ) offset = int.from_bytes(data[16:20], "little") size = int.from_bytes(data[20:24], "little") if len(data) <= offset + size: raise ValueError(f"Data too short to be IKEA container: {len(data)}") wrapped_data = data[offset : offset + size] image, rest = OTAImage.deserialize(wrapped_data) if rest: LOGGER.warning( "Fixing IKEA OTA image with trailing data (%s bytes)", size - image.header.image_size, ) image.header.image_size += len(rest) # No other structure has been observed assert len(image.subelements) == 1 assert image.subelements[0].tag_id == ElementTagId.UPGRADE_IMAGE image.subelements[0].data += rest rest = b"" return image, rest try: # Hue sbl-ota images start with a Zigbee OTA header but contain no valid # subelements after that. Try it first. return HueSBLOTAImage.deserialize(data) except ValueError: return OTAImage.deserialize(data) zigpy-0.62.3/zigpy/ota/manager.py000066400000000000000000000177641456054056700167100ustar00rootroot00000000000000"""OTA manager for Zigpy. 
initial implementation from: https://github.com/zigpy/zigpy/pull/1102""" from __future__ import annotations import asyncio import contextlib from typing import TYPE_CHECKING from zigpy.zcl import foundation from zigpy.zcl.clusters.general import Ota if TYPE_CHECKING: from zigpy.device import Device from zigpy.ota.image import BaseOTAImage class OTAManager: """Class to manage OTA updates for a device.""" def __init__( self, device: Device, image: BaseOTAImage, progress_callback=None, ) -> None: self.device = device self.ota_cluster = None for ep in device.non_zdo_endpoints: try: self.ota_cluster = ep.out_clusters[Ota.cluster_id] break except KeyError: pass else: raise ValueError("Device has no OTA cluster") self.image = image self._image_data = image.serialize() self.progress_callback = progress_callback self._upgrade_end_future = asyncio.get_running_loop().create_future() self.stack = contextlib.ExitStack() def __enter__(self) -> OTAManager: self.stack.enter_context( self.device._application.callback_for_response( src=self.device, filters=[ Ota.ServerCommandDefs.query_next_image.schema(), ], callback=self._image_query_req, ) ) self.stack.enter_context( self.device._application.callback_for_response( src=self.device, filters=[ Ota.ServerCommandDefs.image_block.schema(), ], callback=self._image_block_req, ) ) self.stack.enter_context( self.device._application.callback_for_response( src=self.device, filters=[ Ota.ServerCommandDefs.upgrade_end.schema(), ], callback=self._upgrade_end, ) ) return self def __exit__(self, *exc_details) -> None: self.stack.close() async def _image_query_req( self, hdr: foundation.ZCLHeader, command: Ota.QueryNextImageCommand ) -> None: """Handle image query request.""" try: assert self.ota_cluster await self.ota_cluster.query_next_image_response( status=foundation.Status.SUCCESS, manufacturer_code=self.image.header.manufacturer_id, image_type=self.image.header.image_type, file_version=self.image.header.file_version, 
image_size=self.image.header.image_size, tsn=hdr.tsn, ) except Exception as ex: self.device.debug("OTA query_next_image handler - exception: %s", ex) self._upgrade_end_future.set_result(foundation.Status.FAILURE) async def _image_block_req( self, hdr: foundation.ZCLHeader, command: Ota.ImageBlockCommand ) -> None: """Handle image block request.""" self.device.debug( ( "OTA image_block handler for '%s %s': field_control=%s" ", manufacturer_id=%s, image_type=%s, file_version=%s" ", file_offset=%s, max_data_size=%s, request_node_addr=%s" ", block_request_delay=%s" ), self.device.manufacturer, self.device.model, command.field_control, command.manufacturer_code, command.image_type, command.file_version, command.file_offset, command.maximum_data_size, command.request_node_addr, command.minimum_block_period, ) block = self._image_data[ command.file_offset : command.file_offset + command.maximum_data_size ] if not block: try: assert self.ota_cluster await self.ota_cluster.image_block_response( status=foundation.Status.MALFORMED_COMMAND, tsn=hdr.tsn, ) except Exception as ex: self.device.debug( "OTA image_block handler[MALFORMED_COMMAND] - exception: %s", ex ) self._upgrade_end_future.set_result(foundation.Status.MALFORMED_COMMAND) return try: assert self.ota_cluster await self.ota_cluster.image_block_response( status=foundation.Status.SUCCESS, manufacturer_code=self.image.header.manufacturer_id, image_type=self.image.header.image_type, file_version=self.image.header.file_version, file_offset=command.file_offset, image_data=block, tsn=hdr.tsn, ) if self.progress_callback is not None: self.progress_callback( command.file_offset + len(block), len(self._image_data) ) except Exception as ex: self.device.debug("OTA image_block handler - exception: %s", ex) self._upgrade_end_future.set_result(foundation.Status.FAILURE) async def _upgrade_end( self, hdr: foundation.ZCLHeader, command: foundation.CommandSchema ) -> None: """Handle upgrade end request.""" try: assert 
self.ota_cluster self.device.debug( ( "OTA upgrade_end handler for '%s %s': status=%s" ", manufacturer_id=%s, image_type=%s, file_version=%s" ), self.device.manufacturer, self.device.model, command.status, self.image.header.manufacturer_id, self.image.header.image_type, self.image.header.file_version, ) await self.ota_cluster.upgrade_end_response( manufacturer_code=self.image.header.manufacturer_id, image_type=self.image.header.image_type, file_version=self.image.header.file_version, current_time=0x00000000, upgrade_time=0x00000000, tsn=hdr.tsn, ) self._upgrade_end_future.set_result(command.status) except Exception as ex: self.device.debug("OTA upgrade_end handler - exception: %s", ex) self._upgrade_end_future.set_result(foundation.Status.FAILURE) async def notify(self) -> None: """Notify device of new image.""" try: assert self.ota_cluster await self.ota_cluster.image_notify( payload_type=( self.ota_cluster.ImageNotifyCommand.PayloadType.QueryJitter ), query_jitter=100, ) except Exception as ex: self.device.debug("OTA image_notify handler - exception: %s", ex) self._upgrade_end_future.set_result(foundation.Status.FAILURE) async def wait(self) -> foundation.Status: """Wait for upgrade end response.""" return await self._upgrade_end_future async def update_firmware( device: Device, image: BaseOTAImage, progress_callback: callable = None, force: bool = False, ) -> foundation.Status: """Update the firmware on a Zigbee device.""" if force: # Force it to send the image even if it's the same version image.header.file_version = 0xFFFFFFFF - 1 def progress(current: int, total: int): progress = (100 * current) / total device.info( "OTA upgrade progress: (%d / %d): %0.4f%%", current, total, progress, ) if progress_callback is not None: progress_callback(current, total, progress) with OTAManager(device, image, progress_callback=progress) as ota: await ota.notify() return await ota.wait() 
zigpy-0.62.3/zigpy/ota/provider.py000066400000000000000000000737671456054056700171350ustar00rootroot00000000000000"""OTA Firmware providers.""" from __future__ import annotations from abc import ABC, abstractmethod import asyncio from collections import defaultdict import datetime import hashlib import io import logging import os import os.path import re import ssl import tarfile import typing import urllib.parse import aiohttp import attr from zigpy.config import CONF_OTA_DIR, CONF_OTA_SONOFF_URL from zigpy.ota.image import BaseOTAImage, ImageKey, OTAImageHeader, parse_ota_image import zigpy.util LOGGER = logging.getLogger(__name__) LOCK_REFRESH = "firmware_list" ENABLE_IKEA_OTA = "enable_ikea_ota" ENABLE_INOVELLI_OTA = "enable_inovelli_ota" ENABLE_LEDVANCE_OTA = "enable_ledvance_ota" SKIP_OTA_FILES = (ENABLE_IKEA_OTA, ENABLE_INOVELLI_OTA, ENABLE_LEDVANCE_OTA) class Basic(zigpy.util.LocalLogMixin, ABC): """Skeleton OTA Firmware provider.""" REFRESH = datetime.timedelta(hours=12) def __init__(self) -> None: self.config: dict[str, str | int] = {} self._cache: dict[ImageKey, BaseOTAImage] = {} self._is_enabled = False self._locks: defaultdict[asyncio.Semaphore] = defaultdict(asyncio.Semaphore) self._last_refresh = None @abstractmethod async def initialize_provider(self, ota_config: dict) -> None: """Initialize OTA provider.""" @abstractmethod async def refresh_firmware_list(self) -> None: """Loads list of firmware into memory.""" async def filter_get_image(self, key: ImageKey) -> bool: """Filter unwanted get_image lookups.""" return False async def get_image(self, key: ImageKey) -> BaseOTAImage | None: if await self.filter_get_image(key): return None if not self.is_enabled or self._locks[key].locked(): return None if self.expired: await self.refresh_firmware_list() try: fw_file = self._cache[key] except KeyError: return None async with self._locks[key]: return await fw_file.fetch_image() def disable(self) -> None: self._is_enabled = False def enable(self) -> None: 
self._is_enabled = True def update_expiration(self) -> None: self._last_refresh = datetime.datetime.now() @property def is_enabled(self) -> bool: return self._is_enabled @property def expired(self) -> bool: """Return True if firmware list needs refreshing.""" if self._last_refresh is None: return True return datetime.datetime.now() - self._last_refresh > self.REFRESH def log(self, lvl: int, msg: str, *args, **kwargs) -> None: """Log a message""" msg = f"{self.__class__.__name__}: {msg}" return LOGGER.log(lvl, msg, *args, **kwargs) @attr.s class IKEAImage: image_type: int = attr.ib() binary_url: str = attr.ib() sha3_256_sum: str = attr.ib() @classmethod def new(cls, data: dict[str, str | int]) -> IKEAImage: return cls( image_type=data["fw_image_type"], sha3_256_sum=data["fw_sha3_256"], binary_url=data["fw_binary_url"], ) @property def version(self) -> int: file_version_match = re.match(r".*_v(?P\d+)_.*", self.binary_url) if file_version_match is None: raise ValueError(f"Couldn't parse firmware version from {self}") return int(file_version_match.group("v"), 10) @property def key(self) -> ImageKey: return ImageKey(Trådfri.MANUFACTURER_ID, self.image_type) async def fetch_image(self) -> BaseOTAImage | None: async with aiohttp.ClientSession() as req: LOGGER.debug("Downloading %s for %s", self.binary_url, self.key) async with req.get(self.binary_url, ssl=Trådfri.SSL_CTX) as rsp: data = await rsp.read() assert hashlib.sha3_256(data).hexdigest() == self.sha3_256_sum ota_image, _ = parse_ota_image(data) assert ota_image.header.key == self.key LOGGER.debug("Finished downloading %s", self) return ota_image class Trådfri(Basic): """IKEA OTA Firmware provider.""" UPDATE_URL = "https://fw.ota.homesmart.ikea.com/DIRIGERA/version_info.json" MANUFACTURER_ID = 4476 HEADERS = {"accept": "application/json;q=0.9,*/*;q=0.8"} # `openssl s_client -connect fw.ota.homesmart.ikea.com:443 -showcerts` SSL_CTX = ssl.create_default_context( cadata="""\ -----BEGIN CERTIFICATE----- 
MIICGDCCAZ+gAwIBAgIUdfH0KDnENv/dEcxH8iVqGGGDqrowCgYIKoZIzj0EAwMw SzELMAkGA1UEBhMCU0UxGjAYBgNVBAoMEUlLRUEgb2YgU3dlZGVuIEFCMSAwHgYD VQQDDBdJS0VBIEhvbWUgc21hcnQgUm9vdCBDQTAgFw0yMTA1MjYxOTAxMDlaGA8y MDcxMDUxNDE5MDEwOFowSzELMAkGA1UEBhMCU0UxGjAYBgNVBAoMEUlLRUEgb2Yg U3dlZGVuIEFCMSAwHgYDVQQDDBdJS0VBIEhvbWUgc21hcnQgUm9vdCBDQTB2MBAG ByqGSM49AgEGBSuBBAAiA2IABIDRUvKGFMUu2zIhTdgfrfNcPULwMlc0TGSrDLBA oTr0SMMV4044CRZQbl81N4qiuHGhFzCnXapZogkiVuFu7ZqSslsFuELFjc6ZxBjk Kmud+pQM6QQdsKTE/cS06dA+P6NCMEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E FgQUcdlEnfX0MyZA4zAdY6CLOye9wfwwDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49 BAMDA2cAMGQCMG6mFIeB2GCFch3r0Gre4xRH+f5pn/bwLr9yGKywpeWvnUPsQ1KW ckMLyxbeNPXdQQIwQc2YZDq/Mz0mOkoheTUWiZxK2a5bk0Uz1XuGshXmQvEg5TGy 2kVHW/Mz9/xwpy4u -----END CERTIFICATE-----""" ) async def initialize_provider(self, ota_config: dict) -> None: self.info("OTA provider enabled") self.config = ota_config await self.refresh_firmware_list() self.enable() async def refresh_firmware_list(self) -> None: if self._locks[LOCK_REFRESH].locked(): return async with self._locks[LOCK_REFRESH]: async with aiohttp.ClientSession(headers=self.HEADERS) as req: async with req.get(self.UPDATE_URL, ssl=self.SSL_CTX) as rsp: # IKEA does not always respond with an appropriate Content-Type # but the response is always JSON if not (200 <= rsp.status <= 299): self.warning( "Couldn't download '%s': %s/%s", rsp.url, rsp.status, rsp.reason, ) return fw_lst = await rsp.json(content_type=None) self.debug("Finished downloading firmware update list") self._cache.clear() for fw in fw_lst: if "fw_image_type" not in fw: continue img = IKEAImage.new(fw) self._cache[img.key] = img self.update_expiration() async def filter_get_image(self, key: ImageKey) -> bool: return key.manufacturer_id != self.MANUFACTURER_ID @attr.s class LedvanceImage: """Ledvance image handler.""" manufacturer_id = attr.ib() image_type = attr.ib() version = attr.ib(default=None) image_size = attr.ib(default=None) url = attr.ib(default=None) @classmethod def 
new(cls, data): identity = data["identity"] version_parts = identity["version"] # This matches the OTA file's `image_version` for every image version = ( (version_parts["major"] << 24) | (version_parts["minor"] << 16) | (version_parts["build"] << 8) | (version_parts["revision"] << 0) ) res = cls( manufacturer_id=identity["company"], image_type=identity["product"], version=version, ) res.file_version = int(data["fullName"].split("/")[1], 16) res.image_size = data["length"] res.url = ( "https://api.update.ledvance.com/v1/zigbee/firmwares/download?" + urllib.parse.urlencode( { "Company": identity["company"], "Product": identity["product"], "Version": ( f"{version_parts['major']}.{version_parts['minor']}" f".{version_parts['build']}.{version_parts['revision']}" ), } ) ) return res @property def key(self): return ImageKey(self.manufacturer_id, self.image_type) async def fetch_image(self) -> BaseOTAImage | None: async with aiohttp.ClientSession() as req: LOGGER.debug("Downloading %s for %s", self.url, self.key) async with req.get(self.url) as rsp: data = await rsp.read() img, _ = parse_ota_image(data) assert img.header.key == self.key LOGGER.debug( "%s: version: %s, hw_ver: (%s, %s), OTA string: %s", img.header.key, img.header.file_version, img.header.minimum_hardware_version, img.header.maximum_hardware_version, img.header.header_string, ) LOGGER.debug( "Finished downloading %s bytes from %s for %s ver %s", self.image_size, self.url, self.key, self.version, ) return img class Ledvance(Basic): """Ledvance firmware provider""" # documentation: https://portal.update.ledvance.com/docs/services/firmware-rest-api/ UPDATE_URL = "https://api.update.ledvance.com/v1/zigbee/firmwares" HEADERS = {"accept": "application/json"} async def initialize_provider(self, ota_config: dict) -> None: self.info("OTA provider enabled") await self.refresh_firmware_list() self.enable() async def refresh_firmware_list(self) -> None: if self._locks[LOCK_REFRESH].locked(): return async with 
self._locks[LOCK_REFRESH]: async with aiohttp.ClientSession(headers=self.HEADERS) as req: async with req.get(self.UPDATE_URL) as rsp: if not (200 <= rsp.status <= 299): self.warning( "Couldn't download '%s': %s/%s", rsp.url, rsp.status, rsp.reason, ) return fw_lst = await rsp.json() self.debug("Finished downloading firmware update list") self._cache.clear() for fw in fw_lst["firmwares"]: img = LedvanceImage.new(fw) # Ignore earlier images if img.key in self._cache and self._cache[img.key].version > img.version: continue self._cache[img.key] = img self.update_expiration() @attr.s class SalusImage: """Salus image handler.""" manufacturer_id = attr.ib() model = attr.ib() version = attr.ib(default=None) image_size = attr.ib(default=None) url = attr.ib(default=None) @classmethod def new(cls, data): mod = data["model"] ver = data["version"] url = data["url"] res = cls( manufacturer_id=Salus.MANUFACTURER_ID, model=mod, version=ver, url=url ) return res @property def key(self): return ImageKey(self.manufacturer_id, self.model) async def fetch_image(self) -> BaseOTAImage | None: async with aiohttp.ClientSession() as req: LOGGER.debug("Downloading %s for %s", self.url, self.key) async with req.get(self.url) as rsp: data = await rsp.read() img_tgz = io.BytesIO(data) with tarfile.open(fileobj=img_tgz) as tar: # Unpack tar for item in tar: if item.name.endswith(".ota"): f = tar.extractfile(item) if f is None: raise ValueError( f"Issue extracting {item.name} from {self.url}" ) else: file_bytes = f.read() break img, _ = parse_ota_image(file_bytes) LOGGER.debug( "%s: version: %s, hw_ver: (%s, %s), OTA string: %s", img.header.key, img.header.file_version, img.header.minimum_hardware_version, img.header.maximum_hardware_version, img.header.header_string, ) assert img.header.manufacturer_id == self.manufacturer_id # we can't check assert img.header.key == self.key because # self.key does not include any valid image_type data for salus # devices. 
It is not known at the point of generating the FW # list cache, so it can't be checked here (Ikea and ledvance have # this listed in the JSON, so they already know and can do this). LOGGER.debug( "Finished downloading %s bytes from %s for %s ver %s", self.image_size, self.url, self.key, self.version, ) return img class Salus(Basic): """Salus firmware provider""" # documentation: none known. UPDATE_URL = "https://eu.salusconnect.io/demo/default/status/firmware" MANUFACTURER_ID = 4216 HEADERS = {"accept": "application/json"} async def initialize_provider(self, ota_config: dict) -> None: self.info("OTA provider enabled") await self.refresh_firmware_list() self.enable() async def refresh_firmware_list(self) -> None: if self._locks[LOCK_REFRESH].locked(): return async with self._locks[LOCK_REFRESH]: async with aiohttp.ClientSession(headers=self.HEADERS) as req: async with req.get(self.UPDATE_URL) as rsp: if not (200 <= rsp.status <= 299): self.warning( "Couldn't download '%s': %s/%s", rsp.url, rsp.status, rsp.reason, ) return fw_lst = await rsp.json() self.debug("Finished downloading firmware update list") self._cache.clear() for fw in fw_lst["versions"]: img = SalusImage.new(fw) self._cache[img.key] = img self.update_expiration() @attr.s class SONOFFImage: manufacturer_id = attr.ib() image_type = attr.ib() version = attr.ib(default=None) image_size = attr.ib(default=None) url = attr.ib(default=None) @classmethod def new(cls, data): res = cls(data["fw_manufacturer_id"], data["fw_image_type"]) res.version = data["fw_file_version"] res.image_size = data["fw_filesize"] res.url = data["fw_binary_url"] return res @property def key(self): return ImageKey(self.manufacturer_id, self.image_type) async def fetch_image(self) -> BaseOTAImage | None: async with aiohttp.ClientSession() as req: LOGGER.debug("Downloading %s for %s", self.url, self.key) async with req.get(self.url) as rsp: data = await rsp.read() ota_image, _ = parse_ota_image(data) assert ota_image.header.key == 
self.key LOGGER.debug( "Finished downloading %s bytes from %s for %s ver %s", self.image_size, self.url, self.key, self.version, ) return ota_image class Sonoff(Basic): """Sonoff OTA Firmware provider.""" UPDATE_URL = "https://zigbee-ota.sonoff.tech/releases/upgrade.json" MANUFACTURER_ID = 4742 HEADERS = {"accept": "application/json;q=0.9,*/*;q=0.8"} async def initialize_provider(self, ota_config: dict) -> None: self.info("OTA provider enabled") self.config = ota_config await self.refresh_firmware_list() self.enable() async def refresh_firmware_list(self) -> None: if self._locks[LOCK_REFRESH].locked(): return async with self._locks[LOCK_REFRESH]: async with aiohttp.ClientSession(headers=self.HEADERS) as req: url = self.config.get(CONF_OTA_SONOFF_URL, self.UPDATE_URL) async with req.get(url) as rsp: if not (200 <= rsp.status <= 299): self.warning( "Couldn't download '%s': %s/%s", rsp.url, rsp.status, rsp.reason, ) return fw_lst = await rsp.json() self.debug("Finished downloading firmware update list") self._cache.clear() for fw in fw_lst: img = SONOFFImage.new(fw) self._cache[img.key] = img self.update_expiration() async def filter_get_image(self, key: ImageKey) -> bool: return key.manufacturer_id != self.MANUFACTURER_ID @attr.s class FileImage: REFRESH = datetime.timedelta(hours=24) file_name = attr.ib(default=None) header = attr.ib(factory=OTAImageHeader) @property def key(self) -> ImageKey: return ImageKey(self.header.manufacturer_id, self.header.image_type) @property def version(self) -> int: return self.header.file_version @classmethod def scan_image(cls, file_name: str): """Check the header of the image.""" try: with open(file_name, mode="rb") as f: parsed_image, _ = parse_ota_image(f.read()) img = cls(file_name=file_name, header=parsed_image.header) LOGGER.debug( "%s: %s, version: %s, hw_ver: (%s, %s), OTA string: %s", img.key, img.file_name, img.version, img.header.minimum_hardware_version, img.header.maximum_hardware_version, img.header.header_string, ) 
return img except (OSError, ValueError): LOGGER.debug( "File '%s' doesn't appear to be a OTA image", file_name, exc_info=True ) return None def fetch_image(self) -> BaseOTAImage | None: """Load image using executor.""" loop = asyncio.get_event_loop() return loop.run_in_executor(None, self._fetch_image) def _fetch_image(self) -> BaseOTAImage | None: """Loads full OTA Image from the file.""" try: with open(self.file_name, mode="rb") as f: data = f.read() img, _ = parse_ota_image(data) return img except (OSError, ValueError): LOGGER.debug("Couldn't load '%s' OTA image", self.file_name, exc_info=True) return None class FileStore(Basic): def __init__(self) -> None: super().__init__() self._ota_dir = None @staticmethod def validate_ota_dir(ota_dir: str) -> str | None: """Return True if exists and is a dir.""" if ota_dir is None: return None if os.path.exists(ota_dir): if os.path.isdir(ota_dir): return ota_dir LOGGER.error("OTA image path '%s' is not a directory", ota_dir) else: LOGGER.debug("OTA image directory '%s' does not exist", ota_dir) return None async def initialize_provider(self, ota_config: dict) -> None: ota_dir = ota_config[CONF_OTA_DIR] self._ota_dir = self.validate_ota_dir(ota_dir) if self._ota_dir is not None: self.enable() await self.refresh_firmware_list() async def refresh_firmware_list(self) -> None: if self._ota_dir is None: return None self._cache.clear() loop = asyncio.get_event_loop() for root, _dirs, files in os.walk(self._ota_dir): for file in files: if file in SKIP_OTA_FILES: continue file_name = os.path.join(root, file) img = await loop.run_in_executor(None, FileImage.scan_image, file_name) if img is None: continue if img.key in self._cache: if img.version > self._cache[img.key].version: self.debug( "%s: Preferring '%s' over '%s'", img.key, file_name, self._cache[img.key].file_name, ) self._cache[img.key] = img elif img.version == self._cache[img.key].version: self.debug( "%s: Ignoring '%s' already have %s version", img.key, file_name, 
img.version, ) else: self.debug( "%s: Preferring '%s' over '%s'", img.key, self._cache[img.key].file_name, file_name, ) else: self._cache[img.key] = img self.update_expiration() @attr.s class INOVELLIImage: manufacturer_id = attr.ib() image_type = attr.ib() version = attr.ib() url = attr.ib() @classmethod def from_json(cls, obj: dict[str, str | int]) -> INOVELLIImage: version = int(obj["version"], 16) # Old Inovelli OTA JSON versions were in hex, they then switched back to decimal if version > 0x10: version = int(obj["version"]) return cls( manufacturer_id=obj["manufacturer_id"], image_type=obj["image_type"], version=version, url=obj["firmware"], ) @property def key(self) -> ImageKey: return ImageKey(self.manufacturer_id, self.image_type) async def fetch_image(self) -> BaseOTAImage | None: async with aiohttp.ClientSession() as req: LOGGER.debug("Downloading %s for %s", self.url, self.key) async with req.get(self.url) as rsp: data = await rsp.read() ota_image, _ = parse_ota_image(data) assert ota_image.header.key == self.key LOGGER.debug( "Finished downloading from %s for %s ver %s", self.url, self.key, self.version, ) return ota_image class Inovelli(Basic): """Inovelli OTA Firmware provider.""" UPDATE_URL = "https://files.inovelli.com/firmware/firmware-zha.json" MANUFACTURER_ID = 4655 HEADERS = {"accept": "application/json"} async def initialize_provider(self, ota_config: dict) -> None: self.info("OTA provider enabled") self.config = ota_config await self.refresh_firmware_list() self.enable() async def refresh_firmware_list(self) -> None: if self._locks[LOCK_REFRESH].locked(): return async with self._locks[LOCK_REFRESH]: async with aiohttp.ClientSession(headers=self.HEADERS) as req: async with req.get(self.UPDATE_URL) as rsp: if not (200 <= rsp.status <= 299): self.warning( "Couldn't download '%s': %s/%s", rsp.url, rsp.status, rsp.reason, ) return fw_lst = await rsp.json() self.debug("Finished downloading firmware update list") self._cache.clear() for _model, 
firmwares in fw_lst.items(): for firmware in firmwares: img = INOVELLIImage.from_json(firmware) # Only replace the previously-cached image if its version is smaller if ( img.key in self._cache and self._cache[img.key].version > img.version ): continue self._cache[img.key] = img self.update_expiration() async def filter_get_image(self, key: ImageKey) -> bool: return key.manufacturer_id != self.MANUFACTURER_ID @attr.s class ThirdRealityImage: model = attr.ib() url = attr.ib() version = attr.ib() image_type = attr.ib() manufacturer_id = attr.ib() file_version = attr.ib() @classmethod def from_json(cls, obj: dict[str, typing.Any]) -> ThirdRealityImage: return cls( model=obj["modelId"], url=obj["url"], version=obj["version"], image_type=obj["imageType"], manufacturer_id=obj["manufacturerId"], file_version=obj["fileVersion"], ) @property def key(self) -> ImageKey: return ImageKey(self.manufacturer_id, self.image_type) async def fetch_image(self) -> BaseOTAImage: async with aiohttp.ClientSession() as req: LOGGER.debug("Downloading %s for %s", self.url, self.key) async with req.get(self.url) as rsp: data = await rsp.read() ota_image, _ = parse_ota_image(data) assert ota_image.header.key == self.key LOGGER.debug( "Finished downloading from %s for %s ver %s", self.url, self.key, self.version, ) return ota_image class ThirdReality(Basic): """Third Reality OTA Firmware provider.""" UPDATE_URL = "https://tr-zha.s3.amazonaws.com/firmware.json" MANUFACTURER_IDS = (4659, 4877) HEADERS = {"accept": "application/json"} async def initialize_provider(self, ota_config: dict) -> None: self.info("OTA provider enabled") self.config = ota_config await self.refresh_firmware_list() self.enable() async def refresh_firmware_list(self) -> None: if self._locks[LOCK_REFRESH].locked(): return async with self._locks[LOCK_REFRESH]: async with aiohttp.ClientSession(headers=self.HEADERS) as req: async with req.get(self.UPDATE_URL) as rsp: if not (200 <= rsp.status <= 299): self.warning( "Couldn't 
download '%s': %s/%s", rsp.url, rsp.status, rsp.reason, ) return fw_lst = await rsp.json() self.debug("Finished downloading firmware update list") self._cache.clear() for firmware in fw_lst["versions"]: img = ThirdRealityImage.from_json(firmware) self._cache[img.key] = img self.update_expiration() async def filter_get_image(self, key: ImageKey) -> bool: return key.manufacturer_id not in self.MANUFACTURER_IDS @attr.s class RemoteImage: binary_url = attr.ib() file_version = attr.ib() image_type = attr.ib() manufacturer_id = attr.ib() changelog = attr.ib() checksum = attr.ib() # Optional min_hardware_version = attr.ib() max_hardware_version = attr.ib() min_current_file_version = attr.ib() max_current_file_version = attr.ib() @classmethod def from_json(cls, obj: dict[str, typing.Any]) -> RemoteImage: return cls( binary_url=obj["binary_url"], file_version=obj["file_version"], image_type=obj["image_type"], manufacturer_id=obj["manufacturer_id"], changelog=obj["changelog"], checksum=obj["checksum"], min_hardware_version=obj.get("min_hardware_version"), max_hardware_version=obj.get("max_hardware_version"), min_current_file_version=obj.get("min_current_file_version"), max_current_file_version=obj.get("max_current_file_version"), ) @property def key(self) -> ImageKey: return ImageKey(self.manufacturer_id, self.image_type) async def fetch_image(self) -> BaseOTAImage: async with aiohttp.ClientSession() as req: LOGGER.debug("Downloading %s for %s", self.binary_url, self.key) async with req.get(self.binary_url) as rsp: data = await rsp.read() algorithm, checksum = self.checksum.split(":") hasher = hashlib.new(algorithm) await asyncio.get_running_loop().run_in_executor(None, hasher.update, data) if hasher.hexdigest() != checksum: raise ValueError( f"Image checksum is invalid: expected {self.checksum}," f" got {hasher.hexdigest()}" ) ota_image, _ = parse_ota_image(data) LOGGER.debug("Finished downloading %s", self) return ota_image class RemoteProvider(Basic): """Generic zigpy OTA 
URL provider.""" HEADERS = {"accept": "application/json"} def __init__(self, url: str, manufacturer_ids: list[int] | None) -> None: super().__init__() self.url = url self.manufacturer_ids = manufacturer_ids async def initialize_provider(self, ota_config: dict) -> None: self.info("OTA provider enabled") await self.refresh_firmware_list() self.enable() async def refresh_firmware_list(self) -> None: if self._locks[LOCK_REFRESH].locked(): return async with self._locks[LOCK_REFRESH]: async with aiohttp.ClientSession(headers=self.HEADERS) as req: async with req.get(self.url) as rsp: if not (200 <= rsp.status <= 299): self.warning( "Couldn't download '%s': %s/%s", rsp.url, rsp.status, rsp.reason, ) return fw_lst = await rsp.json() self.debug("Finished downloading firmware update list") self._cache.clear() for obj in fw_lst: img = RemoteImage.from_json(obj) self._cache[img.key] = img self.update_expiration() async def filter_get_image(self, key: ImageKey) -> bool: if not self.manufacturer_ids: return False return key.manufacturer_id not in self.manufacturer_ids zigpy-0.62.3/zigpy/ota/validators.py000066400000000000000000000102361456054056700174310ustar00rootroot00000000000000from __future__ import annotations import enum import logging import typing import zlib from zigpy.ota.image import BaseOTAImage, ElementTagId, OTAImage VALID_SILABS_CRC = 0x2144DF1C # CRC32(anything | CRC32(anything)) == CRC32(0x00000000) LOGGER = logging.getLogger(__name__) class ValidationResult(enum.Enum): INVALID = 0 VALID = 1 UNKNOWN = 2 class ValidationError(Exception): pass def parse_silabs_ebl(data: bytes) -> typing.Iterable[tuple[bytes, bytes]]: """Parses a Silicon Labs EBL firmware image.""" if len(data) % 64 != 0: raise ValidationError( f"Image size ({len(data)}) must be a multiple of 64 bytes" ) orig_data = data while True: if len(data) < 4: raise ValidationError( "Image is truncated: not long enough to contain a valid tag" ) tag = data[:2] length = int.from_bytes(data[2:4], "big") value = 
data[4 : 4 + length] if len(value) < length: raise ValidationError("Image is truncated: tag value is cut off") data = data[4 + length :] yield tag, value # EBL end tag if tag != b"\xFC\x04": continue # At this point the EBL should contain nothing but padding if data.strip(b"\xFF"): raise ValidationError("Image padding contains invalid bytes") unpadded_image = orig_data[: -len(data)] if data else orig_data computed_crc = zlib.crc32(unpadded_image) if computed_crc != VALID_SILABS_CRC: raise ValidationError( f"Image CRC-32 is invalid:" f" expected 0x{VALID_SILABS_CRC:08X}, got 0x{computed_crc:08X}" ) break # pragma: no cover def parse_silabs_gbl(data: bytes) -> typing.Iterable[tuple[bytes, bytes]]: """Parses a Silicon Labs GBL firmware image.""" orig_data = data while True: if len(data) < 8: raise ValidationError( "Image is truncated: not long enough to contain a valid tag" ) tag = data[:4] length = int.from_bytes(data[4:8], "little") value = data[8 : 8 + length] if len(value) < length: raise ValidationError("Image is truncated: tag value is cut off") data = data[8 + length :] yield tag, value # GBL end tag if tag != b"\xFC\x04\x04\xFC": continue # GBL images aren't expected to contain padding but some are (i.e. 
Hue) unpadded_image = orig_data[: -len(data)] if data else orig_data computed_crc = zlib.crc32(unpadded_image) if computed_crc != VALID_SILABS_CRC: raise ValidationError( f"Image CRC-32 is invalid:" f" expected 0x{VALID_SILABS_CRC:08X}, got 0x{computed_crc:08X}" ) break # pragma: no cover def validate_firmware(data: bytes) -> ValidationResult: """Validates a firmware image.""" parser = None if data.startswith(b"\xEB\x17\xA6\x03"): parser = parse_silabs_gbl elif data.startswith(b"\x00\x00\x00\x8C"): parser = parse_silabs_ebl else: return ValidationResult.UNKNOWN tuple(parser(data)) return ValidationResult.VALID def validate_ota_image(image: BaseOTAImage) -> ValidationResult: """Validates a Zigbee OTA image's embedded firmwares and indicates if an image is valid, invalid, or of an unknown type. """ if not isinstance(image, OTAImage): return ValidationResult.UNKNOWN results = [] for subelement in image.subelements: if subelement.tag_id == ElementTagId.UPGRADE_IMAGE: results.append(validate_firmware(subelement.data)) if not results or any(r == ValidationResult.UNKNOWN for r in results): return ValidationResult.UNKNOWN return ValidationResult.VALID def check_invalid(image: BaseOTAImage) -> bool: """Checks if an image is invalid or not. Unknown image types are considered valid.""" try: validate_ota_image(image) return False except ValidationError as e: LOGGER.warning("Image %s is invalid: %s", image.header, e) return True zigpy-0.62.3/zigpy/profiles/000077500000000000000000000000001456054056700157455ustar00rootroot00000000000000zigpy-0.62.3/zigpy/profiles/__init__.py000066400000000000000000000002141456054056700200530ustar00rootroot00000000000000from __future__ import annotations from . 
import zgp, zha, zll PROFILES = {zha.PROFILE_ID: zha, zll.PROFILE_ID: zll, zgp.PROFILE_ID: zgp} zigpy-0.62.3/zigpy/profiles/zgp.py000066400000000000000000000011321456054056700171140ustar00rootroot00000000000000from __future__ import annotations import zigpy.types as t PROFILE_ID = 41440 class DeviceType(t.enum16): PROXY = 0x0060 PROXY_BASIC = 0x0061 TARGET_PLUS = 0x0062 TARGET = 0x0063 COMM_TOOL = 0x0064 COMBO = 0x0065 COMBO_BASIC = 0x0066 CLUSTERS = { DeviceType.PROXY: ([0x0021], [0x0021]), DeviceType.PROXY_BASIC: ([], [0x0021]), DeviceType.TARGET_PLUS: ([0x0021], [0x0021]), DeviceType.TARGET: ([0x0021], [0x0021]), DeviceType.COMM_TOOL: ([0x0021], []), DeviceType.COMBO: ([0x0021], [0x0021]), DeviceType.COMBO_BASIC: ([0x0021], [0x0021]), } zigpy-0.62.3/zigpy/profiles/zha.py000066400000000000000000000067441456054056700171140ustar00rootroot00000000000000from __future__ import annotations import zigpy.types as t PROFILE_ID = 260 class DeviceType(t.enum16): # Generic ON_OFF_SWITCH = 0x0000 LEVEL_CONTROL_SWITCH = 0x0001 ON_OFF_OUTPUT = 0x0002 LEVEL_CONTROLLABLE_OUTPUT = 0x0003 SCENE_SELECTOR = 0x0004 CONFIGURATION_TOOL = 0x0005 REMOTE_CONTROL = 0x0006 COMBINED_INTERFACE = 0x0007 RANGE_EXTENDER = 0x0008 MAIN_POWER_OUTLET = 0x0009 DOOR_LOCK = 0x000A DOOR_LOCK_CONTROLLER = 0x000B SIMPLE_SENSOR = 0x000C CONSUMPTION_AWARENESS_DEVICE = 0x000D HOME_GATEWAY = 0x0050 SMART_PLUG = 0x0051 WHITE_GOODS = 0x0052 METER_INTERFACE = 0x0053 # Lighting ON_OFF_LIGHT = 0x0100 DIMMABLE_LIGHT = 0x0101 COLOR_DIMMABLE_LIGHT = 0x0102 ON_OFF_LIGHT_SWITCH = 0x0103 DIMMER_SWITCH = 0x0104 COLOR_DIMMER_SWITCH = 0x0105 LIGHT_SENSOR = 0x0106 OCCUPANCY_SENSOR = 0x0107 # ZLO device types ON_OFF_BALLAST = 0x0108 DIMMABLE_BALLAST = 0x0109 ON_OFF_PLUG_IN_UNIT = 0x010A DIMMABLE_PLUG_IN_UNIT = 0x010B COLOR_TEMPERATURE_LIGHT = 0x010C EXTENDED_COLOR_LIGHT = 0x010D LIGHT_LEVEL_SENSOR = 0x010E # Closure SHADE = 0x0200 SHADE_CONTROLLER = 0x0201 WINDOW_COVERING_DEVICE = 0x0202 WINDOW_COVERING_CONTROLLER = 0x0203 # 
HVAC HEATING_COOLING_UNIT = 0x0300 THERMOSTAT = 0x0301 TEMPERATURE_SENSOR = 0x0302 PUMP = 0x0303 PUMP_CONTROLLER = 0x0304 PRESSURE_SENSOR = 0x0305 FLOW_SENSOR = 0x0306 MINI_SPLIT_AC = 0x0307 # Intruder Alarm Systems IAS_CONTROL = 0x0400 # IAS Control and Indicating Equipment IAS_ANCILLARY_CONTROL = 0x0401 # IAS Ancillary Control Equipment IAS_ZONE = 0x0402 IAS_WARNING_DEVICE = 0x0403 # ZLO device types, continued COLOR_CONTROLLER = 0x0800 COLOR_SCENE_CONTROLLER = 0x0810 NON_COLOR_CONTROLLER = 0x0820 NON_COLOR_SCENE_CONTROLLER = 0x0830 CONTROL_BRIDGE = 0x0840 ON_OFF_SENSOR = 0x0850 CLUSTERS = { # Generic DeviceType.ON_OFF_SWITCH: ([0x0007], [0x0004, 0x0005, 0x0006]), DeviceType.LEVEL_CONTROL_SWITCH: ([0x0007], [0x0004, 0x0005, 0x0006, 0x0008]), DeviceType.ON_OFF_OUTPUT: ([0x0004, 0x0005, 0x0006], []), DeviceType.LEVEL_CONTROLLABLE_OUTPUT: ([0x0004, 0x0005, 0x0006, 0x0008], []), DeviceType.SCENE_SELECTOR: ([], [0x0004, 0x0005]), DeviceType.REMOTE_CONTROL: ([], [0x0004, 0x0005, 0x0006, 0x0008]), DeviceType.MAIN_POWER_OUTLET: ([0x0004, 0x0005, 0x0006], []), DeviceType.SMART_PLUG: ([0x0004, 0x0005, 0x0006], []), # Lighting DeviceType.ON_OFF_LIGHT: ([0x0004, 0x0005, 0x0006, 0x0008], []), DeviceType.DIMMABLE_LIGHT: ([0x0004, 0x0005, 0x0006, 0x0008], []), DeviceType.COLOR_DIMMABLE_LIGHT: ([0x0004, 0x0005, 0x0006, 0x0008, 0x0300], []), DeviceType.ON_OFF_LIGHT_SWITCH: ([0x0007], [0x0004, 0x0005, 0x0006]), DeviceType.DIMMER_SWITCH: ([0x0007], [0x0004, 0x0005, 0x0006, 0x0008]), DeviceType.COLOR_DIMMER_SWITCH: ( [0x0007], [0x0004, 0x0005, 0x0006, 0x0008, 0x0300], ), DeviceType.LIGHT_SENSOR: ([0x0400], []), DeviceType.OCCUPANCY_SENSOR: ([0x0406], []), DeviceType.COLOR_TEMPERATURE_LIGHT: ( [0x0003, 0x0004, 0x0005, 0x0006, 0x0008, 0x0300], [], ), DeviceType.EXTENDED_COLOR_LIGHT: ( [0x0003, 0x0004, 0x0005, 0x0006, 0x0008, 0x0300], [], ), # Closures DeviceType.WINDOW_COVERING_DEVICE: ([0x0004, 0x0005, 0x0102], []), # HVAC DeviceType.THERMOSTAT: ([0x0201, 0x0204], [0x0200, 0x0202, 
0x0203]), } zigpy-0.62.3/zigpy/profiles/zll.py000066400000000000000000000031131456054056700171160ustar00rootroot00000000000000from __future__ import annotations import zigpy.types as t PROFILE_ID = 49246 class DeviceType(t.enum16): ON_OFF_LIGHT = 0x0000 ON_OFF_PLUGIN_UNIT = 0x0010 DIMMABLE_LIGHT = 0x0100 DIMMABLE_PLUGIN_UNIT = 0x0110 COLOR_LIGHT = 0x0200 EXTENDED_COLOR_LIGHT = 0x0210 COLOR_TEMPERATURE_LIGHT = 0x0220 COLOR_CONTROLLER = 0x0800 COLOR_SCENE_CONTROLLER = 0x0810 CONTROLLER = 0x0820 SCENE_CONTROLLER = 0x0830 CONTROL_BRIDGE = 0x0840 ON_OFF_SENSOR = 0x0850 CLUSTERS = { DeviceType.ON_OFF_LIGHT: ([0x0004, 0x0005, 0x0006, 0x0008, 0x1000], []), DeviceType.ON_OFF_PLUGIN_UNIT: ([0x0004, 0x0005, 0x0006, 0x0008, 0x1000], []), DeviceType.DIMMABLE_LIGHT: ([0x0004, 0x0005, 0x0006, 0x0008, 0x1000], []), DeviceType.DIMMABLE_PLUGIN_UNIT: ([0x0004, 0x0005, 0x0006, 0x0008, 0x1000], []), DeviceType.COLOR_LIGHT: ([0x0004, 0x0005, 0x0006, 0x0008, 0x0300, 0x1000], []), DeviceType.EXTENDED_COLOR_LIGHT: ( [0x0004, 0x0005, 0x0006, 0x0008, 0x0300, 0x1000], [], ), DeviceType.COLOR_TEMPERATURE_LIGHT: ( [0x0004, 0x0005, 0x0006, 0x0008, 0x0300, 0x1000], [], ), DeviceType.COLOR_CONTROLLER: ([], [0x0004, 0x0006, 0x0008, 0x0300]), DeviceType.COLOR_SCENE_CONTROLLER: ([], [0x0004, 0x0005, 0x0006, 0x0008, 0x0300]), DeviceType.CONTROLLER: ([], [0x0004, 0x0006, 0x0008]), DeviceType.SCENE_CONTROLLER: ([], [0x0004, 0x0005, 0x0006, 0x0008]), DeviceType.CONTROL_BRIDGE: ([], [0x0004, 0x0005, 0x0006, 0x0008, 0x0300]), DeviceType.ON_OFF_SENSOR: ([], [0x0004, 0x0005, 0x0006, 0x0008, 0x0300]), } zigpy-0.62.3/zigpy/quirks/000077500000000000000000000000001456054056700154405ustar00rootroot00000000000000zigpy-0.62.3/zigpy/quirks/__init__.py000066400000000000000000000272631456054056700175630ustar00rootroot00000000000000from __future__ import annotations import logging import typing from zigpy.const import ( # noqa: F401 SIG_ENDPOINTS, SIG_EP_INPUT, SIG_EP_OUTPUT, SIG_EP_PROFILE, SIG_EP_TYPE, 
SIG_MANUFACTURER, SIG_MODEL, SIG_MODELS_INFO, SIG_NODE_DESC, SIG_SKIP_CONFIG, ) import zigpy.device import zigpy.endpoint from zigpy.quirks.registry import DeviceRegistry # noqa: F401 import zigpy.types as t from zigpy.types.basic import uint16_t import zigpy.zcl import zigpy.zcl.foundation as foundation if typing.TYPE_CHECKING: from zigpy.application import ControllerApplication _LOGGER = logging.getLogger(__name__) _DEVICE_REGISTRY = DeviceRegistry() _uninitialized_device_message_handlers = [] def get_device( device: zigpy.device.Device, registry: DeviceRegistry | None = None ) -> zigpy.device.Device: """Get a CustomDevice object, if one is available""" if registry is None: return _DEVICE_REGISTRY.get_device(device) return registry.get_device(device) def get_quirk_list( manufacturer: str, model: str, registry: DeviceRegistry | None = None ): """Get the Quirk list for a given manufacturer and model.""" if registry is None: return _DEVICE_REGISTRY.registry[manufacturer][model] return registry.registry[manufacturer][model] def register_uninitialized_device_message_handler(handler: typing.Callable) -> None: """Register an handler for messages received by uninitialized devices. 
each handler is passed same parameters as zigpy.application.ControllerApplication.handle_message """ if handler not in _uninitialized_device_message_handlers: _uninitialized_device_message_handlers.append(handler) class CustomDevice(zigpy.device.Device): replacement: dict[str, typing.Any] = {} signature = None def __init_subclass__(cls) -> None: if getattr(cls, "signature", None) is not None: _DEVICE_REGISTRY.add_to_registry(cls) def __init__( self, application: ControllerApplication, ieee: t.EUI64, nwk: t.NWK, replaces: zigpy.device.Device, ) -> None: super().__init__(application, ieee, nwk) def set_device_attr(attr): if attr in self.replacement: setattr(self, attr, self.replacement[attr]) else: setattr(self, attr, getattr(replaces, attr)) for attr in ("lqi", "rssi", "last_seen", "relays"): setattr(self, attr, getattr(replaces, attr)) set_device_attr("status") set_device_attr(SIG_NODE_DESC) set_device_attr(SIG_MANUFACTURER) set_device_attr(SIG_MODEL) set_device_attr(SIG_SKIP_CONFIG) for endpoint_id, _endpoint in self.replacement.get(SIG_ENDPOINTS, {}).items(): self.add_endpoint(endpoint_id, replace_device=replaces) def add_endpoint( self, endpoint_id: int, replace_device: zigpy.device.Device | None = None ) -> zigpy.endpoint.Endpoint: if endpoint_id not in self.replacement.get(SIG_ENDPOINTS, {}): return super().add_endpoint(endpoint_id) endpoints = self.replacement[SIG_ENDPOINTS] if isinstance(endpoints[endpoint_id], tuple): custom_ep_type = endpoints[endpoint_id][0] replacement_data = endpoints[endpoint_id][1] else: custom_ep_type = CustomEndpoint replacement_data = endpoints[endpoint_id] ep = custom_ep_type(self, endpoint_id, replacement_data, replace_device) self.endpoints[endpoint_id] = ep return ep class CustomEndpoint(zigpy.endpoint.Endpoint): def __init__( self, device: CustomDevice, endpoint_id: int, replacement_data: dict[str, typing.Any], replace_device: zigpy.device.Device, ) -> None: super().__init__(device, endpoint_id) def set_device_attr(attr): if 
attr in replacement_data: setattr(self, attr, replacement_data[attr]) else: setattr(self, attr, getattr(replace_device[endpoint_id], attr)) set_device_attr(SIG_EP_PROFILE) set_device_attr(SIG_EP_TYPE) self.status = zigpy.endpoint.Status.ZDO_INIT for c in replacement_data.get(SIG_EP_INPUT, []): if isinstance(c, int): cluster = None cluster_id = c else: cluster = c(self, is_server=True) cluster_id = cluster.cluster_id self.add_input_cluster(cluster_id, cluster) for c in replacement_data.get(SIG_EP_OUTPUT, []): if isinstance(c, int): cluster = None cluster_id = c else: cluster = c(self, is_server=False) cluster_id = cluster.cluster_id self.add_output_cluster(cluster_id, cluster) class CustomCluster(zigpy.zcl.Cluster): _skip_registry = True _CONSTANT_ATTRIBUTES: dict[int, typing.Any] | None = None manufacturer_id_override: t.uint16_t | None = None @property def _is_manuf_specific(self) -> bool: """Return True if cluster_id is within manufacturer specific range.""" return 0xFC00 <= self.cluster_id <= 0xFFFF def _has_manuf_attr(self, attrs_to_process: typing.Iterable | list | dict) -> bool: """Return True if contains a manufacturer specific attribute.""" if self._is_manuf_specific: return True for attr_id in attrs_to_process: if ( attr_id in self.attributes and self.attributes[attr_id].is_manufacturer_specific ): return True return False async def command( self, command_id: foundation.GeneralCommand | int | t.uint8_t, *args, manufacturer: int | t.uint16_t | None = None, expect_reply: bool = True, tsn: int | t.uint8_t | None = None, **kwargs: typing.Any, ) -> typing.Coroutine: command = self.server_commands[command_id] if manufacturer is None and ( self._is_manuf_specific or command.is_manufacturer_specific ): manufacturer = self.endpoint.manufacturer_id return await self.request( False, command.id, command.schema, *args, manufacturer=manufacturer, expect_reply=expect_reply, tsn=tsn, **kwargs, ) async def client_command( self, command_id: foundation.GeneralCommand | int | 
t.uint8_t, *args, manufacturer: int | t.uint16_t | None = None, tsn: int | t.uint8_t | None = None, **kwargs: typing.Any, ): command = self.client_commands[command_id] if manufacturer is None and ( self._is_manuf_specific or command.is_manufacturer_specific ): manufacturer = self.endpoint.manufacturer_id return await self.reply( False, command.id, command.schema, *args, manufacturer=manufacturer, tsn=tsn, **kwargs, ) async def read_attributes_raw( self, attributes: list[uint16_t], manufacturer: uint16_t | None = None ): if not self._CONSTANT_ATTRIBUTES: return await super().read_attributes_raw( attributes, manufacturer=manufacturer ) succeeded = [ foundation.ReadAttributeRecord( attr, foundation.Status.SUCCESS, foundation.TypeValue() ) for attr in attributes if attr in self._CONSTANT_ATTRIBUTES ] for record in succeeded: record.value.value = self._CONSTANT_ATTRIBUTES[record.attrid] attrs_to_read = [ attr for attr in attributes if attr not in self._CONSTANT_ATTRIBUTES ] if not attrs_to_read: return [succeeded] results = await super().read_attributes_raw( attrs_to_read, manufacturer=manufacturer ) if not isinstance(results[0], list): for attrid in attrs_to_read: succeeded.append( foundation.ReadAttributeRecord( attrid, results[0], foundation.TypeValue(), ) ) else: succeeded.extend(results[0]) return [succeeded] async def _configure_reporting( # type:ignore[override] self, config_records: list[foundation.AttributeReportingConfig], *args, manufacturer: int | t.uint16_t | None = None, **kwargs, ): """Configure reporting ZCL foundation command.""" if manufacturer is None and self._has_manuf_attr( [a.attrid for a in config_records] ): manufacturer = self.endpoint.manufacturer_id return await super()._configure_reporting( config_records, *args, manufacturer=manufacturer, **kwargs, ) async def _read_attributes( # type:ignore[override] self, attribute_ids: list[t.uint16_t], *args, manufacturer: int | t.uint16_t | None = None, **kwargs, ): """Read attributes ZCL foundation 
command.""" if manufacturer is None and self._has_manuf_attr(attribute_ids): manufacturer = self.endpoint.manufacturer_id return await super()._read_attributes( attribute_ids, *args, manufacturer=manufacturer, **kwargs ) async def _write_attributes( # type:ignore[override] self, attributes: list[foundation.Attribute], *args, manufacturer: int | t.uint16_t | None = None, **kwargs, ): """Write attribute ZCL foundation command.""" if manufacturer is None and self._has_manuf_attr( [a.attrid for a in attributes] ): manufacturer = self.endpoint.manufacturer_id return await super()._write_attributes( attributes, *args, manufacturer=manufacturer, **kwargs ) async def _write_attributes_undivided( # type:ignore[override] self, attributes: list[foundation.Attribute], *args, manufacturer: int | t.uint16_t | None = None, **kwargs, ): """Write attribute undivided ZCL foundation command.""" if manufacturer is None and self._has_manuf_attr( [a.attrid for a in attributes] ): manufacturer = self.endpoint.manufacturer_id return await super()._write_attributes_undivided( attributes, *args, manufacturer=manufacturer, **kwargs ) def get(self, key: int | str, default: typing.Any | None = None) -> typing.Any: """Get cached attribute.""" try: attr_def = self.find_attribute(key) except KeyError: return super().get(key, default) # Ensure we check the constant attributes dictionary first, since their values # will not be in the attribute cache but can be read immediately. 
if ( self._CONSTANT_ATTRIBUTES is not None and attr_def.id in self._CONSTANT_ATTRIBUTES ): return self._CONSTANT_ATTRIBUTES[attr_def.id] return super().get(key, default) def handle_message_from_uninitialized_sender( sender: zigpy.device.Device, profile: int, cluster: int, src_ep: int, dst_ep: int, message: bytes, ) -> None: """Processes message from an uninitialized sender.""" for handler in _uninitialized_device_message_handlers: if handler(sender, profile, cluster, src_ep, dst_ep, message): break zigpy-0.62.3/zigpy/quirks/registry.py000066400000000000000000000132021456054056700176600ustar00rootroot00000000000000from __future__ import annotations import collections import itertools import logging import typing from zigpy.const import ( SIG_ENDPOINTS, SIG_EP_INPUT, SIG_EP_OUTPUT, SIG_EP_PROFILE, SIG_EP_TYPE, SIG_MANUFACTURER, SIG_MODEL, SIG_MODELS_INFO, ) import zigpy.quirks from zigpy.typing import CustomDeviceType, DeviceType _LOGGER = logging.getLogger(__name__) TYPE_MANUF_QUIRKS_DICT = typing.Dict[ typing.Optional[str], typing.Dict[typing.Optional[str], typing.List["zigpy.quirks.CustomDevice"]], ] class DeviceRegistry: def __init__(self, *args, **kwargs) -> None: self._registry: TYPE_MANUF_QUIRKS_DICT = collections.defaultdict( lambda: collections.defaultdict(list) ) def add_to_registry(self, custom_device: CustomDeviceType) -> None: """Add a device to the registry""" models_info = custom_device.signature.get(SIG_MODELS_INFO) if models_info: for manuf, model in models_info: if custom_device not in self.registry[manuf][model]: self.registry[manuf][model].insert(0, custom_device) else: manufacturer = custom_device.signature.get(SIG_MANUFACTURER) model = custom_device.signature.get(SIG_MODEL) if custom_device not in self.registry[manufacturer][model]: self.registry[manufacturer][model].insert(0, custom_device) def remove(self, custom_device: CustomDeviceType) -> None: models_info = custom_device.signature.get(SIG_MODELS_INFO) if models_info: for manuf, model in 
models_info: self.registry[manuf][model].remove(custom_device) else: manufacturer = custom_device.signature.get(SIG_MANUFACTURER) model = custom_device.signature.get(SIG_MODEL) self.registry[manufacturer][model].remove(custom_device) def get_device(self, device: DeviceType) -> CustomDeviceType | DeviceType: """Get a CustomDevice object, if one is available""" if isinstance(device, zigpy.quirks.CustomDevice): return device dev_ep = set(device.endpoints) - {0} _LOGGER.debug( "Checking quirks for %s %s (%s)", device.manufacturer, device.model, device.ieee, ) for candidate in itertools.chain( self.registry[device.manufacturer][device.model], self.registry[device.manufacturer][None], self.registry[None][device.model], self.registry[None][None], ): _LOGGER.debug("Considering %s", candidate) if device.model != candidate.signature.get(SIG_MODEL, device.model): _LOGGER.debug("Fail, because device model mismatch: '%s'", device.model) continue if device.manufacturer != candidate.signature.get( SIG_MANUFACTURER, device.manufacturer ): _LOGGER.debug( "Fail, because device manufacturer mismatch: '%s'", device.manufacturer, ) continue sig = candidate.signature.get(SIG_ENDPOINTS) if sig is None: continue if not self._match(sig, dev_ep): _LOGGER.debug( "Fail because endpoint list mismatch: %s %s", set(sig.keys()), dev_ep, ) continue if not all( device[eid].profile_id == sig[eid].get(SIG_EP_PROFILE, device[eid].profile_id) for eid in sig ): _LOGGER.debug( "Fail because profile_id mismatch on at least one endpoint" ) continue if not all( device[eid].device_type == sig[eid].get(SIG_EP_TYPE, device[eid].device_type) for eid in sig ): _LOGGER.debug( "Fail because device_type mismatch on at least one endpoint" ) continue if not all( self._match(device[eid].in_clusters, ep.get(SIG_EP_INPUT, [])) for eid, ep in sig.items() ): _LOGGER.debug( "Fail because input cluster mismatch on at least one endpoint" ) continue if not all( self._match(device[eid].out_clusters, ep.get(SIG_EP_OUTPUT, [])) 
for eid, ep in sig.items() ): _LOGGER.debug( "Fail because output cluster mismatch on at least one endpoint" ) continue _LOGGER.debug( "Found custom device replacement for %s: %s", device.ieee, candidate ) device = candidate(device._application, device.ieee, device.nwk, device) break return device @staticmethod def _match(a: dict | typing.Iterable, b: dict | typing.Iterable) -> bool: return set(a) == set(b) @property def registry(self) -> TYPE_MANUF_QUIRKS_DICT: return self._registry def __contains__(self, device: CustomDeviceType) -> bool: manufacturer, model = device.signature.get( SIG_MODELS_INFO, [(device.signature.get(SIG_MANUFACTURER), device.signature.get(SIG_MODEL))], )[0] return device in itertools.chain( self.registry[manufacturer][model], self.registry[manufacturer][None], self.registry[None][None], ) zigpy-0.62.3/zigpy/serial.py000066400000000000000000000031101456054056700157460ustar00rootroot00000000000000from __future__ import annotations import asyncio import logging import typing import urllib.parse import async_timeout import serial as pyserial LOGGER = logging.getLogger(__name__) DEFAULT_SOCKET_PORT = 6638 SOCKET_CONNECT_TIMEOUT = 5 try: import serial_asyncio_fast as pyserial_asyncio LOGGER.info("Using pyserial-asyncio-fast in place of pyserial-asyncio") except ImportError: import serial_asyncio as pyserial_asyncio async def create_serial_connection( loop: asyncio.BaseEventLoop, protocol_factory: typing.Callable[[], asyncio.Protocol], url: str, *, parity=pyserial.PARITY_NONE, stopbits=pyserial.STOPBITS_ONE, **kwargs: typing.Any, ) -> tuple[asyncio.Transport, asyncio.Protocol]: """Wrapper around pyserial-asyncio that transparently substitutes a normal TCP transport and protocol when a `socket` connection URI is provided. 
""" baudrate: int | None = kwargs.get("baudrate") LOGGER.debug("Opening a serial connection to %r (%s baudrate)", url, baudrate) parsed_url = urllib.parse.urlparse(url) if parsed_url.scheme in ("socket", "tcp"): async with async_timeout.timeout(SOCKET_CONNECT_TIMEOUT): transport, protocol = await loop.create_connection( protocol_factory=protocol_factory, host=parsed_url.hostname, port=parsed_url.port or DEFAULT_SOCKET_PORT, ) else: transport, protocol = await pyserial_asyncio.create_serial_connection( loop, protocol_factory, url=url, **kwargs ) return transport, protocol zigpy-0.62.3/zigpy/state.py000066400000000000000000000270241456054056700156210ustar00rootroot00000000000000"""Classes to implement status of the application controller.""" from __future__ import annotations from collections.abc import Iterable import dataclasses from dataclasses import InitVar import functools from typing import Any, Iterator import zigpy.config as conf import zigpy.types as t import zigpy.util import zigpy.zdo.types as zdo_t LOGICAL_TYPE_TO_JSON = { zdo_t.LogicalType.Coordinator: "coordinator", zdo_t.LogicalType.Router: "router", zdo_t.LogicalType.EndDevice: "end_device", } JSON_TO_LOGICAL_TYPE = {v: k for k, v in LOGICAL_TYPE_TO_JSON.items()} @dataclasses.dataclass class Key(t.BaseDataclassMixin): """APS/TC Link key.""" key: t.KeyData = dataclasses.field(default_factory=lambda: t.KeyData.UNKNOWN) tx_counter: t.uint32_t = 0 rx_counter: t.uint32_t = 0 seq: t.uint8_t = 0 partner_ieee: t.EUI64 = dataclasses.field(default_factory=lambda: t.EUI64.UNKNOWN) def as_dict(self) -> dict[str, Any]: return { "key": str(t.KeyData(self.key)), "tx_counter": self.tx_counter, "rx_counter": self.rx_counter, "seq": self.seq, "partner_ieee": str(self.partner_ieee), } @classmethod def from_dict(cls, obj: dict[str, Any]) -> Key: return cls( key=t.KeyData.convert(obj["key"]), tx_counter=obj["tx_counter"], rx_counter=obj["rx_counter"], seq=obj["seq"], partner_ieee=t.EUI64.convert(obj["partner_ieee"]), ) 
@dataclasses.dataclass class NodeInfo(t.BaseDataclassMixin): """Controller Application network Node information.""" nwk: t.NWK = t.NWK(0xFFFE) ieee: t.EUI64 = dataclasses.field(default_factory=lambda: t.EUI64.UNKNOWN) logical_type: zdo_t.LogicalType = zdo_t.LogicalType.EndDevice # Device information model: str | None = None manufacturer: str | None = None version: str | None = None def as_dict(self) -> dict[str, Any]: return { "nwk": str(self.nwk)[2:], "ieee": str(self.ieee), "logical_type": LOGICAL_TYPE_TO_JSON[self.logical_type], "model": self.model, "manufacturer": self.manufacturer, "version": self.version, } @classmethod def from_dict(cls, obj: dict[str, Any]) -> NodeInfo: return cls( nwk=t.NWK.convert(obj["nwk"]), ieee=t.EUI64.convert(obj["ieee"]), logical_type=JSON_TO_LOGICAL_TYPE[obj["logical_type"]], model=obj["model"], manufacturer=obj["manufacturer"], version=obj["version"], ) @dataclasses.dataclass class NetworkInfo(t.BaseDataclassMixin): """Network information.""" extended_pan_id: t.ExtendedPanId = dataclasses.field( default_factory=lambda: t.ExtendedPanId.UNKNOWN ) pan_id: t.PanId = t.PanId(0xFFFE) nwk_update_id: t.uint8_t = t.uint8_t(0x00) nwk_manager_id: t.NWK = t.NWK(0x0000) channel: t.uint8_t = 0 channel_mask: t.Channels = t.Channels.NO_CHANNELS security_level: t.uint8_t = 0 network_key: Key = dataclasses.field(default_factory=Key) tc_link_key: Key = dataclasses.field( default_factory=lambda: Key( key=conf.CONF_NWK_TC_LINK_KEY_DEFAULT, tx_counter=0, rx_counter=0, seq=0, partner_ieee=t.EUI64.UNKNOWN, ) ) key_table: list[Key] = dataclasses.field(default_factory=list) children: list[t.EUI64] = dataclasses.field(default_factory=list) # If exposed by the stack, NWK addresses of other connected devices on the network nwk_addresses: dict[t.EUI64, t.NWK] = dataclasses.field(default_factory=dict) # dict to keep track of stack-specific network information. # Z-Stack, for example, has a TCLK_SEED that should be backed up. 
stack_specific: dict[str, Any] = dataclasses.field(default_factory=dict) # Internal metadata not directly used for network restoration metadata: dict[str, Any] = dataclasses.field(default_factory=dict) # Package generating the network information source: str | None = None def as_dict(self) -> dict[str, Any]: return { "extended_pan_id": str(self.extended_pan_id), "pan_id": str(t.PanId(self.pan_id))[2:], "nwk_update_id": self.nwk_update_id, "nwk_manager_id": str(t.NWK(self.nwk_manager_id))[2:], "channel": self.channel, "channel_mask": list(self.channel_mask), "security_level": self.security_level, "network_key": self.network_key.as_dict(), "tc_link_key": self.tc_link_key.as_dict(), "key_table": [key.as_dict() for key in self.key_table], "children": sorted(str(ieee) for ieee in self.children), "nwk_addresses": { str(ieee): str(t.NWK(nwk))[2:] for ieee, nwk in sorted(self.nwk_addresses.items()) }, "stack_specific": self.stack_specific, "metadata": self.metadata, "source": self.source, } @classmethod def from_dict(cls, obj: dict[str, Any]) -> NetworkInfo: return cls( extended_pan_id=t.ExtendedPanId.convert(obj["extended_pan_id"]), pan_id=t.PanId.convert(obj["pan_id"]), nwk_update_id=obj["nwk_update_id"], nwk_manager_id=t.NWK.convert(obj["nwk_manager_id"]), channel=obj["channel"], channel_mask=t.Channels.from_channel_list(obj["channel_mask"]), security_level=obj["security_level"], network_key=Key.from_dict(obj["network_key"]), tc_link_key=Key.from_dict(obj["tc_link_key"]), key_table=sorted( (Key.from_dict(o) for o in obj["key_table"]), key=lambda k: k.partner_ieee, ), children=[t.EUI64.convert(ieee) for ieee in obj["children"]], nwk_addresses={ t.EUI64.convert(ieee): t.NWK.convert(nwk) for ieee, nwk in obj["nwk_addresses"].items() }, stack_specific=obj["stack_specific"], metadata=obj["metadata"], source=obj["source"], ) @dataclasses.dataclass class Counter(t.BaseDataclassMixin): """Ever increasing Counter.""" name: str initial_value: InitVar[int] = 0 _raw_value: int = 
dataclasses.field(init=False, default=0) reset_count: int = dataclasses.field(init=False, default=0) _last_reset_value: int = dataclasses.field(init=False, default=0) def __eq__(self, other) -> bool: """Compare two counters.""" if isinstance(other, self.__class__): return self.value == other.value return self.value == other def __int__(self) -> int: """Return int of the current value.""" return self.value def __post_init__(self, initial_value: int) -> None: """Initialize instance.""" self._raw_value = initial_value def __str__(self) -> str: """String representation.""" return f"{self.name} = {self.value}" @property def value(self) -> int: """Current value of the counter.""" return self._last_reset_value + self._raw_value def update(self, new_value: int) -> None: """Update counter value.""" if new_value == self._raw_value: return diff = new_value - self._raw_value if diff < 0: # Roll over or reset self.reset_and_update(new_value) return self._raw_value = new_value def increment(self, increment: int = 1) -> None: """Increment current value by increment.""" assert increment >= 0 self._raw_value += increment def reset_and_update(self, value: int) -> None: """Clear (rollover event) and optionally update.""" self._last_reset_value = self.value self._raw_value = value self.reset_count += 1 reset = functools.partialmethod(reset_and_update, 0) class CounterGroup(dict): """Named collection of related counters.""" def __init__( self, collection_name: str | None = None, ) -> None: """Initialize instance.""" self._name: str | None = collection_name super().__init__() def counters(self) -> Iterable[Counter]: """Return an iterable of the counters""" return (counter for counter in self.values() if isinstance(counter, Counter)) def groups(self) -> Iterable[CounterGroup]: """Return an iterable of the counter groups""" return (group for group in self.values() if isinstance(group, CounterGroup)) def tags(self) -> Iterable[int | str]: """Return an iterable if tags""" return (group.name 
for group in self.groups()) def __missing__(self, counter_id: Any) -> Counter: """Default counter factory.""" counter = Counter(counter_id) self[counter_id] = counter return counter def __repr__(self) -> str: """Representation magic method.""" counters = ( f"{counter.__class__.__name__}('{counter.name}', {int(counter)})" for counter in self.counters() ) counters = ", ".join(counters) return f"{self.__class__.__name__}('{self.name}', {{{counters}}})" def __str__(self) -> str: """String magic method.""" counters = [str(counter) for counter in self.counters()] return f"{self.name}: [{', '.join(counters)}]" @property def name(self) -> str: """Return counter collection name.""" return self._name if self._name is not None else "No Name" def increment(self, name: int | str, *tags: int | str) -> None: """Create and Update all counters recursively.""" if tags: tag, *rest = tags self.setdefault(tag, CounterGroup(tag)) self[tag][name].increment() self[tag].increment(name, *rest) return def reset(self) -> None: """Clear and rollover counters.""" for counter in self.values(): counter.reset() class CounterGroups(dict): """A collection of unrelated counter groups in a dict.""" def __iter__(self) -> Iterator[CounterGroup]: """Return an iterable of the counters""" return iter(self.values()) def __missing__(self, counter_group_name: Any) -> CounterGroup: """Default counter factory.""" counter_group = CounterGroup(counter_group_name) super().__setitem__(counter_group_name, counter_group) return counter_group @dataclasses.dataclass class State: node_info: NodeInfo = dataclasses.field(default_factory=NodeInfo) network_info: NetworkInfo = dataclasses.field(default_factory=NetworkInfo) counters: CounterGroups = dataclasses.field(init=False, default=None) broadcast_counters: CounterGroups = dataclasses.field(init=False, default=None) device_counters: CounterGroups = dataclasses.field(init=False, default=None) group_counters: CounterGroups = dataclasses.field(init=False, default=None) def 
__post_init__(self) -> None: """Initialize default counters.""" for col_name in ("", "broadcast_", "device_", "group_"): setattr(self, f"{col_name}counters", CounterGroups()) @property @zigpy.util.deprecated("`network_information` has been renamed to `network_info`") def network_information(self) -> NetworkInfo: return self.network_info @property @zigpy.util.deprecated("`node_information` has been renamed to `node_info`") def node_information(self) -> NodeInfo: return self.node_info zigpy-0.62.3/zigpy/topology.py000066400000000000000000000205151456054056700163530ustar00rootroot00000000000000"""Topology builder.""" from __future__ import annotations import asyncio import collections import itertools import logging import random import typing import zigpy.config import zigpy.device import zigpy.types as t import zigpy.util import zigpy.zdo.types as zdo_t LOGGER = logging.getLogger(__name__) REQUEST_DELAY = (1.0, 1.5) if typing.TYPE_CHECKING: import zigpy.application RETRY_SLOW = zigpy.util.retryable_request(tries=3, delay=1) class ScanNotSupported(Exception): pass INVALID_NEIGHBOR_IEEES = { t.EUI64.convert("00:00:00:00:00:00:00:00"), t.EUI64.convert("ff:ff:ff:ff:ff:ff:ff:ff"), } class Topology(zigpy.util.ListenableMixin): """Topology scanner.""" def __init__(self, app: zigpy.application.ControllerApplication) -> None: """Instantiate.""" self._app: zigpy.application.ControllerApplication = app self._listeners: dict = {} self._scan_task: asyncio.Task | None = None self._scan_loop_task: asyncio.Task | None = None # Keep track of devices that do not support scanning self._neighbors_unsupported: set[t.EUI64] = set() self._routes_unsupported: set[t.EUI64] = set() self.neighbors: dict[t.EUI64, list[zdo_t.Neighbor]] = collections.defaultdict( list ) self.routes: dict[t.EUI64, list[zdo_t.Route]] = collections.defaultdict(list) def start_periodic_scans(self, period: int | float) -> None: self.stop_periodic_scans() self._scan_loop_task = 
asyncio.create_task(self._scan_loop(period)) def stop_periodic_scans(self) -> None: if self._scan_loop_task is not None: self._scan_loop_task.cancel() async def _scan_loop(self, period: int | float) -> None: """Delay scan by creating a task.""" while True: await asyncio.sleep(period) # Don't run a scheduled scan if a scan is already running if self._scan_task is not None and not self._scan_task.done(): continue LOGGER.debug("Starting scheduled neighbor scan") try: await self.scan() except asyncio.CancelledError: # We explicitly catch a cancellation here to ensure the scan loop will # not be interrupted if a manual scan is initiated LOGGER.debug("Topology scan cancelled") except (Exception, asyncio.CancelledError): LOGGER.debug("Topology scan failed", exc_info=True) async def scan( self, devices: typing.Iterable[zigpy.device.Device] | None = None ) -> None: """Preempt Topology scan and reschedule.""" if self._scan_task and not self._scan_task.done(): LOGGER.debug("Cancelling old scanning task") self._scan_task.cancel() self._scan_task = asyncio.create_task(self._scan(devices)) await self._scan_task async def _scan_table( self, scan_request: typing.Callable, entries_attr: str ) -> list[typing.Any]: """Scan a device table by sending ZDO requests.""" index = 0 table = [] while True: status, rsp = await RETRY_SLOW(scan_request)(index) if status != zdo_t.Status.SUCCESS: raise ScanNotSupported() entries = getattr(rsp, entries_attr) table.extend(entries) index += len(entries) # We intentionally sleep after every request, even the last one, to simplify # delay logic when scanning many devices in quick succession await asyncio.sleep(random.uniform(*REQUEST_DELAY)) if index >= rsp.Entries or not entries: break return table async def _scan_neighbors( self, device: zigpy.device.Device ) -> list[zdo_t.Neighbor]: if device.ieee in self._neighbors_unsupported: return [] LOGGER.debug("Scanning neighbors of %s", device) try: table = await self._scan_table(device.zdo.Mgmt_Lqi_req, 
"NeighborTableList") except ScanNotSupported: table = [] self._neighbors_unsupported.add(device.ieee) return [n for n in table if n.ieee not in INVALID_NEIGHBOR_IEEES] async def _scan_routes(self, device: zigpy.device.Device) -> list[zdo_t.Route]: if device.ieee in self._routes_unsupported: return [] LOGGER.debug("Scanning routing table of %s", device) try: table = await self._scan_table(device.zdo.Mgmt_Rtg_req, "RoutingTableList") except ScanNotSupported: table = [] self._routes_unsupported.add(device.ieee) return table async def _scan( self, devices: typing.Iterable[zigpy.device.Device] | None = None ) -> None: """Scan topology.""" if devices is None: # We iterate over a copy of the devices as opposed to the live dictionary devices = list(self._app.devices.values()) for index, device in enumerate(devices): LOGGER.debug( "Scanning topology (%d/%d) of %s", index + 1, len(devices), device ) # Ignore devices that aren't routers if device.node_desc is None or not ( device.node_desc.is_router or device.node_desc.is_coordinator ): continue # Ignore devices that do not support scanning tables if ( device.ieee in self._neighbors_unsupported and device.ieee in self._routes_unsupported ): continue # Some coordinators have issues when performing loopback scans if ( self._app.config[zigpy.config.CONF_TOPO_SKIP_COORDINATOR] and device is self._app._device ): continue try: self.neighbors[device.ieee] = await self._scan_neighbors(device) except Exception as e: LOGGER.debug("Failed to scan neighbors of %s", device, exc_info=e) else: LOGGER.info( "Scanned neighbors of %s: %s", device, self.neighbors[device.ieee] ) self.listener_event( "neighbors_updated", device.ieee, self.neighbors[device.ieee] ) try: # Filter out inactive routes routes = await self._scan_routes(device) self.routes[device.ieee] = [ route for route in routes if route.RouteStatus != zdo_t.RouteStatus.Inactive ] except Exception as e: LOGGER.debug("Failed to scan routes of %s", device, exc_info=e) else: LOGGER.info( 
"Scanned routes of %s: %s", device, self.routes[device.ieee] ) self.listener_event("routes_updated", device.ieee, self.routes[device.ieee]) LOGGER.debug("Finished scanning neighbors for all devices") await self._find_unknown_devices(neighbors=self.neighbors, routes=self.routes) async def _find_unknown_devices( self, *, neighbors: dict[t.EUI64, list[zdo_t.Neighbor]], routes: dict[t.EUI64, list[zdo_t.Route]], ) -> None: """Discover unknown devices discovered during topology scanning""" # Build a list of unknown devices from the topology scan unknown_nwks = set() for neighbor in itertools.chain.from_iterable(neighbors.values()): try: self._app.get_device(nwk=neighbor.nwk) except KeyError: unknown_nwks.add(neighbor.nwk) for route in itertools.chain.from_iterable(routes.values()): # Ignore inactive or pending routes if route.RouteStatus != zdo_t.RouteStatus.Active: continue for nwk in (route.DstNWK, route.NextHop): try: self._app.get_device(nwk=nwk) except KeyError: unknown_nwks.add(nwk) # Try to discover any unknown devices for nwk in unknown_nwks: LOGGER.debug("Found unknown device nwk=%s", nwk) await self._app._discover_unknown_device(nwk) await asyncio.sleep(random.uniform(*REQUEST_DELAY)) zigpy-0.62.3/zigpy/types/000077500000000000000000000000001456054056700152665ustar00rootroot00000000000000zigpy-0.62.3/zigpy/types/__init__.py000066400000000000000000000006551456054056700174050ustar00rootroot00000000000000from __future__ import annotations from .basic import * # noqa: F401,F403 from .named import * # noqa: F401,F403 from .struct import * # noqa: F401,F403 def deserialize(data, schema): result = [] for type_ in schema: value, data = type_.deserialize(data) result.append(value) return result, data def serialize(data, schema): return b"".join(t(v).serialize() for t, v in zip(schema, data)) zigpy-0.62.3/zigpy/types/basic.py000066400000000000000000000637461456054056700167410ustar00rootroot00000000000000from __future__ import annotations import enum import inspect import 
struct import sys import typing CALLABLE_T = typing.TypeVar("CALLABLE_T", bound=typing.Callable) T = typing.TypeVar("T") class Bits(list): @classmethod def from_bitfields(cls, fields): instance = cls() # Little endian, so [11, 1000, 00] will be packed as 00_1000_11 for field in fields[::-1]: instance.extend(field.bits()) return instance def serialize(self) -> bytes: if len(self) % 8 != 0: raise ValueError(f"Cannot serialize {len(self)} bits into bytes: {self}") serialized_bytes = [] for index in range(0, len(self), 8): byte = 0x00 for bit in self[index : index + 8]: byte <<= 1 byte |= bit serialized_bytes.append(byte) return bytes(serialized_bytes) @classmethod def deserialize(cls, data) -> tuple[Bits, bytes]: bits: list[int] = [] for byte in data: bits.extend((byte >> i) & 1 for i in range(7, -1, -1)) return cls(bits), b"" class SerializableBytes: """A container object for raw bytes that enforces `serialize()` will be called.""" def __init__(self, value: bytes = b"") -> None: if isinstance(value, type(self)): value = value.value elif not isinstance(value, (bytes, bytearray)): raise ValueError(f"Object is not bytes: {value!r}") # noqa: TRY004 self.value = value def __eq__(self, other: typing.Any) -> bool: if not isinstance(other, type(self)): return NotImplemented return self.value == other.value def serialize(self) -> bytes: return self.value def __repr__(self) -> str: return f"Serialized[{self.value!r}]" NOT_SET = object() class FixedIntType(int): _signed = None _bits = None _size = None # Only for backwards compatibility, not set for smaller ints _byteorder = None min_value: int max_value: int def __new__(cls, *args, **kwargs): if cls._signed is None or cls._bits is None: raise TypeError(f"{cls} is abstract and cannot be created") n = super().__new__(cls, *args, **kwargs) # We use `n + 0` to convert `n` into an integer without calling `int()` if not cls.min_value <= n + 0 <= cls.max_value: raise ValueError( f"{int(n)} is not an {'un' if not cls._signed else 
''}signed" f" {cls._bits} bit integer" ) return n def _hex_repr(self): assert self._bits % 4 == 0 return f"0x{{:0{self._bits // 4}X}}".format(int(self)) def _bin_repr(self): return f"0b{{:0{self._bits}b}}".format(int(self)) def __init_subclass__( cls, signed=NOT_SET, bits=NOT_SET, repr=NOT_SET, byteorder=NOT_SET ) -> None: super().__init_subclass__() if signed is not NOT_SET: cls._signed = signed if bits is not NOT_SET: cls._bits = bits if bits % 8 == 0: cls._size = bits // 8 else: cls._size = None if cls._bits is not None and cls._signed is not None: if cls._signed: cls.min_value = -(2 ** (cls._bits - 1)) cls.max_value = 2 ** (cls._bits - 1) - 1 else: cls.min_value = 0 cls.max_value = 2**cls._bits - 1 if repr == "hex": assert cls._bits % 4 == 0 cls.__str__ = cls.__repr__ = cls._hex_repr elif repr == "bin": cls.__str__ = cls.__repr__ = cls._bin_repr elif not repr: cls.__str__ = super().__str__ cls.__repr__ = super().__repr__ elif repr is not NOT_SET: raise ValueError(f"Invalid repr value {repr!r}. Must be either hex or bin") if byteorder is not NOT_SET: cls._byteorder = byteorder elif cls._byteorder is None: cls._byteorder = "little" if sys.version_info < (3, 10): # XXX: The enum module uses the first class with __new__ in its __dict__ # as the member type. We have to ensure this is true for # every subclass. # Fixed with https://github.com/python/cpython/pull/26658 if "__new__" not in cls.__dict__: cls.__new__ = cls.__new__ # XXX: The enum module sabotages pickling using the same logic. 
if "__reduce_ex__" not in cls.__dict__: cls.__reduce_ex__ = cls.__reduce_ex__ def bits(self) -> Bits: return Bits([(self >> n) & 0b1 for n in range(self._bits - 1, -1, -1)]) @classmethod def from_bits(cls, bits: Bits) -> tuple[FixedIntType, Bits]: if len(bits) < cls._bits: raise ValueError(f"Not enough bits to decode {cls}: {bits}") n = 0 for bit in bits[-cls._bits :]: n <<= 1 n |= bit & 1 if cls._signed and n >= 2 ** (cls._bits - 1): n -= 2**cls._bits return cls(n), bits[: -cls._bits] def serialize(self) -> bytes: if self._bits % 8 != 0: raise TypeError(f"Integer type with {self._bits} bits is not byte aligned") return self.to_bytes(self._bits // 8, self._byteorder, signed=self._signed) @classmethod def deserialize(cls, data: bytes) -> tuple[FixedIntType, bytes]: if cls._bits % 8 != 0: raise TypeError(f"Integer type with {cls._bits} bits is not byte aligned") byte_size = cls._bits // 8 if len(data) < byte_size: raise ValueError(f"Data is too short to contain {byte_size} bytes") r = cls.from_bytes(data[:byte_size], cls._byteorder, signed=cls._signed) data = data[byte_size:] return r, data class uint_t(FixedIntType, signed=False): pass class int_t(FixedIntType, signed=True): pass class int8s(int_t, bits=8): pass class int16s(int_t, bits=16): pass class int24s(int_t, bits=24): pass class int32s(int_t, bits=32): pass class int40s(int_t, bits=40): pass class int48s(int_t, bits=48): pass class int56s(int_t, bits=56): pass class int64s(int_t, bits=64): pass class uint1_t(uint_t, bits=1): pass class uint2_t(uint_t, bits=2): pass class uint3_t(uint_t, bits=3): pass class uint4_t(uint_t, bits=4): pass class uint5_t(uint_t, bits=5): pass class uint6_t(uint_t, bits=6): pass class uint7_t(uint_t, bits=7): pass class uint8_t(uint_t, bits=8): pass class uint16_t(uint_t, bits=16): pass class uint24_t(uint_t, bits=24): pass class uint32_t(uint_t, bits=32): pass class uint40_t(uint_t, bits=40): pass class uint48_t(uint_t, bits=48): pass class uint56_t(uint_t, bits=56): pass class 
uint64_t(uint_t, bits=64): pass class uint_t_be(FixedIntType, signed=False, byteorder="big"): pass class int_t_be(FixedIntType, signed=True, byteorder="big"): pass class int16s_be(int_t_be, bits=16): pass class int24s_be(int_t_be, bits=24): pass class int32s_be(int_t_be, bits=32): pass class int40s_be(int_t_be, bits=40): pass class int48s_be(int_t_be, bits=48): pass class int56s_be(int_t_be, bits=56): pass class int64s_be(int_t_be, bits=64): pass class uint16_t_be(uint_t_be, bits=16): pass class uint24_t_be(uint_t_be, bits=24): pass class uint32_t_be(uint_t_be, bits=32): pass class uint40_t_be(uint_t_be, bits=40): pass class uint48_t_be(uint_t_be, bits=48): pass class uint56_t_be(uint_t_be, bits=56): pass class uint64_t_be(uint_t_be, bits=64): pass class AlwaysCreateEnumType(enum.EnumMeta): """Enum metaclass that skips the functional creation API.""" def __call__(cls, value, names=None, *values) -> type[enum.Enum]: # type: ignore """Custom implementation of Enum.__new__. From https://github.com/python/cpython/blob/v3.11.5/Lib/enum.py#L1091-L1140 """ # all enum instances are actually created during class construction # without calling this method; this method is called by the metaclass' # __call__ (i.e. 
Color(3) ), and by pickle if type(value) is cls: # For lookups like Color(Color.RED) return value # by-value search for a matching enum member # see if it's in the reverse mapping (for hashable values) try: return cls._value2member_map_[value] except KeyError: # Not found, no need to do long O(n) search pass except TypeError: # not there, now do long search -- O(n) behavior for member in cls._member_map_.values(): if member._value_ == value: return member # still not found -- try _missing_ hook try: exc = None result = cls._missing_(value) except Exception as e: exc = e result = None try: if isinstance(result, cls): return result elif ( enum.Flag is not None and issubclass(cls, enum.Flag) and cls._boundary_ is enum.EJECT and isinstance(result, int) ): return result else: ve_exc = ValueError(f"{value!r} is not a valid {cls.__qualname__}") if result is None and exc is None: raise ve_exc elif exc is None: exc = TypeError( f"error in {cls.__name__}._missing_: returned {result!r} instead of None or a valid member" ) if not isinstance(exc, ValueError): exc.__context__ = ve_exc raise exc finally: # ensure all variables that could hold an exception are destroyed exc = None ve_exc = None class _IntEnumMeta(AlwaysCreateEnumType): def __call__(cls, value, names=None, *args, **kwargs): if isinstance(value, str): if value.startswith("0x"): value = int(value, base=16) elif value.isnumeric(): value = int(value) elif value.startswith(cls.__name__ + "."): value = cls[value[len(cls.__name__) + 1 :]].value else: value = cls[value].value return super().__call__(value, names, *args, **kwargs) def bitmap_factory(int_type: CALLABLE_T) -> CALLABLE_T: """Mixins are broken by Python 3.8.6 so we must dynamically create the enum with the appropriate methods but with only one non-Enum parent class. 
""" if sys.version_info >= (3, 11): class _NewEnum( int_type, enum.ReprEnum, enum.Flag, boundary=enum.KEEP, metaclass=AlwaysCreateEnumType, ): pass else: class _NewEnum(int_type, enum.Flag): # Rebind classmethods to our own class _missing_ = classmethod(enum.IntFlag._missing_.__func__) _create_pseudo_member_ = classmethod( enum.IntFlag._create_pseudo_member_.__func__ ) __or__ = enum.IntFlag.__or__ __and__ = enum.IntFlag.__and__ __xor__ = enum.IntFlag.__xor__ __ror__ = enum.IntFlag.__ror__ __rand__ = enum.IntFlag.__rand__ __rxor__ = enum.IntFlag.__rxor__ __invert__ = enum.IntFlag.__invert__ return _NewEnum def enum_factory(int_type: CALLABLE_T, undefined: str = "undefined") -> CALLABLE_T: """Enum factory.""" class _NewEnum(int_type, enum.Enum, metaclass=_IntEnumMeta): @classmethod def _missing_(cls, value): new = cls._member_type_.__new__(cls, value) if cls._bits % 8 == 0: name = f"{undefined}_{new._hex_repr().lower()}" else: name = f"{undefined}_{new._bin_repr()}" new._name_ = name.format(value) new._value_ = value return new def __format__(self, format_spec: str) -> str: if format_spec: # Allow formatting the integer enum value return self._member_type_.__format__(self, format_spec) else: # Otherwise, format it as its string representation return object.__format__(repr(self), format_spec) return _NewEnum class enum1(enum_factory(uint1_t)): # noqa: N801 pass class enum2(enum_factory(uint2_t)): # noqa: N801 pass class enum3(enum_factory(uint3_t)): # noqa: N801 pass class enum4(enum_factory(uint4_t)): # noqa: N801 pass class enum5(enum_factory(uint5_t)): # noqa: N801 pass class enum6(enum_factory(uint6_t)): # noqa: N801 pass class enum7(enum_factory(uint7_t)): # noqa: N801 pass class enum8(enum_factory(uint8_t)): # noqa: N801 pass class enum16(enum_factory(uint16_t)): # noqa: N801 pass class enum32(enum_factory(uint32_t)): # noqa: N801 pass class enum16_be(enum_factory(uint16_t_be)): # noqa: N801 pass class enum32_be(enum_factory(uint32_t_be)): # noqa: N801 pass 
class bitmap2(bitmap_factory(uint2_t)): pass class bitmap3(bitmap_factory(uint3_t)): pass class bitmap4(bitmap_factory(uint4_t)): pass class bitmap5(bitmap_factory(uint5_t)): pass class bitmap6(bitmap_factory(uint6_t)): pass class bitmap7(bitmap_factory(uint7_t)): pass class bitmap8(bitmap_factory(uint8_t)): pass class bitmap16(bitmap_factory(uint16_t)): pass class bitmap24(bitmap_factory(uint24_t)): pass class bitmap32(bitmap_factory(uint32_t)): pass class bitmap40(bitmap_factory(uint40_t)): pass class bitmap48(bitmap_factory(uint48_t)): pass class bitmap56(bitmap_factory(uint56_t)): pass class bitmap64(bitmap_factory(uint64_t)): pass class bitmap16_be(bitmap_factory(uint16_t_be)): pass class bitmap24_be(bitmap_factory(uint24_t_be)): pass class bitmap32_be(bitmap_factory(uint32_t_be)): pass class bitmap40_be(bitmap_factory(uint40_t_be)): pass class bitmap48_be(bitmap_factory(uint48_t_be)): pass class bitmap56_be(bitmap_factory(uint56_t_be)): pass class bitmap64_be(bitmap_factory(uint64_t_be)): pass class BaseFloat(float): _exponent_bits = None _fraction_bits = None _size = None def __init_subclass__(cls, exponent_bits, fraction_bits): size_bits = 1 + exponent_bits + fraction_bits assert size_bits % 8 == 0 cls._exponent_bits = exponent_bits cls._fraction_bits = fraction_bits cls._size = size_bits // 8 @staticmethod def _convert_format(*, src: BaseFloat, dst: BaseFloat, n: int) -> int: """Converts an integer representing a float from one format into another. Note: 1. Format is assumed to be little endian: 0b[sign bit] [exponent] [fraction] 2. Truncates/extends the exponent, preserving the special cases of all 1's and all 0's. 3. Truncates/extends the fractional bits from the right, allowing lossless conversion to a "bigger" representation. 
""" src_sign = n >> (src._exponent_bits + src._fraction_bits) src_frac = n & ((1 << src._fraction_bits) - 1) src_biased_exp = (n >> src._fraction_bits) & ((1 << src._exponent_bits) - 1) src_exp = src_biased_exp - 2 ** (src._exponent_bits - 1) if src_biased_exp == (1 << src._exponent_bits) - 1: dst_biased_exp = 2**dst._exponent_bits - 1 elif src_biased_exp == 0: dst_biased_exp = 0 else: dst_min_exp = 2 - 2 ** (dst._exponent_bits - 1) # Can't be all zeroes dst_max_exp = 2 ** (dst._exponent_bits - 1) - 2 # Can't be all ones dst_exp = min(max(dst_min_exp, src_exp), dst_max_exp) dst_biased_exp = dst_exp + 2 ** (dst._exponent_bits - 1) # We add/remove LSBs if src._fraction_bits > dst._fraction_bits: dst_frac = src_frac >> (src._fraction_bits - dst._fraction_bits) else: dst_frac = src_frac << (dst._fraction_bits - src._fraction_bits) return ( src_sign << (dst._exponent_bits + dst._fraction_bits) | dst_biased_exp << (dst._fraction_bits) | dst_frac ) def serialize(self) -> bytes: return self._convert_format( src=Double, dst=self, n=int.from_bytes(struct.pack(" tuple[BaseFloat, bytes]: if len(data) < cls._size: raise ValueError(f"Data is too short to contain {cls._size} bytes") double_bytes = cls._convert_format( src=cls, dst=Double, n=int.from_bytes(data[: cls._size], "little") ).to_bytes(Double._size, "little") return cls(struct.unpack("= pow(256, self._prefix_length) - 1: raise ValueError("OctetString is too long") return len(self).to_bytes(self._prefix_length, "little", signed=False) + self @classmethod def deserialize(cls, data): if len(data) < cls._prefix_length: raise ValueError("Data is too short") num_bytes = int.from_bytes(data[: cls._prefix_length], "little") if len(data) < cls._prefix_length + num_bytes: raise ValueError("Data is too short") s = data[cls._prefix_length : cls._prefix_length + num_bytes] return cls(s), data[cls._prefix_length + num_bytes :] def LimitedLVBytes(max_len): # noqa: N802 class LimitedLVBytes(LVBytes): _max_len = max_len def 
serialize(self): if len(self) > self._max_len: raise ValueError(f"LVBytes is too long (>{self._max_len})") return super().serialize() return LimitedLVBytes class LVBytesSize2(LVBytes): def serialize(self): if len(self) != 2: raise ValueError("LVBytes must be of size 2") return super().serialize() @classmethod def deserialize(cls, data): d, r = super().deserialize(data) if len(d) != 2: raise ValueError("LVBytes must be of size 2") return d, r class LongOctetString(LVBytes): _prefix_length = 2 class KwargTypeMeta(type): # So things like `LVList[NWK, t.uint8_t]` are singletons _anonymous_classes = {} # type:ignore[var-annotated] def __new__(metaclass, name, bases, namespaces, **kwargs): cls_kwarg_attrs = namespaces.get("_getitem_kwargs", {}) def __init_subclass__(cls, **kwargs): filtered_kwargs = kwargs.copy() for name, _value in kwargs.items(): if name in cls_kwarg_attrs: setattr(cls, f"_{name}", filtered_kwargs.pop(name)) super().__init_subclass__(**filtered_kwargs) if "__init_subclass__" not in namespaces: namespaces["__init_subclass__"] = __init_subclass__ return type.__new__(metaclass, name, bases, namespaces, **kwargs) def __getitem__(cls, key): # Make sure Foo[a] is the same as Foo[a,] if not isinstance(key, tuple): key = (key,) signature = inspect.Signature( parameters=[ inspect.Parameter( name=k, kind=inspect.Parameter.POSITIONAL_OR_KEYWORD, default=v if v is not None else inspect.Parameter.empty, ) for k, v in cls._getitem_kwargs.items() ] ) bound = signature.bind(*key) bound.apply_defaults() # Default types need to work, which is why we need to create the key down here expanded_key = tuple(bound.arguments.values()) if (cls, expanded_key) in cls._anonymous_classes: return cls._anonymous_classes[cls, expanded_key] class AnonSubclass(cls, **bound.arguments): pass AnonSubclass.__name__ = AnonSubclass.__qualname__ = f"Anonymous{cls.__name__}" cls._anonymous_classes[cls, expanded_key] = AnonSubclass return AnonSubclass def __subclasscheck__(cls, subclass): if 
type(subclass) is not KwargTypeMeta: return False # Named subclasses are handled normally if not cls.__name__.startswith("Anonymous"): return super().__subclasscheck__(subclass) # Anonymous subclasses must be identical if subclass.__name__.startswith("Anonymous"): return cls is subclass # A named class is a "subclass" of an anonymous subclass only if its ancestors # are all the same if subclass.__mro__[-len(cls.__mro__) + 1 :] != cls.__mro__[1:]: return False # They must also have the same class kwargs for key in cls._getitem_kwargs: key = f"_{key}" if getattr(cls, key) != getattr(subclass, key): return False return True def __instancecheck__(self, subclass): # We rely on __subclasscheck__ to do the work if issubclass(type(subclass), self): return True return super().__instancecheck__(subclass) class List(list, metaclass=KwargTypeMeta): _item_type = None _getitem_kwargs = {"item_type": None} def serialize(self) -> bytes: assert self._item_type is not None return b"".join([self._item_type(i).serialize() for i in self]) @classmethod def deserialize(cls: type[T], data: bytes) -> tuple[T, bytes]: assert cls._item_type is not None lst = cls() while data: item, data = cls._item_type.deserialize(data) lst.append(item) return lst, data class LVList(list, metaclass=KwargTypeMeta): _item_type = None _length_type = uint8_t _getitem_kwargs = {"item_type": None, "length_type": uint8_t} def serialize(self) -> bytes: assert self._item_type is not None return self._length_type(len(self)).serialize() + b"".join( [self._item_type(i).serialize() for i in self] ) @classmethod def deserialize(cls: type[T], data: bytes) -> tuple[T, bytes]: assert cls._item_type is not None length, data = cls._length_type.deserialize(data) r = cls() for _i in range(length): item, data = cls._item_type.deserialize(data) r.append(item) return r, data class FixedList(list, metaclass=KwargTypeMeta): _item_type = None _length = None _getitem_kwargs = {"item_type": None, "length": None} def serialize(self) -> 
bytes: assert self._length is not None if len(self) != self._length: raise ValueError( f"Invalid length for {self!r}: expected {self._length}, got {len(self)}" ) return b"".join([self._item_type(i).serialize() for i in self]) @classmethod def deserialize(cls: type[T], data: bytes) -> tuple[T, bytes]: assert cls._item_type is not None r = cls() for _i in range(cls._length): item, data = cls._item_type.deserialize(data) r.append(item) return r, data class CharacterString(str): _prefix_length = 1 def serialize(self) -> bytes: if len(self) >= pow(256, self._prefix_length) - 1: raise ValueError("String is too long") return len(self).to_bytes( self._prefix_length, "little", signed=False ) + self.encode("utf8") @classmethod def deserialize(cls: type[T], data: bytes) -> tuple[T, bytes]: if len(data) < cls._prefix_length: raise ValueError("Data is too short") length = int.from_bytes(data[: cls._prefix_length], "little") if len(data) < cls._prefix_length + length: raise ValueError("Data is too short") raw = data[cls._prefix_length : cls._prefix_length + length] text = raw.split(b"\x00")[0].decode("utf8", errors="replace") # FIXME: figure out how to get this working: `T` is not behaving as expected in # the classmethod when it is not bound. 
r = cls(text) # type:ignore[call-arg] r.raw = raw return r, data[cls._prefix_length + length :] class LongCharacterString(CharacterString): _prefix_length = 2 def LimitedCharString(max_len): # noqa: N802 class LimitedCharString(CharacterString): _max_len = max_len def serialize(self) -> bytes: if len(self) > self._max_len: raise ValueError(f"String is too long (>{self._max_len})") return super().serialize() return LimitedCharString def Optional(optional_item_type): class Optional(optional_item_type): optional = True @classmethod def deserialize(cls, data): try: return super().deserialize(data) except ValueError: return None, b"" return Optional class data8(FixedList, item_type=uint8_t, length=1): """General data, Discrete, 8 bit.""" class data16(FixedList, item_type=uint8_t, length=2): """General data, Discrete, 16 bit.""" class data24(FixedList, item_type=uint8_t, length=3): """General data, Discrete, 24 bit.""" class data32(FixedList, item_type=uint8_t, length=4): """General data, Discrete, 32 bit.""" class data40(FixedList, item_type=uint8_t, length=5): """General data, Discrete, 40 bit.""" class data48(FixedList, item_type=uint8_t, length=6): """General data, Discrete, 48 bit.""" class data56(FixedList, item_type=uint8_t, length=7): """General data, Discrete, 56 bit.""" class data64(FixedList, item_type=uint8_t, length=8): """General data, Discrete, 64 bit.""" zigpy-0.62.3/zigpy/types/named.py000066400000000000000000000445251456054056700167360ustar00rootroot00000000000000from __future__ import annotations import dataclasses from datetime import datetime, timezone import enum import typing from . 
import basic from .struct import Struct if typing.TYPE_CHECKING: from typing_extensions import Self class BaseDataclassMixin: def replace(self, **kwargs) -> Self: return dataclasses.replace(self, **kwargs) def _hex_string_to_bytes(hex_string: str) -> bytes: """Parses a hex string with optional colon delimiters and whitespace into bytes.""" # Strips out whitespace and colons cleaned = "".join(hex_string.replace(":", "").split()).upper() return bytes.fromhex(cleaned) class BroadcastAddress(basic.enum16): ALL_DEVICES = 0xFFFF RESERVED_FFFE = 0xFFFE RX_ON_WHEN_IDLE = 0xFFFD ALL_ROUTERS_AND_COORDINATOR = 0xFFFC LOW_POWER_ROUTER = 0xFFFB RESERVED_FFFA = 0xFFFA RESERVED_FFF9 = 0xFFF9 RESERVED_FFF8 = 0xFFF8 class EUI64(basic.FixedList, item_type=basic.uint8_t, length=8): # EUI 64-bit ID (an IEEE address). def __repr__(self) -> str: return ":".join("%02x" % i for i in self[::-1]) def __hash__(self) -> int: # type: ignore return hash(repr(self)) @classmethod def convert(cls, ieee: str) -> EUI64: if ieee is None: return None ieee = [basic.uint8_t(p) for p in _hex_string_to_bytes(ieee)[::-1]] assert len(ieee) == cls._length return cls(ieee) EUI64.UNKNOWN = EUI64.convert("FF:FF:FF:FF:FF:FF:FF:FF") class KeyData(basic.FixedList, item_type=basic.uint8_t, length=16): def __repr__(self) -> str: return ":".join(f"{i:02x}" for i in self) @classmethod def convert(cls, key: str) -> KeyData: key = [basic.uint8_t(p) for p in _hex_string_to_bytes(key)] assert len(key) == cls._length return cls(key) KeyData.UNKNOWN = KeyData.convert("FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF:FF") class Bool(basic.enum8): false = 0 true = 1 class AttributeId(basic.uint16_t, repr="hex"): pass class BACNetOid(basic.uint32_t): pass class Channels(basic.bitmap32): """Zigbee Channels.""" NO_CHANNELS = 0x00000000 ALL_CHANNELS = 0x07FFF800 CHANNEL_11 = 0x00000800 CHANNEL_12 = 0x00001000 CHANNEL_13 = 0x00002000 CHANNEL_14 = 0x00004000 CHANNEL_15 = 0x00008000 CHANNEL_16 = 0x00010000 CHANNEL_17 = 0x00020000 
CHANNEL_18 = 0x00040000 CHANNEL_19 = 0x00080000 CHANNEL_20 = 0x00100000 CHANNEL_21 = 0x00200000 CHANNEL_22 = 0x00400000 CHANNEL_23 = 0x00800000 CHANNEL_24 = 0x01000000 CHANNEL_25 = 0x02000000 CHANNEL_26 = 0x04000000 @classmethod def from_channel_list(cls: Channels, channels: typing.Iterable[int]) -> Channels: mask = cls.NO_CHANNELS for channel in channels: if not 11 <= channel <= 26: raise ValueError( f"Invalid channel number {channel}. Must be between 11 and 26." ) mask |= cls[f"CHANNEL_{channel}"] return mask def __iter__(self): cls = type(self) channels = [c for c in range(11, 26 + 1) if self & cls[f"CHANNEL_{c}"]] if self != cls.from_channel_list(channels): raise ValueError(f"Channels bitmap has unexpected members: {self}") return iter(channels) class ClusterId(basic.uint16_t): pass class Date(Struct): years_since_1900: basic.uint8_t month: basic.uint8_t day: basic.uint8_t day_of_week: basic.uint8_t @property def year(self): if self.years_since_1900 is None: return None return 1900 + self.years_since_1900 @year.setter def year(self, years): assert 1900 <= years <= 2155 self.years_since_1900 = years - 1900 class NWK(basic.uint16_t, repr="hex"): @classmethod def convert(cls, data: str) -> NWK: assert 4 * len(data) == cls._bits return cls.deserialize(bytes.fromhex(data)[::-1])[0] class PanId(NWK): pass class ExtendedPanId(EUI64): pass class Group(basic.uint16_t, repr="hex"): pass class NoData: @classmethod def deserialize(cls, data): return cls(), data def serialize(self): return b"" class TimeOfDay(Struct): hours: basic.uint8_t minutes: basic.uint8_t seconds: basic.uint8_t hundredths: basic.uint8_t class _Time(basic.uint32_t): pass class UTCTime(_Time): pass class StandardTime(_Time): """Adjusted for TimeZone but not for daylight saving.""" class LocalTime(_Time): """Standard time adjusted for daylight saving.""" class Relays(basic.LVList, item_type=NWK, length_type=basic.uint8_t): """Relay list for static routing.""" class APSStatus(basic.enum8): # A request has 
been executed successfully APS_SUCCESS = 0x00 # A transmit request failed since the ASDU is too large and fragmentation # is not supported APS_ASDU_TOO_LONG = 0xA0 # A received fragmented frame could not be defragmented at the current time APS_DEFRAG_DEFERRED = 0xA1 # A received fragmented frame could not be defragmented since the device # does not support fragmentation APS_DEFRAG_UNSUPPORTED = 0xA2 # A parameter value was out of range APS_ILLEGAL_REQUEST = 0xA3 # An APSME-UNBIND.request failed due to the requested binding link not # existing in the binding table APS_INVALID_BINDING = 0xA4 # An APSME-REMOVE-GROUP.request has been issued with a group identifier # that does not appear in the group table APS_INVALID_GROUP = 0xA5 # A parameter value was invalid or out of range APS_INVALID_PARAMETER = 0xA6 # An APSDE-DATA.request requesting acknowledged transmission failed due to # no acknowledgement being received APS_NO_ACK = 0xA7 # An APSDE-DATA.request with a destination addressing mode set to 0x00 # failed due to there being no devices bound to this device APS_NO_BOUND_DEVICE = 0xA8 # An APSDE-DATA.request with a destination addressing mode set to 0x03 # failed due to no corresponding short address found in the address map # table APS_NO_SHORT_ADDRESS = 0xA9 # An APSDE-DATA.request with a destination addressing mode set to 0x00 # failed due to a binding table not being supported on the device APS_NOT_SUPPORTED = 0xAA # An ASDU was received that was secured using a link key APS_SECURED_LINK_KEY = 0xAB # An ASDU was received that was secured using a network key APS_SECURED_NWK_KEY = 0xAC # An APSDE-DATA.request requesting security has resulted in an error # during the corresponding security processing APS_SECURITY_FAIL = 0xAD # An APSME-BIND.request or APSME.ADDGROUP.request issued when the binding # or group tables, respectively, were full APS_TABLE_FULL = 0xAE # An ASDU was received without any security APS_UNSECURED = 0xAF # An APSME-GET.request or 
APSMESET.request has been issued with an unknown # attribute identifier APS_UNSUPPORTED_ATTRIBUTE = 0xB0 @classmethod def _missing_(cls, value): chained = NWKStatus(value) status = cls._member_type_.__new__(cls, chained.value) status._name_ = chained.name status._value_ = value return status class MACStatus(basic.enum8): # Operation was successful MAC_SUCCESS = 0x00 # Association Status field MAC_PAN_AT_CAPACITY = 0x01 MAC_PAN_ACCESS_DENIED = 0x02 # The frame counter purportedly applied by the originator of the received # frame is invalid MAC_COUNTER_ERROR = 0xDB # The key purportedly applied by the originator of the received frame is # not allowed to be used with that frame type according to the key usage # policy of the recipient MAC_IMPROPER_KEY_TYPE = 0xDC # The security level purportedly applied # by the originator of the # received frame does not meet the minimum security level # required/expected by the recipient for that frame type MAC_IMPROPER_SECURITY_LEVEL = 0xDD # The received frame was purportedly secured using security based on IEEE # Std 802.15.4-2003, and such security is not supported by this standard MAC_UNSUPPORTED_LEGACY = 0xDE # The security purportedly applied by the originator of the received frame # is not supported MAC_UNSUPPORTED_SECURITY = 0xDF # The beacon was lost following a synchronization request MAC_BEACON_LOSS = 0xE0 # A transmission could not take place due to activity on the channel, i.e. 
# the CSMA-CA mechanism has failed MAC_CHANNEL_ACCESS_FAILURE = 0xE1 # The GTS request has been denied by the PAN coordinator MAC_DENIED = 0xE2 # The attempt to disable the transceiver has failed MAC_DISABLE_TRX_FAILURE = 0xE3 # Cryptographic processing of the received secured frame failed MAC_SECURITY_ERROR = 0xE4 # Either a frame resulting from processing has a length that is greater # than aMaxPHYPacketSize or a requested transaction is too large to fit in # the CAP or GTS MAC_FRAME_TOO_LONG = 0xE5 # The requested GTS transmission failed because the specified GTS either # did not have a transmit GTS direction or was not defined MAC_INVALID_GTS = 0xE6 # A request to purge an MSDU from the transaction queue was made using an # MSDU handle that was not found in the transaction table MAC_INVALID_HANDLE = 0xE7 # A parameter in the primitive is either not supported or is out of the # valid range MAC_INVALID_PARAMETER = 0xE8 # No acknowledgment was received after macMaxFrameRetries MAC_NO_ACK = 0xE9 # A scan operation failed to find any network beacons MAC_NO_BEACON = 0xEA # No response data was available following a request MAC_NO_DATA = 0xEB # The operation failed because a 16-bit short address was not allocated MAC_NO_SHORT_ADDRESS = 0xEC # A receiver enable request was unsuccessful because it could not be # completed within the CAP. 
@note The enumeration description is not used # in this standard, and it is included only to meet the backwards # compatibility requirements for IEEE Std 802.15.4-2003 MAC_OUT_OF_CAP = 0xED # A PAN identifier conflict has been detected and communicated to the PAN # coordinator MAC_PAN_ID_CONFLICT = 0xEE # A coordinator realignment command has been received MAC_REALIGNMENT = 0xEF # The transaction has expired and its information was discarded MAC_TRANSACTION_EXPIRED = 0xF0 # There is no capacity to store the transaction MAC_TRANSACTION_OVERFLOW = 0xF1 # The transceiver was in the transmitter enabled state when the receiver # was requested to be enabled. @note The enumeration description is not # used in this standard, and it is included only to meet the backwards # compatibility requirements for IEEE Std 802.15.4-2003 MAC_TX_ACTIVE = 0xF2 # The key purportedly used by the originator of the received frame is not # available or, if available, the originating device is not known or is # blacklisted with that particular key MAC_UNAVAILABLE_KEY = 0xF3 # A SET/GET request was issued with the identifier of a PIB attribute that # is not supported MAC_UNSUPPORTED_ATTRIBUTE = 0xF4 # A request to send data was unsuccessful because neither the source # address parameters nor the destination address parameters were present MAC_INVALID_ADDRESS = 0xF5 # A receiver enable request was unsuccessful because it specified a number # of symbols that was longer than the beacon interval MAC_ON_TIME_TOO_LONG = 0xF6 # A receiver enable request was unsuccessful because it could not be # completed within the current superframe and was not permitted to be # deferred until the next superframe MAC_PAST_TIME = 0xF7 # The device was instructed to start sending beacons based on the timing # of the beacon transmissions of its coordinator, but the device is not # currently tracking the beacon of its coordinator MAC_TRACKING_OFF = 0xF8 # An attempt to write to a MAC PIB attribute that is in a table 
failed # because the specified table index was out of range MAC_INVALID_INDEX = 0xF9 # A scan operation terminated prematurely because the number of PAN # descriptors stored reached an implementation specified maximum MAC_LIMIT_REACHED = 0xFA # A SET/GET request was issued with the identifier of an attribute that is # read only MAC_READ_ONLY = 0xFB # A request to perform a scan operation failed because the MLME was in the # process of performing a previously initiated scan operation MAC_SCAN_IN_PROGRESS = 0xFC # The device was instructed to start sending beacons based on the timing # of the beacon transmissions of its coordinator, but the instructed start # time overlapped the transmission time of the beacon of its coordinator MAC_SUPERFRAME_OVERLAP = 0xFD class NWKStatus(basic.enum8): # A request has been executed successfully NWK_SUCCESS = 0x00 # An invalid or out-of-range parameter has been passed to a primitive from # the next higher layer NWK_INVALID_PARAMETER = 0xC1 # The next higher layer has issued a request that is invalid or cannot be # executed given the current state of the NWK layer NWK_INVALID_REQUEST = 0xC2 # An NLME-JOIN.request has been disallowed NWK_NOT_PERMITTED = 0xC3 # An NLME-NETWORK-FORMATION.request has failed to start a network NWK_STARTUP_FAILURE = 0xC4 # A device with the address supplied to the NLMEDIRECT-JOIN.request is # already present in the neighbor table of the device on which the # NLMEDIRECT-JOIN.request was issued NWK_ALREADY_PRESENT = 0xC5 # Used to indicate that an NLME-SYNC.request has failed at the MAC layer NWK_SYNC_FAILURE = 0xC6 # An NLME-JOIN-DIRECTLY.request has failed because there is no more room # in the neighbor table NWK_NEIGHBOR_TABLE_FULL = 0xC7 # An NLME-LEAVE.request has failed because the device addressed in the # parameter list is not in the neighbor table of the issuing device NWK_UNKNOWN_DEVICE = 0xC8 # An NLME-GET.request or NLME-SET.request has been issued with an unknown # attribute identifier 
NWK_UNSUPPORTED_ATTRIBUTE = 0xC9 # An NLME-JOIN.request has been issued in an environment where no networks # are detectable NWK_NO_NETWORKS = 0xCA NWK_RESERVED_0xCB = 0xCB # Security processing has been attempted on an outgoing frame, and has # failed because the frame counter has reached its maximum value NWK_NWK_MAX_FRM_COUNTER = 0xCC # Security processing has been attempted on an outgoing frame, and has # failed because no key was available with which to process it NWK_NO_KEY = 0xCD # Security processing has been attempted on an outgoing frame, and has # failed because the security engine produced erroneous output NWK_BAD_CCM_OUTPUT = 0xCE NWK_RESERVED_0xCF = 0xCF # An attempt to discover a route has failed due to a reason other than a # lack of routing capacity NWK_ROUTE_DISCOVERY_FAILED = 0xD0 # An NLDE-DATA.request has failed due to a routing failure on the sending # device or an NLMEROUTE-DISCOVERY.request has failed due to the cause # cited in the accompanying NetworkStatusCode NWK_ROUTE_ERROR = 0xD1 # An attempt to send a broadcast frame or member mode multicast has failed # due to the fact that there is no room in the BTT NWK_BT_TABLE_FULL = 0xD2 # An NLDE-DATA.request has failed due to insufficient buffering available. 
# A non-member mode multicast frame was discarded pending route discovery NWK_FRAME_NOT_BUFFERED = 0xD3 @classmethod def _missing_(cls, value): chained = MACStatus(value) status = cls._member_type_.__new__(cls, chained.value) status._name_ = chained.name status._value_ = value return status class AddrMode(basic.enum8): """Addressing mode.""" Group = 0x01 NWK = 0x02 IEEE = 0x03 Broadcast = 0x0F class Addressing: """Deprecated, only present for backwards compatibility.""" Group = AddrMode NWK = AddrMode IEEE = AddrMode Broadcast = AddrMode @dataclasses.dataclass class AddrModeAddress(BaseDataclassMixin): """Address mode and address.""" addr_mode: AddrMode address: NWK | Group | EUI64 | BroadcastAddress | None def __post_init__(self) -> None: if self.addr_mode is not None and self.address is not None: self.address = { AddrMode.Group: Group, AddrMode.NWK: NWK, AddrMode.IEEE: EUI64, AddrMode.Broadcast: BroadcastAddress, }[self.addr_mode](self.address) class TransmitOptions(enum.Flag): NONE = 0 ACK = 1 APS_Encryption = 2 @dataclasses.dataclass class ZigbeePacket(BaseDataclassMixin): """Container for the information in an incoming or outgoing ZDO or ZCL packet. The radio library is expected to fill this object in with all received data and pass it to zigpy for every type of packet. 
""" timestamp: datetime = dataclasses.field( compare=False, default_factory=lambda: datetime.now(timezone.utc) ) # Set to `None` when the packet is outgoing src: AddrModeAddress | None = dataclasses.field(default=None) src_ep: basic.uint8_t | None = dataclasses.field(default=None) # Set to `None` when the packet is incoming dst: AddrModeAddress | None = dataclasses.field(default=None) dst_ep: basic.uint8_t | None = dataclasses.field(default=None) # If the radio supports it, a source route for the packet source_route: list[NWK] | None = dataclasses.field(default=None) extended_timeout: bool = dataclasses.field(default=False) tsn: basic.uint8_t = dataclasses.field(default=0x00) profile_id: basic.uint16_t = dataclasses.field(default=0x0000) cluster_id: basic.uint16_t = dataclasses.field(default=0x0000) # Any serializable object data: basic.SerializableBytes = dataclasses.field( default_factory=basic.SerializableBytes ) # Options for outgoing packets tx_options: TransmitOptions = dataclasses.field(default=TransmitOptions.NONE) radius: basic.uint8_t = dataclasses.field(default=0) non_member_radius: basic.uint8_t = dataclasses.field(default=0) # Options for incoming packets lqi: basic.uint8_t | None = dataclasses.field(default=None) rssi: basic.int8s | None = dataclasses.field(default=None) zigpy-0.62.3/zigpy/types/struct.py000066400000000000000000000324411456054056700171700ustar00rootroot00000000000000from __future__ import annotations import dataclasses import inspect import typing import zigpy.types as t NoneType = type(None) class ListSubclass(list): # So we can call `setattr()` on it pass @dataclasses.dataclass(frozen=True) class StructField: name: str | None = None type: type | None = None dynamic_type: typing.Callable[[Struct], type] | None = None requires: typing.Callable[[Struct], bool] | None = dataclasses.field( default=None, repr=False ) optional: bool | None = False repr: typing.Callable[[typing.Any], str] | None = dataclasses.field( default=repr, repr=False 
) def replace(self, **kwargs) -> StructField: return dataclasses.replace(self, **kwargs) def get_type(self, struct: Struct) -> type: if self.dynamic_type is not None: return self.dynamic_type(struct) return self.type def _convert_type(self, value, struct: Struct): field_type = self.get_type(struct) if value is None or isinstance(value, field_type): return value try: return field_type(value) except Exception as e: raise ValueError( f"Failed to convert {self.name}={value!r} from type" f" {type(value)} to {field_type}" ) from e _STRUCT = typing.TypeVar("_STRUCT", bound="Struct") class Struct: @classmethod def _real_cls(cls) -> type: # The "Optional" subclass is dynamically created and breaks types. # We have to use a little introspection to find our real class. return next(c for c in cls.__mro__ if c.__name__ != "Optional") def __init_subclass__(cls) -> None: super().__init_subclass__() # We generate fields up here to fail early and cache it cls.fields = cls._real_cls()._get_fields() # Check to see if the Struct is also an integer cls._int_type = next( ( c for c in cls.__mro__[1:] if issubclass(c, t.FixedIntType) and not issubclass(c, Struct) ), None, ) def __new__(cls: type[_STRUCT], *args, **kwargs) -> _STRUCT: cls = cls._real_cls() if len(args) == 1 and isinstance(args[0], cls): # Like a copy constructor if kwargs: raise ValueError(f"Cannot use copy constructor with kwargs: {kwargs!r}") kwargs = args[0].as_dict() args = () elif len(args) == 1 and cls._int_type is not None and isinstance(args[0], int): # Integer constructor return cls.deserialize(cls._int_type(args[0]).serialize())[0] # Pretend our signature is `__new__(cls, p1: t1, p2: t2, ...)` signature = inspect.Signature( parameters=[ inspect.Parameter( name=f.name, kind=inspect.Parameter.POSITIONAL_OR_KEYWORD, default=None, annotation=f.type, ) for f in cls.fields ] ) bound = signature.bind(*args, **kwargs) bound.apply_defaults() instance = super().__new__(cls) # Set each attributes on the instance for name, 
value in bound.arguments.items(): field = getattr(cls.fields, name) setattr(instance, name, field._convert_type(value, struct=instance)) return instance @classmethod def _get_fields(cls) -> list[StructField]: fields = ListSubclass() # We need both to throw type errors in case a field is not annotated annotations = typing.get_type_hints(cls._real_cls()) # Make sure every `StructField` is annotated for name in vars(cls._real_cls()): value = getattr(cls, name) if isinstance(value, StructField) and name not in annotations: raise TypeError( f"Field {name!r}={value} must have some annotation." f" Use `None` if it is specified in the `StructField`." ) # XXX: Python doesn't provide a simple way to get all defined attributes *and* # order them with respect to annotation-only fields. # Every struct field must be annotated. for name, annotation in annotations.items(): field = getattr(cls, name, StructField()) if not isinstance(field, StructField): continue field = field.replace(name=name) # An annotation of `None` means to use the field's type if annotation is not NoneType: if field.type is not None and field.type != annotation: raise TypeError( f"Field {name!r} type annotation conflicts with provided type:" f" {annotation} != {field.type}" ) field = field.replace(type=annotation) elif field.type is None and field.dynamic_type is None: raise TypeError(f"Field {name!r} has no type") fields.append(field) setattr(fields, field.name, field) return fields def assigned_fields(self, *, strict=False) -> list[tuple[StructField, typing.Any]]: assigned_fields = ListSubclass() for field in self.fields: value = getattr(self, field.name) # Ignore fields that aren't required if field.requires is not None and not field.requires(self): continue # Missing fields cause an error if strict if value is None and not field.optional: if strict: raise ValueError( f"Value for field {field.name!r} is required: {self!r}" ) else: pass # Python bug, the following `continue` is never covered continue # 
pragma: no cover assigned_fields.append((field, value)) setattr(assigned_fields, field.name, (field, value)) return assigned_fields @classmethod def from_dict(cls: type[_STRUCT], obj: dict[str, typing.Any]) -> _STRUCT: instance = cls() for key, value in obj.items(): field = getattr(cls.fields, key) field_type = field.get_type(instance) if issubclass(field_type, Struct): setattr(instance, field.name, field_type.from_dict(value)) else: setattr(instance, field.name, value) return instance def as_dict( self, *, skip_missing: bool = False, recursive: bool = False ) -> dict[str, typing.Any]: d = {} for f in self.fields: value = getattr(self, f.name) if value is None and skip_missing: continue elif recursive and isinstance(value, Struct): d[f.name] = value.as_dict( skip_missing=skip_missing, recursive=recursive ) else: d[f.name] = value return d def as_tuple(self, *, skip_missing: bool = False) -> tuple: return tuple(self.as_dict(skip_missing=skip_missing).values()) def serialize(self) -> bytes: chunks = [] bit_offset = 0 bitfields = [] for field, value in self.assigned_fields(strict=True): if value is None and field.optional: continue value = field._convert_type(value, struct=self) field_type = field.get_type(struct=self) # All integral types are compacted into one chunk, unless they start and end # on a byte boundary. 
if issubclass(field_type, t.FixedIntType) and not ( value._bits % 8 == 0 and bit_offset % 8 == 0 ): bit_offset += value._bits bitfields.append(value) # Serialize the current segment of bitfields once we reach a boundary if bit_offset % 8 == 0: chunks.append(t.Bits.from_bitfields(bitfields).serialize()) bitfields = [] continue elif bitfields: raise ValueError( f"Segment of bitfields did not terminate on a byte boundary: " f" {bitfields}" ) chunks.append(value.serialize()) if bitfields: raise ValueError( f"Trailing segment of bitfields did not terminate on a byte boundary: " f" {bitfields}" ) return b"".join(chunks) @classmethod def deserialize(cls: type[_STRUCT], data: bytes) -> tuple[_STRUCT, bytes]: instance = cls() bit_length = 0 bitfields = [] for field in cls.fields: if field.requires is not None and not field.requires(instance): continue elif not data and field.optional: continue field_type = field.get_type(struct=instance) if issubclass(field_type, t.FixedIntType) and not ( field_type._bits % 8 == 0 and bit_length % 8 == 0 ): bit_length += field_type._bits bitfields.append(field) if bit_length % 8 == 0: if len(data) < bit_length // 8: raise ValueError(f"Data is too short to contain {bitfields}") bits, _ = t.Bits.deserialize(data[: bit_length // 8]) data = data[bit_length // 8 :] for f in bitfields: value, bits = f.type.from_bits(bits) setattr(instance, f.name, value) assert not bits bit_length = 0 bitfields = [] continue elif bitfields: raise ValueError( f"Segment of bitfields did not terminate on a byte boundary: " f" {bitfields}" ) value, data = field_type.deserialize(data) setattr(instance, field.name, value) if bitfields: raise ValueError( f"Trailing segment of bitfields did not terminate on a byte boundary: " f" {bitfields}" ) return instance, data # TODO: improve? 
def replace(self: typing.Type[_STRUCT], **kwargs) -> _STRUCT: def replace(self, **kwargs: dict[str, typing.Any]) -> Struct: d = self.as_dict().copy() d.update(kwargs) return type(self)(**d) def __eq__(self, other: object) -> bool: if self._int_type is not None and isinstance(other, int): return int(self) == other elif not isinstance(self, type(other)) and not isinstance(other, type(self)): return NotImplemented return self.as_dict() == other.as_dict() def __int__(self) -> int: if self._int_type is None: return NotImplemented n, remaining = self._int_type.deserialize(self.serialize()) assert not remaining return int(n) def __lt__(self, other: object) -> bool: if self._int_type is None or not isinstance(other, int): return NotImplemented return int(self) < int(other) def __le__(self, other: object) -> bool: if self._int_type is None or not isinstance(other, int): return NotImplemented return int(self) <= int(other) def __gt__(self, other: object) -> bool: if self._int_type is None or not isinstance(other, int): return NotImplemented return int(self) > int(other) def __ge__(self, other: object) -> bool: if self._int_type is None or not isinstance(other, int): return NotImplemented return int(self) >= int(other) def __repr__(self) -> str: fields = [] # Assigned fields are displayed as `field=value` for f, v in self.assigned_fields(): fields.append(f"{f.name}={f.repr(v)}") cls = type(self) # Properties are displayed as `*prop=value` for attr in dir(cls): cls_attr = getattr(cls, attr) if not isinstance(cls_attr, property) or hasattr(Struct, attr): continue value = getattr(self, attr) if value is not None: fields.append(f"*{attr}={value!r}") extra = "" if self._int_type is not None: extra = f"<{self._int_type(int(self))._hex_repr()}>" return f"{type(self).__name__}{extra}({', '.join(fields)})" @property def is_valid(self) -> bool: try: self.serialize() return True except ValueError: return False def matches(self, other: Struct) -> bool: if not isinstance(self, 
# NOTE(review): continuation of `Struct.matches` from the previous chunk.
type(other)) and not isinstance(other, type(self)):
            return False

        for field in self.fields:
            actual = getattr(self, field.name)
            expected = getattr(other, field.name)

            # A None on the "pattern" side matches anything
            if expected is None:
                continue

            if isinstance(expected, Struct):
                if not actual.matches(expected):
                    return False
            elif actual != expected:
                return False

        return True
zigpy-0.62.3/zigpy/typing.py000066400000000000000000000022151456054056700160060ustar00rootroot00000000000000
"""Typing helpers for Zigpy."""
from __future__ import annotations

from typing import TYPE_CHECKING, Any, Dict, Union

ConfigType = Dict[str, Any]

# pylint: disable=invalid-name
# At runtime these are just forward-reference strings; under TYPE_CHECKING
# below they are rebound to the real classes for static analysis.
ClusterType = "Cluster"
ControllerApplicationType = "ControllerApplication"
CustomClusterType = "CustomCluster"
CustomDeviceType = "CustomDevice"
CustomEndpointType = "CustomEndpoint"
DeviceType = "Device"
EndpointType = "Endpoint"
ZDOType = "ZDO"
AddressingMode = "AddressingMode"

if TYPE_CHECKING:
    import zigpy.application
    import zigpy.device
    import zigpy.endpoint
    import zigpy.quirks
    import zigpy.types
    import zigpy.zcl
    import zigpy.zdo

    ClusterType = zigpy.zcl.Cluster
    ControllerApplicationType = zigpy.application.ControllerApplication
    CustomClusterType = zigpy.quirks.CustomCluster
    CustomDeviceType = zigpy.quirks.CustomDevice
    CustomEndpointType = zigpy.quirks.CustomEndpoint
    DeviceType = zigpy.device.Device
    EndpointType = zigpy.endpoint.Endpoint
    ZDOType = zigpy.zdo.ZDO
    AddressingMode = Union[
        zigpy.types.Addressing.Group,
        zigpy.types.Addressing.IEEE,
        zigpy.types.Addressing.NWK,
    ]
zigpy-0.62.3/zigpy/util.py000066400000000000000000000335441456054056700154600ustar00rootroot00000000000000
from __future__ import annotations

import abc
import asyncio
import functools
import inspect
import logging
import traceback
import typing
import warnings

from crccheck.crc import CrcX25
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import ECB
from
# NOTE(review): continuation of util.py's import block from the previous chunk.
zigpy.datastructures import DynamicBoundedSemaphore  # noqa: F401
from zigpy.exceptions import ControllerException, ZigbeeException
import zigpy.types as t

LOGGER = logging.getLogger(__name__)


class ListenableMixin:
    """Mixin that fans out named events to registered listener objects."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self._listeners: dict[int, tuple[typing.Callable, bool]] = {}

    def _add_listener(self, listener: typing.Any, include_context: bool) -> int:
        # Key on the object's id(); probe linearly in case of a collision
        id_ = id(listener)
        while id_ in self._listeners:
            id_ += 1
        self._listeners[id_] = (listener, include_context)
        return id_

    def add_listener(self, listener: typing.Any) -> int:
        return self._add_listener(listener, include_context=False)

    def add_context_listener(self, listener: CatchingTaskMixin) -> int:
        # Context listeners receive the emitting object as the first argument
        return self._add_listener(listener, include_context=True)

    def remove_listener(self, listener: typing.Any) -> None:
        for id_, (attached_listener, _) in self._listeners.items():
            if attached_listener is listener:
                del self._listeners[id_]
                break

    def listener_event(self, method_name: str, *args) -> list[typing.Any | None]:
        """Synchronously invoke `method_name` on every listener that has it."""
        result = []

        for listener, include_context in self._listeners.values():
            method = getattr(listener, method_name, None)

            if not method:
                continue

            try:
                if include_context:
                    result.append(method(self, *args))
                else:
                    result.append(method(*args))
            except Exception as e:
                # Listener failures are logged, never propagated to the emitter
                LOGGER.debug(
                    "Error calling listener %r with args %r", method, args, exc_info=e
                )

        return result

    async def async_event(self, method_name: str, *args) -> list[typing.Any]:
        """Invoke async listeners concurrently; exceptions are logged, not raised."""
        tasks = []

        for listener, include_context in self._listeners.values():
            method = getattr(listener, method_name, None)

            if not method:
                continue

            if include_context:
                tasks.append(method(self, *args))
            else:
                tasks.append(method(*args))

        results = []

        for result in await asyncio.gather(*tasks, return_exceptions=True):
            if isinstance(result, Exception):
                LOGGER.debug(
                    "Error calling listener %r with args %r",
                    method,
                    args,
                    exc_info=result,
                )
            else:
                results.append(result)

        return results


class LocalLogMixin:
    """Mixin providing level-specific logging helpers that funnel through `log`."""

    @abc.abstractmethod
    def log(self, lvl: int, msg: str, *args, **kwargs):  # pragma: no cover
        pass

    def _log(self, lvl: int, msg: str, *args, **kwargs) -> None:
        # stacklevel=4 makes log records point at the original caller, not
        # these convenience wrappers
        return self.log(lvl, msg, *args, stacklevel=4, **kwargs)

    def exception(self, msg, *args, **kwargs):
        return self._log(logging.ERROR, msg, *args, **kwargs)

    def debug(self, msg: str, *args, **kwargs) -> None:
        return self._log(logging.DEBUG, msg, *args, **kwargs)

    def info(self, msg: str, *args, **kwargs) -> None:
        return self._log(logging.INFO, msg, *args, **kwargs)

    def warning(self, msg: str, *args, **kwargs) -> None:
        return self._log(logging.WARNING, msg, *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        return self._log(logging.ERROR, msg, *args, **kwargs)


async def retry(
    func: typing.Callable[[], typing.Awaitable[typing.Any]],
    retry_exceptions: typing.Iterable[BaseException],
    tries: int = 3,
    delay: int | float = 0.1,
) -> typing.Any:
    """Retry a function in case of exception

    Only exceptions in `retry_exceptions` will be retried.
    """
    while True:
        LOGGER.debug("Tries remaining: %s", tries)
        try:
            return await func()
        except retry_exceptions:
            # Out of attempts: re-raise the last exception
            if tries <= 1:
                raise

            tries -= 1
            await asyncio.sleep(delay)


def retryable(
    retry_exceptions: typing.Iterable[BaseException], tries: int = 1, delay: float = 0.1
) -> typing.Callable:
    """Return a decorator which makes a function able to be retried.

    Only exceptions in `retry_exceptions` will be retried.
""" def decorator(func: typing.Callable) -> typing.Callable: nonlocal tries, delay @functools.wraps(func) def wrapper(*args, **kwargs): if tries <= 1: return func(*args, **kwargs) return retry( functools.partial(func, *args, **kwargs), retry_exceptions, tries=tries, delay=delay, ) return wrapper return decorator retryable_request = functools.partial( retryable, (ZigbeeException, asyncio.TimeoutError) ) def aes_mmo_hash_update(length: int, result: bytes, data: bytes) -> tuple[int, bytes]: block_size = AES.block_size // 8 while len(data) >= block_size: block = bytes(data[:block_size]) # Encrypt aes = Cipher(AES(bytes(result)), ECB()).encryptor() result = bytearray(aes.update(block) + aes.finalize()) # XOR plaintext into ciphertext for i in range(block_size): result[i] ^= block[i] data = data[block_size:] length += block_size return (length, result) def aes_mmo_hash(data: bytes) -> t.KeyData: block_size = AES.block_size // 8 result_len = 0 remaining_length = 0 length = len(data) result = bytearray([0] * block_size) temp = bytearray([0] * block_size) if data and length > 0: remaining_length = length & (block_size - 1) if length >= block_size: # Mask out the lower byte since hash update will hash # everything except the last piece, if the last piece # is less than 16 bytes. hashed_length = length & ~(block_size - 1) (result_len, result) = aes_mmo_hash_update(result_len, result, data) data = data[hashed_length:] for i in range(remaining_length): temp[i] = data[i] # Per the spec, Concatenate a 1 bit followed by all zero bits # (previous memset() on temp[] set the rest of the bits to zero) temp[remaining_length] = 0x80 result_len += remaining_length # If appending the bit string will push us beyond the 16-byte boundary # we must hash that block and append another 16-byte block. if (block_size - remaining_length) < 3: (result_len, result) = aes_mmo_hash_update(result_len, result, temp) # Since this extra data is due to the concatenation, # we remove that length. 
We want the length of data only # and not the padding. result_len -= block_size temp = bytearray([0] * block_size) bit_size = result_len * 8 temp[block_size - 2] = (bit_size >> 8) & 0xFF temp[block_size - 1] = (bit_size) & 0xFF (result_len, result) = aes_mmo_hash_update(result_len, result, temp) return t.KeyData(result) def convert_install_code(code: bytes) -> t.KeyData: if len(code) not in (8, 10, 14, 18): return None real_crc = bytes(code[-2:]) crc = CrcX25() crc.process(code[:-2]) if real_crc != crc.finalbytes(byteorder="little"): return None return aes_mmo_hash(code) T = typing.TypeVar("T") class Request(typing.Generic[T]): """Request context manager.""" def __init__(self, pending: dict, sequence: T) -> None: """Init context manager for requests.""" self._pending = pending self._result: asyncio.Future = asyncio.Future() self._sequence = sequence @property def result(self) -> asyncio.Future: return self._result @property def sequence(self) -> T: """Request sequence.""" return self._sequence def __enter__(self) -> Request: """Return context manager.""" self._pending[self.sequence] = self return self def __exit__(self, exc_type: None, exc_value: None, exc_traceback: None) -> bool: """Clean up pending on exit.""" if not self.result.done(): self.result.cancel() self._pending.pop(self.sequence) return not exc_type class Requests(dict, typing.Generic[T]): def new(self, sequence: T) -> Request[T]: """Wrap new request into a context manager.""" if sequence in self: LOGGER.debug("Duplicate %s TSN: pending %s", sequence, self) raise ControllerException(f"Duplicate TSN: {sequence}") return Request(self, sequence) class CatchingTaskMixin(LocalLogMixin): """Allow creating tasks suppressing exceptions.""" _tasks: set[asyncio.Future[typing.Any]] = set() def create_catching_task( self, target: typing.Coroutine, exceptions: type[Exception] | tuple | None = None, name: str | None = None, ) -> None: """Create a task.""" task = asyncio.get_running_loop().create_task( 
# NOTE(review): continuation of `CatchingTaskMixin.create_catching_task`.
            self.catching_coro(target, exceptions), name=name
        )
        # Keep a strong reference until the task completes
        self._tasks.add(task)
        task.add_done_callback(self._tasks.remove)

    async def catching_coro(
        self,
        target: typing.Coroutine,
        exceptions: type[Exception] | tuple | None = None,
    ) -> typing.Any:
        """Wrap a target coro and catch specified exceptions."""
        if exceptions is None:
            exceptions = (asyncio.TimeoutError, ZigbeeException)
        try:
            return await target
        except exceptions:
            pass
        except (Exception, asyncio.CancelledError):  # pylint: disable=broad-except
            # Do not print the wrapper in the traceback
            frames = len(inspect.trace()) - 1
            exc_msg = traceback.format_exc(-frames)
            self.exception("%s", exc_msg)

        return None


def deprecated(message: str) -> typing.Callable[[typing.Callable], typing.Callable]:
    """Decorator that emits a DeprecationWarning when the function or property is accessed."""

    def decorator(function: typing.Callable) -> typing.Callable:
        @functools.wraps(function)
        def replacement(*args, **kwargs):
            warnings.warn(
                f"{function.__name__} is deprecated: {message}", DeprecationWarning
            )
            return function(*args, **kwargs)

        return replacement

    return decorator


def deprecated_attrs(
    mapping: dict[str, typing.Any]
) -> typing.Callable[[str], typing.Any]:
    """Create a module-level `__getattr__` function that remaps deprecated objects."""

    def __getattr__(name: str) -> typing.Any:
        if name not in mapping:
            raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

        replacement = mapping[name]
        warnings.warn(
            (
                f"`{__name__}.{name}` has been renamed to"
                f" `{__name__}.{replacement.__name__}`"
            ),
            DeprecationWarning,
        )

        return replacement

    return __getattr__


def pick_optimal_channel(
    channel_energy: dict[int, float],
    channels: t.Channels = t.Channels.from_channel_list([11, 15, 20, 25]),
    *,
    kernel: list[float] = (0.1, 0.5, 1.0, 0.5, 0.1),
    channel_penalty: dict[int, float] = {
        11: 2.0,  # ZLL but WiFi interferes
        12: 3.0,
        13: 3.0,
        14: 3.0,
        15: 1.0,  # ZLL
        16: 3.0,
        17: 3.0,
        18: 3.0,
        19: 3.0,
        20: 1.0,  # ZLL
        21: 3.0,
        22: 3.0,
        23: 3.0,
        24: 3.0,
        25: 1.0,  # ZLL
        26: 2.0,  # Not ZLL but WiFi can interfere in some regions
    },
) -> int:
    """Scans all channels and picks the best one from the given mask."""
    # The convolution kernel must have a well-defined center element
    assert len(kernel) % 2 == 1
    kernel_width = (len(kernel) - 1) // 2

    # Scan all channels even if we're restricted to picking among a few, since
    # nearby channels will affect our decision
    assert set(channel_energy.keys()) == set(t.Channels.ALL_CHANNELS)  # type: ignore

    # We don't know energies above channel 26 or below 11. Assume the scan results
    # just continue indefinitely with the last-seen value.
    ext_energies = (
        [channel_energy[11]] * kernel_width
        + [channel_energy[c] for c in t.Channels.ALL_CHANNELS]
        + [channel_energy[26]] * kernel_width
    )

    # Incorporate the energies of nearby channels into our calculation by performing
    # a discrete convolution with the provided kernel.
    convolution = ext_energies[:]

    for i in range(len(ext_energies)):
        for j in range(-kernel_width, kernel_width + 1):
            if 0 <= i + j < len(convolution):
                convolution[i + j] += ext_energies[i] * kernel[kernel_width + j]

    # Crop off the extended bounds, leaving us with an array of the original size
    convolution = convolution[kernel_width:-kernel_width]

    # Incorporate a penalty to avoid specific channels unless absolutely necessary.
    # Adding `1` ensures the score is positive and the channel penalty gets applied even
    # when the reported LQI is zero.
scores = { c: (1 + convolution[c - 11]) * channel_penalty.get(c, 1.0) for c in t.Channels.ALL_CHANNELS } optimal_channel = min(channels, key=lambda c: scores[c]) LOGGER.info("Optimal channel is %s", optimal_channel) LOGGER.debug("Channel scores: %s", scores) return optimal_channel class Singleton: """Singleton class.""" def __init__(self, name: str) -> None: self.name = name def __repr__(self) -> str: return f"" def __hash__(self) -> int: return hash(self.name) zigpy-0.62.3/zigpy/zcl/000077500000000000000000000000001456054056700147125ustar00rootroot00000000000000zigpy-0.62.3/zigpy/zcl/__init__.py000066400000000000000000001055241456054056700170320ustar00rootroot00000000000000from __future__ import annotations import collections from datetime import datetime, timezone import enum import functools import itertools import logging import types from typing import TYPE_CHECKING, Any, Iterable, Sequence import warnings from zigpy import util import zigpy.types as t from zigpy.typing import AddressingMode, EndpointType from zigpy.zcl import foundation from zigpy.zcl.foundation import BaseAttributeDefs, BaseCommandDefs if TYPE_CHECKING: from zigpy.appdb import PersistingListener from zigpy.endpoint import Endpoint LOGGER = logging.getLogger(__name__) def convert_list_schema( schema: Sequence[type], command_id: int, direction: foundation.Direction ) -> type[t.Struct]: schema_dict = {} for i, param_type in enumerate(schema, start=1): name = f"param{i}" real_type = next(c for c in param_type.__mro__ if c.__name__ != "Optional") if real_type is not param_type: name += "?" 
# NOTE(review): continuation of `convert_list_schema`.
        schema_dict[name] = real_type

    # Build a throwaway command definition just to compile the schema struct
    temp = foundation.ZCLCommandDef(
        schema=schema_dict,
        direction=direction,
        id=command_id,
        name="schema",
    )

    return temp.with_compiled_schema().schema


class ClusterType(enum.IntEnum):
    Server = 0
    Client = 1


class Cluster(util.ListenableMixin, util.CatchingTaskMixin):
    """A cluster on an endpoint"""

    class AttributeDefs(BaseAttributeDefs):
        pass

    class ServerCommandDefs(BaseCommandDefs):
        pass

    class ClientCommandDefs(BaseCommandDefs):
        pass

    # Custom clusters for quirks subclass Cluster but should not be stored in any global
    # registries, since they're device-specific and collide with existing clusters.
    _skip_registry: bool = False

    # Most clusters are identified by a single cluster ID
    cluster_id: t.uint16_t = None

    # Clusters are accessible by name from their endpoint as an attribute
    ep_attribute: str = None

    # Manufacturer specific clusters exist between 0xFC00 and 0xFFFF. This exists solely
    # to remove the need to create 1024 "ManufacturerSpecificCluster" instances.
    cluster_id_range: tuple[t.uint16_t, t.uint16_t] = None

    # Deprecated: clusters contain attributes and both client and server commands
    attributes: dict[int, foundation.ZCLAttributeDef] = {}
    client_commands: dict[int, foundation.ZCLCommandDef] = {}
    server_commands: dict[int, foundation.ZCLCommandDef] = {}
    attributes_by_name: dict[str, foundation.ZCLAttributeDef] = {}
    commands_by_name: dict[str, foundation.ZCLCommandDef] = {}

    # Internal caches and indices
    _registry: dict = {}
    _registry_range: dict = {}

    def __init_subclass__(cls) -> None:
        # Normalize old-style (tuple-based) definitions into the new dataclass
        # definitions, build the *Defs containers, then register the subclass.
        if cls.cluster_id is not None:
            cls.cluster_id = t.ClusterId(cls.cluster_id)

        # Compile the old command definitions
        for commands in [cls.server_commands, cls.client_commands]:
            for command_id, command in list(commands.items()):
                if isinstance(command, tuple):
                    # Backwards compatibility with old command tuples
                    name, schema, direction = command
                    command = foundation.ZCLCommandDef(
                        id=command_id,
                        name=name,
                        schema=convert_list_schema(schema, command_id, direction),
                        direction=direction,
                    )

                command = command.replace(id=command_id).with_compiled_schema()
                commands[command.id] = command

        # Compile the old attribute definitions
        for attr_id, attr in list(cls.attributes.items()):
            if isinstance(attr, tuple):
                if len(attr) == 2:
                    attr_name, attr_type = attr
                    attr_manuf_specific = False
                else:
                    attr_name, attr_type, attr_manuf_specific = attr

                attr = foundation.ZCLAttributeDef(
                    id=attr_id,
                    name=attr_name,
                    type=attr_type,
                    is_manufacturer_specific=attr_manuf_specific,
                )
            else:
                attr = attr.replace(id=attr_id)

            cls.attributes[attr.id] = attr.replace(id=attr_id)

        # Create new definitions from the old-style definitions
        if cls.attributes and "AttributeDefs" not in cls.__dict__:
            cls.AttributeDefs = types.new_class(
                name="AttributeDefs",
                bases=(BaseAttributeDefs,),
            )

            for attr in cls.attributes.values():
                setattr(cls.AttributeDefs, attr.name, attr)

        if cls.server_commands and "ServerCommandDefs" not in cls.__dict__:
            cls.ServerCommandDefs = types.new_class(
                name="ServerCommandDefs",
                bases=(BaseCommandDefs,),
            )

            for command in cls.server_commands.values():
                setattr(cls.ServerCommandDefs, command.name, command)

        if cls.client_commands and "ClientCommandDefs" not in cls.__dict__:
            cls.ClientCommandDefs = types.new_class(
                name="ClientCommandDefs",
                bases=(BaseCommandDefs,),
            )

            for command in cls.client_commands.values():
                setattr(cls.ClientCommandDefs, command.name, command)

        # Check the old definitions for duplicates
        for old_defs in [cls.attributes, cls.server_commands, cls.client_commands]:
            counts = collections.Counter(d.name for d in old_defs.values())

            if len(counts) != sum(counts.values()):
                duplicates = [n for n, c in counts.items() if c > 1]
                raise TypeError(f"Duplicate definitions exist for {duplicates}")

        # Populate the `name` attribute of every definition
        for defs in (cls.ServerCommandDefs, cls.ClientCommandDefs, cls.AttributeDefs):
            for name in dir(defs):
                definition = getattr(defs, name)

                if (
                    isinstance(
                        definition,
                        (foundation.ZCLCommandDef, foundation.ZCLAttributeDef),
                    )
                    and definition.name is None
                ):
                    object.__setattr__(definition, "name", name)

        # Compile the schemas
        for defs in (cls.ServerCommandDefs, cls.ClientCommandDefs):
            for name in dir(defs):
                definition = getattr(defs, name)

                if isinstance(definition, foundation.ZCLCommandDef):
                    setattr(defs, definition.name, definition.with_compiled_schema())

        # Recreate the old structures using the new-style definitions
        cls.attributes = {attr.id: attr for attr in cls.AttributeDefs}
        cls.client_commands = {cmd.id: cmd for cmd in cls.ClientCommandDefs}
        cls.server_commands = {cmd.id: cmd for cmd in cls.ServerCommandDefs}

        cls.attributes_by_name = {attr.name: attr for attr in cls.AttributeDefs}

        all_cmds: Iterable[foundation.ZCLCommandDef] = itertools.chain(
            cls.ClientCommandDefs, cls.ServerCommandDefs
        )
        cls.commands_by_name = {cmd.name: cmd for cmd in all_cmds}

        if cls._skip_registry:
            return

        if cls.cluster_id is not None:
            cls._registry[cls.cluster_id] = cls
        if cls.cluster_id_range is not None:
            cls._registry_range[cls.cluster_id_range] = cls

    def __init__(self, endpoint: EndpointType, is_server: bool = True) -> None:
        self._endpoint: EndpointType = endpoint
        self._attr_cache: dict[int, Any] = {}
        self._attr_last_updated: dict[int, datetime] = {}
        self.unsupported_attributes: set[int | str] = set()
        self._listeners = {}
        self._type: ClusterType = (
            ClusterType.Server if is_server else ClusterType.Client
        )

    @property
    def attridx(self):
        # Deprecated alias kept for backwards compatibility
        warnings.warn(
            "`attridx` has been replaced by `attributes_by_name`", DeprecationWarning
        )
        return self.attributes_by_name

    def find_attribute(self, name_or_id: int | str) -> foundation.ZCLAttributeDef:
        """Look up an attribute definition by name or numeric id."""
        if isinstance(name_or_id, str):
            return self.attributes_by_name[name_or_id]
        elif isinstance(name_or_id, int):
            return self.attributes[name_or_id]
        else:
            raise ValueError(
                f"Attribute must be either a string or an integer,"
                f" not {name_or_id!r} ({type(name_or_id)!r}"
            )

    @classmethod
    def from_id(
        cls, endpoint: EndpointType, cluster_id: int, is_server: bool
# NOTE(review): continuation of `Cluster.from_id` from the previous chunk.
= True
    ) -> Cluster:
        # Resolve a cluster class by id: exact registry hit, then id-range
        # registries, then fall back to a generic instance of this class.
        cluster_id = t.ClusterId(cluster_id)

        if cluster_id in cls._registry:
            return cls._registry[cluster_id](endpoint, is_server)

        for (start, end), cluster in cls._registry_range.items():
            if start <= cluster_id <= end:
                cluster = cluster(endpoint, is_server)
                cluster.cluster_id = cluster_id
                return cluster

        LOGGER.debug("Unknown cluster 0x%04X", cluster_id)

        cluster = cls(endpoint, is_server)
        cluster.cluster_id = cluster_id
        return cluster

    def deserialize(self, data: bytes) -> tuple[foundation.ZCLHeader, ...]:
        """Deserialize a raw ZCL frame into its header and decoded payload."""
        self.debug("Received ZCL frame: %r", data)

        hdr, data = foundation.ZCLHeader.deserialize(data)
        self.debug("Decoded ZCL frame header: %r", hdr)

        if hdr.frame_control.frame_type == foundation.FrameType.CLUSTER_COMMAND:
            # Cluster command
            if hdr.direction == foundation.Direction.Server_to_Client:
                commands = self.client_commands
            else:
                commands = self.server_commands

            if hdr.command_id not in commands:
                # Unknown command: return the undecoded payload
                self.debug("Unknown cluster command %s %s", hdr.command_id, data)
                return hdr, data

            command = commands[hdr.command_id]
        else:
            # General command
            if hdr.command_id not in foundation.GENERAL_COMMANDS:
                self.debug("Unknown foundation command %s %s", hdr.command_id, data)
                return hdr, data

            command = foundation.GENERAL_COMMANDS[hdr.command_id]

        hdr.frame_control.direction = command.direction
        response, data = command.schema.deserialize(data)

        self.debug("Decoded ZCL frame: %s:%r", type(self).__name__, response)

        if data:
            self.debug("Data remains after deserializing ZCL frame: %r", data)

        return hdr, response

    def _create_request(
        self,
        *,
        general: bool,
        command_id: foundation.GeneralCommand | int,
        schema: dict | t.Struct,
        manufacturer: int | None = None,
        tsn: int | None = None,
        disable_default_response: bool,
        direction: foundation.Direction,
        # Schema args and kwargs
        args: tuple[Any, ...],
        kwargs: Any,
    ) -> tuple[foundation.ZCLHeader, bytes]:
        """Build a ZCL header + serialized payload pair for `request`/`reply`."""
        # Convert out-of-band dict schemas to struct schemas
        if isinstance(schema, (tuple, list)):
            schema = convert_list_schema(
                command_id=command_id,
                schema=schema,
                direction=foundation.Direction.Client_to_Server,
            )

        request = schema(*args, **kwargs)  # type:ignore[operator]
        request.serialize()  # Throw an error before generating a new TSN

        if tsn is None:
            tsn = self._endpoint.device.get_sequence()

        frame_control = foundation.FrameControl(
            frame_type=(
                foundation.FrameType.GLOBAL_COMMAND
                if general
                else foundation.FrameType.CLUSTER_COMMAND
            ),
            is_manufacturer_specific=(manufacturer is not None),
            direction=direction,
            disable_default_response=disable_default_response,
            reserved=0b000,
        )

        hdr = foundation.ZCLHeader(
            frame_control=frame_control,
            manufacturer=manufacturer,
            tsn=tsn,
            command_id=command_id,
        )

        return hdr, request

    async def request(
        self,
        general: bool,
        command_id: foundation.GeneralCommand | int | t.uint8_t,
        schema: dict | t.Struct,
        *args,
        manufacturer: int | t.uint16_t | None = None,
        expect_reply: bool = True,
        tsn: int | t.uint8_t | None = None,
        **kwargs,
    ):
        """Send a ZCL command via the endpoint and optionally await the reply."""
        hdr, request = self._create_request(
            general=general,
            command_id=command_id,
            schema=schema,
            manufacturer=manufacturer,
            tsn=tsn,
            disable_default_response=self.is_client,
            direction=(
                foundation.Direction.Server_to_Client
                if self.is_client
                else foundation.Direction.Client_to_Server
            ),
            args=args,
            kwargs=kwargs,
        )

        self.debug("Sending request header: %r", hdr)
        self.debug("Sending request: %r", request)
        data = hdr.serialize() + request.serialize()

        return await self._endpoint.request(
            self.cluster_id,
            hdr.tsn,
            data,
            expect_reply=expect_reply,
            command_id=hdr.command_id,
        )

    async def reply(
        self,
        general: bool,
        command_id: foundation.GeneralCommand | int | t.uint8_t,
        schema: dict | t.Struct,
        *args,
        manufacturer: int | t.uint16_t | None = None,
        tsn: int | t.uint8_t | None = None,
        **kwargs,
    ) -> None:
        """Send a ZCL response frame (default response always disabled)."""
        hdr, request = self._create_request(
            general=general,
            command_id=command_id,
            schema=schema,
            manufacturer=manufacturer,
            tsn=tsn,
            disable_default_response=True,
            direction=(
                foundation.Direction.Server_to_Client
                if self.is_client
                else foundation.Direction.Client_to_Server
            ),
            args=args,
            kwargs=kwargs,
        )

        self.debug("Sending reply header: %r", hdr)
        self.debug("Sending reply: %r", request)
        data = hdr.serialize() + request.serialize()

        return await self._endpoint.reply(
            self.cluster_id, hdr.tsn, data, command_id=hdr.command_id
        )

    def handle_message(
        self,
        hdr: foundation.ZCLHeader,
        args: list[Any],
        *,
        dst_addressing: AddressingMode | None = None,
    ) -> None:
        """Dispatch an incoming frame to cluster- or general-command handling."""
        self.debug(
            "Received command 0x%02X (TSN %d): %s", hdr.command_id, hdr.tsn, args
        )

        if hdr.frame_control.is_cluster:
            self.handle_cluster_request(hdr, args, dst_addressing=dst_addressing)
            self.listener_event("cluster_command", hdr.tsn, hdr.command_id, args)
            return

        self.listener_event("general_command", hdr, args)
        self.handle_cluster_general_request(hdr, args, dst_addressing=dst_addressing)

    def handle_cluster_request(
        self,
        hdr: foundation.ZCLHeader,
        args: list[Any],
        *,
        dst_addressing: AddressingMode | None = None,
    ):
        # Default handler; subclasses override for cluster-specific behavior
        self.debug(
            "No explicit handler for cluster command 0x%02x: %s",
            hdr.command_id,
            args,
        )

    def handle_cluster_general_request(
        self,
        hdr: foundation.ZCLHeader,
        args: list,
        *,
        dst_addressing: AddressingMode | None = None,
    ) -> None:
        """Handle general commands; currently only attribute reports are processed."""
        if hdr.command_id == foundation.GeneralCommand.Report_Attributes:
            values = []

            for a in args.attribute_reports:
                if a.attrid in self.attributes:
                    values.append(f"{self.attributes[a.attrid].name}={a.value.value!r}")
                else:
                    values.append(f"0x{a.attrid:04X}={a.value.value!r}")

            self.debug("Attribute report received: %s", ", ".join(values))

            for attr in args.attribute_reports:
                try:
                    # Normalize the reported value to the attribute's declared type
                    value = self.attributes[attr.attrid].type(attr.value.value)
                except KeyError:
                    value = attr.value.value
                except ValueError:
                    self.debug(
                        "Couldn't normalize %a attribute with %s value",
                        attr.attrid,
                        attr.value.value,
                        exc_info=True,
                    )
                    value = attr.value.value
                self._update_attribute(attr.attrid, value)

            if not hdr.frame_control.disable_default_response:
                self.send_default_rsp(
                    hdr,
                    foundation.Status.SUCCESS,
                )

    def read_attributes_raw(self, attributes,
# NOTE(review): continuation of `Cluster.read_attributes_raw`.
manufacturer=None):
        attributes = [t.uint16_t(a) for a in attributes]
        return self._read_attributes(attributes, manufacturer=manufacturer)

    async def read_attributes(
        self,
        attributes: list[int | str],
        allow_cache: bool = False,
        only_cache: bool = False,
        manufacturer: int | t.uint16_t | None = None,
    ) -> Any:
        """Read attributes (by name or id), optionally serving from the cache.

        Returns `(success, failure)` dicts keyed by the caller's original keys.
        """
        success, failure = {}, {}
        attribute_ids: list[int] = []
        orig_attributes: dict[int, int | str] = {}

        for attribute in attributes:
            if isinstance(attribute, str):
                attrid = self.attributes_by_name[attribute].id
            else:
                # Allow reading attributes that aren't defined
                attrid = attribute

            attribute_ids.append(attrid)
            orig_attributes[attrid] = attribute

        to_read = []
        if allow_cache or only_cache:
            for idx, attribute in enumerate(attribute_ids):
                if attribute in self._attr_cache:
                    success[attributes[idx]] = self._attr_cache[attribute]
                elif attribute in self.unsupported_attributes:
                    failure[attributes[idx]] = foundation.Status.UNSUPPORTED_ATTRIBUTE
                else:
                    to_read.append(attribute)
        else:
            to_read = attribute_ids

        if not to_read or only_cache:
            return success, failure

        result = await self.read_attributes_raw(to_read, manufacturer=manufacturer)
        if not isinstance(result[0], list):
            # Non-list result: the whole request failed with a single status
            for attrid in to_read:
                orig_attribute = orig_attributes[attrid]
                failure[orig_attribute] = result[0]  # Assume default response
        else:
            for record in result[0]:
                orig_attribute = orig_attributes[record.attrid]
                if record.status == foundation.Status.SUCCESS:
                    try:
                        value = self.attributes[record.attrid].type(record.value.value)
                    except KeyError:
                        value = record.value.value
                    except ValueError:
                        value = record.value.value
                        self.debug(
                            "Couldn't normalize %a attribute with %s value",
                            record.attrid,
                            value,
                            exc_info=True,
                        )
                    self._update_attribute(record.attrid, value)
                    success[orig_attribute] = value
                    self.remove_unsupported_attribute(record.attrid)
                else:
                    if record.status == foundation.Status.UNSUPPORTED_ATTRIBUTE:
                        self.add_unsupported_attribute(record.attrid)
                    failure[orig_attribute] = record.status

        return success, failure

    def read_attributes_rsp(self, attributes, manufacturer=None, *, tsn=None):
        """Build and send a Read Attributes Response for the given values."""
        args = []
        for attrid, value in attributes.items():
            if isinstance(attrid, str):
                attrid = self.attributes_by_name[attrid].id

            # Default to UNSUPPORTED_ATTRIBUTE; upgraded to SUCCESS below
            a = foundation.ReadAttributeRecord(
                attrid, foundation.Status.UNSUPPORTED_ATTRIBUTE, foundation.TypeValue()
            )
            args.append(a)

            if value is None:
                continue

            try:
                a.status = foundation.Status.SUCCESS
                python_type = self.attributes[attrid].type
                a.value.type = foundation.DATA_TYPES.pytype_to_datatype_id(python_type)
                a.value.value = python_type(value)
            except ValueError as e:
                a.status = foundation.Status.UNSUPPORTED_ATTRIBUTE
                self.error(str(e))

        return self._read_attributes_rsp(args, manufacturer=manufacturer, tsn=tsn)

    def _write_attr_records(
        self, attributes: dict[str | int, Any]
    ) -> list[foundation.Attribute]:
        """Convert a name/id → value mapping into wire-format Attribute records."""
        args = []
        for attrid, value in attributes.items():
            try:
                attr_def = self.find_attribute(attrid)
            except KeyError:
                self.error("%s is not a valid attribute id", attrid)

                # Throw an error if it's an unknown attribute name, without an ID
                if isinstance(attrid, str):
                    raise

                continue

            attr = foundation.Attribute(attr_def.id, foundation.TypeValue())
            attr.value.type = foundation.DATA_TYPES.pytype_to_datatype_id(attr_def.type)

            try:
                attr.value.value = attr_def.type(value)
            except ValueError as e:
                # Unconvertible values are skipped, not sent
                self.error(
                    "Failed to convert attribute 0x%04X from %s (%s) to type %s: %s",
                    attrid,
                    value,
                    type(value),
                    attr_def.type,
                    e,
                )
            else:
                args.append(attr)

        return args

    async def write_attributes(
        self, attributes: dict[str | int, Any], manufacturer: int | None = None
    ) -> list:
        """Write attributes to device with internal 'attributes' validation"""
        attrs = self._write_attr_records(attributes)
        return await self.write_attributes_raw(attrs, manufacturer)

    async def write_attributes_raw(
        self, attrs: list[foundation.Attribute], manufacturer: int | None = None
    ) -> list:
        """Write attributes to device without internal 'attributes' validation"""
        result = await self._write_attributes(attrs, manufacturer=manufacturer)
        if not isinstance(result[0], list):
            return result

        records = result[0]
        # A single SUCCESS record means every attribute was written
        if len(records) == 1 and records[0].status == foundation.Status.SUCCESS:
            for attr_rec in attrs:
                self._update_attribute(attr_rec.attrid, attr_rec.value.value)
        else:
            # Otherwise only cache the attributes that were not reported failed
            failed = [rec.attrid for rec in records]
            for attr_rec in attrs:
                if attr_rec.attrid not in failed:
                    self._update_attribute(attr_rec.attrid, attr_rec.value.value)

        return result

    def write_attributes_undivided(
        self, attributes: dict[str | int, Any], manufacturer: int | None = None
    ) -> list:
        """Either all or none of the attributes are written by the device."""
        args = self._write_attr_records(attributes)
        return self._write_attributes_undivided(args, manufacturer=manufacturer)

    def bind(self):
        return self._endpoint.device.zdo.bind(cluster=self)

    def unbind(self):
        return self._endpoint.device.zdo.unbind(cluster=self)

    def _attr_reporting_rec(
        self,
        attribute: int | str,
        min_interval: int,
        max_interval: int,
        reportable_change: int = 1,
        direction: int = 0x00,
    ) -> foundation.AttributeReportingConfig:
        """Build a reporting-configuration record for one attribute."""
        try:
            attr_def = self.find_attribute(attribute)
        except KeyError:
            raise ValueError(f"Unknown attribute {attribute!r} of {self} cluster")

        cfg = foundation.AttributeReportingConfig()
        cfg.direction = direction
        cfg.attrid = attr_def.id
        cfg.datatype = foundation.DATA_TYPES.pytype_to_datatype_id(attr_def.type)
        cfg.min_interval = min_interval
        cfg.max_interval = max_interval
        cfg.reportable_change = reportable_change
        return cfg

    async def configure_reporting(
        self,
        attribute: int | str,
        min_interval: int,
        max_interval: int,
        reportable_change: int,
        manufacturer: int | None = None,
    ) -> list[foundation.ConfigureReportingResponseRecord]:
        """Configure attribute reporting for a single attribute."""
        response = await self.configure_reporting_multiple(
            {attribute: (min_interval, max_interval, reportable_change)},
            manufacturer=manufacturer,
        )
        return response

    async def configure_reporting_multiple(
        self,
        attributes: dict[int | str, tuple[int, int, int]],
        manufacturer: int
| None = None, ) -> list[foundation.ConfigureReportingResponseRecord]: """Configure attribute reporting for multiple attributes in the same request. :param attributes: dict of attributes to configure attribute reporting. Key is either int or str for attribute id or attribute name. Value is a tuple of: - minimum reporting interval - maximum reporting interval - reportable change :param manufacturer: optional manufacturer id to use with the command """ cfg = [ self._attr_reporting_rec(attr, rep[0], rep[1], rep[2]) for attr, rep in attributes.items() ] res = await self._configure_reporting(cfg, manufacturer=manufacturer) # Parse configure reporting result for unsupported attributes records = res[0] if ( isinstance(records, list) and not ( len(records) == 1 and records[0].status == foundation.Status.SUCCESS ) and len(records) >= 0 ): failed = [ r.attrid for r in records if r.status == foundation.Status.UNSUPPORTED_ATTRIBUTE ] for attr in failed: self.add_unsupported_attribute(attr) success = [ r.attrid for r in records if r.status == foundation.Status.SUCCESS ] for attr in success: self.remove_unsupported_attribute(attr) elif isinstance(records, list) and ( len(records) == 1 and records[0].status == foundation.Status.SUCCESS ): # we get a single success when all are supported for attr in attributes: self.remove_unsupported_attribute(attr) return res def command( self, command_id: foundation.GeneralCommand | int | t.uint8_t, *args, manufacturer: int | t.uint16_t | None = None, expect_reply: bool = True, tsn: int | t.uint8_t | None = None, **kwargs, ): command = self.server_commands[command_id] return self.request( False, command_id, command.schema, *args, manufacturer=manufacturer, expect_reply=expect_reply, tsn=tsn, **kwargs, ) def client_command( self, command_id: foundation.GeneralCommand | int | t.uint8_t, *args, manufacturer: int | t.uint16_t | None = None, tsn: int | t.uint8_t | None = None, **kwargs, ): command = self.client_commands[command_id] return 
self.reply( False, command_id, command.schema, *args, manufacturer=manufacturer, tsn=tsn, **kwargs, ) @property def is_client(self) -> bool: """Return True if this is a client cluster.""" return self._type == ClusterType.Client @property def is_server(self) -> bool: """Return True if this is a server cluster.""" return self._type == ClusterType.Server @property def name(self) -> str: return self.__class__.__name__ @property def endpoint(self) -> Endpoint: return self._endpoint @property def commands(self): return list(self.ServerCommandDefs) def update_attribute(self, attrid: int | t.uint16_t, value: Any) -> None: """Update specified attribute with specified value""" self._update_attribute(attrid, value) def _update_attribute(self, attrid: int | t.uint16_t, value: Any) -> None: now = datetime.now(timezone.utc) self._attr_cache[attrid] = value self._attr_last_updated[attrid] = now self.listener_event("attribute_updated", attrid, value, now) def log(self, lvl: int, msg: str, *args, **kwargs) -> None: msg = "[%s:%s:0x%04x] " + msg args = ( self._endpoint.device.name, self._endpoint.endpoint_id, self.cluster_id, ) + args return LOGGER.log(lvl, msg, *args, **kwargs) def __getattr__(self, name: str) -> functools.partial: try: cmd = getattr(self.ClientCommandDefs, name) except AttributeError: pass else: return functools.partial(self.client_command, cmd.id) try: cmd = getattr(self.ServerCommandDefs, name) except AttributeError: pass else: return functools.partial(self.command, cmd.id) raise AttributeError(f"No such command name: {name}") def get(self, key: int | str, default: Any | None = None) -> Any: """Get cached attribute.""" attr_def = self.find_attribute(key) return self._attr_cache.get(attr_def.id, default) def __getitem__(self, key: int | str) -> Any: """Return cached value of the attr.""" return self._attr_cache[self.find_attribute(key).id] def __setitem__(self, key: int | str, value: Any) -> None: """Set cached value through attribute write.""" if not 
isinstance(key, (int, str)): raise ValueError("attr_name or attr_id are accepted only") self.create_catching_task(self.write_attributes({key: value})) def general_command( self, command_id: foundation.GeneralCommand | int | t.uint8_t, *args, manufacturer: int | t.uint16_t | None = None, expect_reply: bool = True, tsn: int | t.uint8_t | None = None, **kwargs, ): command = foundation.GENERAL_COMMANDS[command_id] if command.direction == foundation.Direction.Server_to_Client: # should reply be retryable? return self.reply( True, command.id, command.schema, *args, manufacturer=manufacturer, tsn=tsn, **kwargs, ) return self.request( True, command.id, command.schema, *args, manufacturer=manufacturer, expect_reply=expect_reply, tsn=tsn, **kwargs, ) _configure_reporting = functools.partialmethod( general_command, foundation.GeneralCommand.Configure_Reporting ) _read_attributes = functools.partialmethod( general_command, foundation.GeneralCommand.Read_Attributes ) _read_attributes_rsp = functools.partialmethod( general_command, foundation.GeneralCommand.Read_Attributes_rsp ) _write_attributes = functools.partialmethod( general_command, foundation.GeneralCommand.Write_Attributes ) _write_attributes_undivided = functools.partialmethod( general_command, foundation.GeneralCommand.Write_Attributes_Undivided ) discover_attributes = functools.partialmethod( general_command, foundation.GeneralCommand.Discover_Attributes ) discover_attributes_extended = functools.partialmethod( general_command, foundation.GeneralCommand.Discover_Attribute_Extended ) discover_commands_received = functools.partialmethod( general_command, foundation.GeneralCommand.Discover_Commands_Received ) discover_commands_generated = functools.partialmethod( general_command, foundation.GeneralCommand.Discover_Commands_Generated ) def send_default_rsp( self, hdr: foundation.ZCLHeader, status: foundation.Status = foundation.Status.SUCCESS, ) -> None: """Send default response unconditionally.""" 
self.create_catching_task( self.general_command( foundation.GeneralCommand.Default_Response, hdr.command_id, status, tsn=hdr.tsn, ) ) def add_unsupported_attribute( self, attr: int | str, inhibit_events: bool = False ) -> None: """Adds unsupported attribute.""" if attr in self.unsupported_attributes: return self.unsupported_attributes.add(attr) if isinstance(attr, int) and not inhibit_events: self.listener_event("unsupported_attribute_added", attr) try: attrdef = self.find_attribute(attr) except KeyError: pass else: if isinstance(attr, int): self.add_unsupported_attribute(attrdef.name, inhibit_events) else: self.add_unsupported_attribute(attrdef.id, inhibit_events) def remove_unsupported_attribute( self, attr: int | str, inhibit_events: bool = False ) -> None: """Removes an unsupported attribute.""" if attr not in self.unsupported_attributes: return self.unsupported_attributes.remove(attr) if isinstance(attr, int) and not inhibit_events: self.listener_event("unsupported_attribute_removed", attr) try: attrdef = self.find_attribute(attr) except KeyError: pass else: if isinstance(attr, int): self.remove_unsupported_attribute(attrdef.name, inhibit_events) else: self.remove_unsupported_attribute(attrdef.id, inhibit_events) class ClusterPersistingListener: def __init__(self, applistener: PersistingListener, cluster: Cluster) -> None: self._applistener = applistener self._cluster = cluster def attribute_updated( self, attrid: int | t.uint16_t, value: Any, timestamp: datetime ) -> None: self._applistener.attribute_updated(self._cluster, attrid, value, timestamp) def cluster_command(self, *args, **kwargs) -> None: pass def general_command(self, *args, **kwargs) -> None: pass def unsupported_attribute_added(self, attrid: int) -> None: """An unsupported attribute was added.""" self._applistener.unsupported_attribute_added(self._cluster, attrid) def unsupported_attribute_removed(self, attrid: int) -> None: """Remove an unsupported attribute.""" 
class ShadeStatus(t.bitmap8):
    """Bit flags reported in the Shade cluster's ``status`` attribute."""

    Operational = 0b00000001
    Adjusting = 0b00000010
    Opening = 0b00000100
    Motor_forward_is_opening = 0b00001000


class ShadeMode(t.enum8):
    """Values of the Shade cluster's ``mode`` attribute."""

    Normal = 0x00
    # BUGFIX: this was 0x00, which made Configure a mere alias of Normal
    # (duplicate enum values collapse into one member), so configure mode
    # could never be selected or distinguished.  ZCL defines the shade
    # configuration mode as the distinct value 0x01.
    Configure = 0x01
    Unknown = 0xFF
class LockState(t.enum8):
    """Current state of the lock mechanism (``lock_state`` attribute)."""

    Not_fully_locked = 0x00
    Locked = 0x01
    Unlocked = 0x02
    Undefined = 0xFF


class LockType(t.enum8):
    """Physical lock mechanism type (``lock_type`` attribute)."""

    Dead_bolt = 0x00
    Magnetic = 0x01
    Other = 0x02
    Mortise = 0x03
    Rim = 0x04
    Latch_bolt = 0x05
    Cylindrical_lock = 0x06
    Tubular_lock = 0x07
    Interconnected_lock = 0x08
    Dead_latch = 0x09
    Door_furniture = 0x0A


class DoorState(t.enum8):
    """State of the door itself (``door_state`` attribute)."""

    Open = 0x00
    Closed = 0x01
    Error_jammed = 0x02
    Error_forced_open = 0x03
    Error_unspecified = 0x04
    Undefined = 0xFF


class OperatingMode(t.enum8):
    """Lock operating mode (``operating_mode`` attribute)."""

    Normal = 0x00
    Vacation = 0x01
    Privacy = 0x02
    No_RF_Lock_Unlock = 0x03
    Passage = 0x04


class SupportedOperatingModes(t.bitmap16):
    """Bitmap of operating modes the lock supports (one bit per OperatingMode)."""

    Normal = 0x0001
    Vacation = 0x0002
    Privacy = 0x0004
    No_RF = 0x0008
    Passage = 0x0010


class DefaultConfigurationRegister(t.bitmap16):
    """Factory-default configuration flags (``default_configuration_register``)."""

    Enable_Local_Programming = 0x0001
    Keypad_Interface_default_access = 0x0002
    RF_Interface_default_access = 0x0004
    Sound_Volume_non_zero = 0x0020
    Auto_Relock_time_non_zero = 0x0040
    Led_settings_non_zero = 0x0080


class ZigbeeSecurityLevel(t.enum8):
    """Security level applied to door lock traffic (``zigbee_security_level``)."""

    Network_Security = 0x00
    APS_Security = 0x01


class AlarmMask(t.bitmap16):
    """Door-lock alarm events that may be signalled (``alarm_mask`` attribute)."""

    Deadbolt_Jammed = 0x0001
    Lock_Reset_to_Factory_Defaults = 0x0002
    Reserved = 0x0004
    RF_Module_Power_Cycled = 0x0008
    Tamper_Alarm_wrong_code_entry_limit = 0x0010
    Tamper_Alarm_front_escutcheon_removed = 0x0020
    # NOTE(review): "Lockec" looks like a typo for "Locked", but the member
    # name is public API and must stay as-is for backward compatibility.
    Forced_Door_Open_under_Door_Lockec_Condition = 0x0040
class RFOperationEventMask(t.bitmap16):
    """RF-sourced lock/unlock events to report (``rf_operation_event_mask``)."""

    Manufacturer_specific = 0x0001
    Lock_source_RF = 0x0002
    Unlock_source_RF = 0x0004
    Lock_source_RF_error_invalid_code = 0x0008
    Lock_source_RF_error_invalid_schedule = 0x0010
    Unlock_source_RF_error_invalid_code = 0x0020
    Unlock_source_RF_error_invalid_schedule = 0x0040


class ManualOperatitonEventMask(t.bitmap16):
    """Manually-sourced events to report (``manual_operation_event_mask``).

    NOTE(review): "Operatiton" is a typo, but the class name is public API
    and is kept as-is for backward compatibility.
    """

    Manufacturer_specific = 0x0001
    Thumbturn_Lock = 0x0002
    Thumbturn_Unlock = 0x0004
    One_touch_lock = 0x0008
    Key_Lock = 0x0010
    Key_Unlock = 0x0020
    Auto_lock = 0x0040
    Schedule_Lock = 0x0080
    Schedule_Unlock = 0x0100
    Manual_Lock_key_or_thumbturn = 0x0200
    Manual_Unlock_key_or_thumbturn = 0x0400


class RFIDOperationEventMask(t.bitmap16):
    """RFID-sourced events to report (``rfid_operation_event_mask``)."""

    Manufacturer_specific = 0x0001
    Lock_source_RFID = 0x0002
    Unlock_source_RFID = 0x0004
    Lock_source_RFID_error_invalid_RFID_ID = 0x0008
    Lock_source_RFID_error_invalid_schedule = 0x0010
    Unlock_source_RFID_error_invalid_RFID_ID = 0x0020
    Unlock_source_RFID_error_invalid_schedule = 0x0040


class KeypadProgrammingEventMask(t.bitmap16):
    """Keypad programming events to report (``keypad_programming_event_mask``)."""

    Manufacturer_Specific = 0x0001
    Master_code_changed = 0x0002
    PIN_added = 0x0004
    PIN_deleted = 0x0008
    PIN_changed = 0x0010


class RFProgrammingEventMask(t.bitmap16):
    """RF programming events to report (``rf_programming_event_mask``)."""

    Manufacturer_Specific = 0x0001
    PIN_added = 0x0004
    PIN_deleted = 0x0008
    PIN_changed = 0x0010
    RFID_code_added = 0x0020
    RFID_code_deleted = 0x0040


class RFIDProgrammingEventMask(t.bitmap16):
    """RFID programming events to report (``rfid_programming_event_mask``)."""

    Manufacturer_Specific = 0x0001
    RFID_code_added = 0x0020
    RFID_code_deleted = 0x0040


class OperationEventSource(t.enum8):
    """Source of an operation event notification."""

    Keypad = 0x00
    RF = 0x01
    Manual = 0x02
    RFID = 0x03
    Indeterminate = 0xFF
class ProgrammingEvent(t.enum8):
    """Event codes carried in programming event notifications."""

    UnknownOrMfgSpecific = 0x00
    MasterCodeChanged = 0x01
    PINCodeAdded = 0x02
    PINCodeDeleted = 0x03
    PINCodeChanges = 0x04
    RFIDCodeAdded = 0x05
    RFIDCodeDeleted = 0x06


class UserStatus(t.enum8):
    """Status of a lock user slot.

    NOTE(review): 0x02 is intentionally not declared here — presumably
    reserved in the spec; confirm against the ZCL Door Lock cluster.
    """

    Available = 0x00
    Enabled = 0x01
    Disabled = 0x03
    Not_Supported = 0xFF


class UserType(t.enum8):
    """Type/privilege level of a lock user slot."""

    Unrestricted = 0x00
    Year_Day_Schedule_User = 0x01
    Week_Day_Schedule_User = 0x02
    Master_User = 0x03
    Non_Access_User = 0x04
    Not_Supported = 0xFF


class DayMask(t.bitmap8):
    """Days-of-week bitmap used by week-day schedules."""

    Sun = 0x01
    Mon = 0x02
    Tue = 0x04
    Wed = 0x08
    Thu = 0x10
    Fri = 0x20
    Sat = 0x40


class EventType(t.enum8):
    """Category of a logged lock event (see ``get_log_record_response``)."""

    Operation = 0x00
    Programming = 0x01
    Alarm = 0x02
0x0101 name: Final = "Door Lock" ep_attribute: Final = "door_lock" class AttributeDefs(BaseAttributeDefs): lock_state: Final = ZCLAttributeDef( id=0x0000, type=LockState, access="rp", mandatory=True ) lock_type: Final = ZCLAttributeDef( id=0x0001, type=LockType, access="r", mandatory=True ) actuator_enabled: Final = ZCLAttributeDef( id=0x0002, type=t.Bool, access="r", mandatory=True ) door_state: Final = ZCLAttributeDef(id=0x0003, type=DoorState, access="rp") door_open_events: Final = ZCLAttributeDef( id=0x0004, type=t.uint32_t, access="rw" ) door_closed_events: Final = ZCLAttributeDef( id=0x0005, type=t.uint32_t, access="rw" ) open_period: Final = ZCLAttributeDef(id=0x0006, type=t.uint16_t, access="rw") num_of_lock_records_supported: Final = ZCLAttributeDef( id=0x0010, type=t.uint16_t, access="r" ) num_of_total_users_supported: Final = ZCLAttributeDef( id=0x0011, type=t.uint16_t, access="r" ) num_of_pin_users_supported: Final = ZCLAttributeDef( id=0x0012, type=t.uint16_t, access="r" ) num_of_rfid_users_supported: Final = ZCLAttributeDef( id=0x0013, type=t.uint16_t, access="r" ) num_of_week_day_schedules_supported_per_user: Final = ZCLAttributeDef( id=0x0014, type=t.uint8_t, access="r" ) num_of_year_day_schedules_supported_per_user: Final = ZCLAttributeDef( id=0x0015, type=t.uint8_t, access="r" ) num_of_holiday_scheduleds_supported: Final = ZCLAttributeDef( id=0x0016, type=t.uint8_t, access="r" ) max_pin_len: Final = ZCLAttributeDef(id=0x0017, type=t.uint8_t, access="r") min_pin_len: Final = ZCLAttributeDef(id=0x0018, type=t.uint8_t, access="r") max_rfid_len: Final = ZCLAttributeDef(id=0x0019, type=t.uint8_t, access="r") min_rfid_len: Final = ZCLAttributeDef(id=0x001A, type=t.uint8_t, access="r") enable_logging: Final = ZCLAttributeDef(id=0x0020, type=t.Bool, access="r*wp") language: Final = ZCLAttributeDef( id=0x0021, type=t.LimitedCharString(3), access="r*wp" ) led_settings: Final = ZCLAttributeDef(id=0x0022, type=t.uint8_t, access="r*wp") auto_relock_time: Final 
= ZCLAttributeDef( id=0x0023, type=t.uint32_t, access="r*wp" ) sound_volume: Final = ZCLAttributeDef(id=0x0024, type=t.uint8_t, access="r*wp") operating_mode: Final = ZCLAttributeDef( id=0x0025, type=OperatingMode, access="r*wp" ) supported_operating_modes: Final = ZCLAttributeDef( id=0x0026, type=SupportedOperatingModes, access="r" ) default_configuration_register: Final = ZCLAttributeDef( id=0x0027, type=DefaultConfigurationRegister, access="rp", ) enable_local_programming: Final = ZCLAttributeDef( id=0x0028, type=t.Bool, access="r*wp" ) enable_one_touch_locking: Final = ZCLAttributeDef( id=0x0029, type=t.Bool, access="rwp" ) enable_inside_status_led: Final = ZCLAttributeDef( id=0x002A, type=t.Bool, access="rwp" ) enable_privacy_mode_button: Final = ZCLAttributeDef( id=0x002B, type=t.Bool, access="rwp" ) wrong_code_entry_limit: Final = ZCLAttributeDef( id=0x0030, type=t.uint8_t, access="r*wp" ) user_code_temporary_disable_time: Final = ZCLAttributeDef( id=0x0031, type=t.uint8_t, access="r*wp" ) send_pin_ota: Final = ZCLAttributeDef(id=0x0032, type=t.Bool, access="r*wp") require_pin_for_rf_operation: Final = ZCLAttributeDef( id=0x0033, type=t.Bool, access="r*wp" ) zigbee_security_level: Final = ZCLAttributeDef( id=0x0034, type=ZigbeeSecurityLevel, access="rp" ) alarm_mask: Final = ZCLAttributeDef(id=0x0040, type=AlarmMask, access="rwp") keypad_operation_event_mask: Final = ZCLAttributeDef( id=0x0041, type=KeypadOperationEventMask, access="rwp" ) rf_operation_event_mask: Final = ZCLAttributeDef( id=0x0042, type=RFOperationEventMask, access="rwp" ) manual_operation_event_mask: Final = ZCLAttributeDef( id=0x0043, type=ManualOperatitonEventMask, access="rwp" ) rfid_operation_event_mask: Final = ZCLAttributeDef( id=0x0044, type=RFIDOperationEventMask, access="rwp" ) keypad_programming_event_mask: Final = ZCLAttributeDef( id=0x0045, type=KeypadProgrammingEventMask, access="rwp", ) rf_programming_event_mask: Final = ZCLAttributeDef( id=0x0046, 
type=RFProgrammingEventMask, access="rwp" ) rfid_programming_event_mask: Final = ZCLAttributeDef( id=0x0047, type=RFIDProgrammingEventMask, access="rwp" ) class ServerCommandDefs(BaseCommandDefs): lock_door: Final = ZCLCommandDef( id=0x00, schema={"pin_code?": t.CharacterString}, direction=False ) unlock_door: Final = ZCLCommandDef( id=0x01, schema={"pin_code?": t.CharacterString}, direction=False ) toggle_door: Final = ZCLCommandDef( id=0x02, schema={"pin_code?": t.CharacterString}, direction=False ) unlock_with_timeout: Final = ZCLCommandDef( id=0x03, schema={"timeout": t.uint16_t, "pin_code?": t.CharacterString}, direction=False, ) get_log_record: Final = ZCLCommandDef( id=0x04, schema={"log_index": t.uint16_t}, direction=False ) set_pin_code: Final = ZCLCommandDef( id=0x05, schema={ "user_id": t.uint16_t, "user_status": UserStatus, "user_type": UserType, "pin_code": t.CharacterString, }, direction=False, ) get_pin_code: Final = ZCLCommandDef( id=0x06, schema={"user_id": t.uint16_t}, direction=False ) clear_pin_code: Final = ZCLCommandDef( id=0x07, schema={"user_id": t.uint16_t}, direction=False ) clear_all_pin_codes: Final = ZCLCommandDef(id=0x08, schema={}, direction=False) set_user_status: Final = ZCLCommandDef( id=0x09, schema={"user_id": t.uint16_t, "user_status": UserStatus}, direction=False, ) get_user_status: Final = ZCLCommandDef( id=0x0A, schema={"user_id": t.uint16_t}, direction=False ) set_week_day_schedule: Final = ZCLCommandDef( id=0x0B, schema={ "schedule_id": t.uint8_t, "user_id": t.uint16_t, "days_mask": DayMask, "start_hour": t.uint8_t, "start_minute": t.uint8_t, "end_hour": t.uint8_t, "end_minute": t.uint8_t, }, direction=False, ) get_week_day_schedule: Final = ZCLCommandDef( id=0x0C, schema={"schedule_id": t.uint8_t, "user_id": t.uint16_t}, direction=False, ) clear_week_day_schedule: Final = ZCLCommandDef( id=0x0D, schema={"schedule_id": t.uint8_t, "user_id": t.uint16_t}, direction=False, ) set_year_day_schedule: Final = ZCLCommandDef( 
id=0x0E, schema={ "schedule_id": t.uint8_t, "user_id": t.uint16_t, "local_start_time": t.LocalTime, "local_end_time": t.LocalTime, }, direction=False, ) get_year_day_schedule: Final = ZCLCommandDef( id=0x0F, schema={"schedule_id": t.uint8_t, "user_id": t.uint16_t}, direction=False, ) clear_year_day_schedule: Final = ZCLCommandDef( id=0x10, schema={"schedule_id": t.uint8_t, "user_id": t.uint16_t}, direction=False, ) set_holiday_schedule: Final = ZCLCommandDef( id=0x11, schema={ "holiday_schedule_id": t.uint8_t, "local_start_time": t.LocalTime, "local_end_time": t.LocalTime, "operating_mode_during_holiday": OperatingMode, }, direction=False, ) get_holiday_schedule: Final = ZCLCommandDef( id=0x12, schema={"holiday_schedule_id": t.uint8_t}, direction=False ) clear_holiday_schedule: Final = ZCLCommandDef( id=0x13, schema={"holiday_schedule_id": t.uint8_t}, direction=False ) set_user_type: Final = ZCLCommandDef( id=0x14, schema={"user_id": t.uint16_t, "user_type": UserType}, direction=False, ) get_user_type: Final = ZCLCommandDef( id=0x15, schema={"user_id": t.uint16_t}, direction=False ) set_rfid_code: Final = ZCLCommandDef( id=0x16, schema={ "user_id": t.uint16_t, "user_status": UserStatus, "user_type": UserType, "rfid_code": t.CharacterString, }, direction=False, ) get_rfid_code: Final = ZCLCommandDef( id=0x17, schema={"user_id": t.uint16_t}, direction=False ) clear_rfid_code: Final = ZCLCommandDef( id=0x18, schema={"user_id": t.uint16_t}, direction=False ) clear_all_rfid_codes: Final = ZCLCommandDef(id=0x19, schema={}, direction=False) class ClientCommandDefs(BaseCommandDefs): lock_door_response: Final = ZCLCommandDef( id=0x00, schema={"status": foundation.Status}, direction=True ) unlock_door_response: Final = ZCLCommandDef( id=0x01, schema={"status": foundation.Status}, direction=True ) toggle_door_response: Final = ZCLCommandDef( id=0x02, schema={"status": foundation.Status}, direction=True ) unlock_with_timeout_response: Final = ZCLCommandDef( id=0x03, 
schema={"status": foundation.Status}, direction=True ) get_log_record_response: Final = ZCLCommandDef( id=0x04, schema={ "log_entry_id": t.uint16_t, "timestamp": t.uint32_t, "event_type": EventType, "source": OperationEventSource, "event_id_or_alarm_code": t.uint8_t, "user_id": t.uint16_t, "pin?": t.CharacterString, }, direction=True, ) set_pin_code_response: Final = ZCLCommandDef( id=0x05, schema={"status": foundation.Status}, direction=True ) get_pin_code_response: Final = ZCLCommandDef( id=0x06, schema={ "user_id": t.uint16_t, "user_status": UserStatus, "user_type": UserType, "code": t.CharacterString, }, direction=True, ) clear_pin_code_response: Final = ZCLCommandDef( id=0x07, schema={"status": foundation.Status}, direction=True ) clear_all_pin_codes_response: Final = ZCLCommandDef( id=0x08, schema={"status": foundation.Status}, direction=True ) set_user_status_response: Final = ZCLCommandDef( id=0x09, schema={"status": foundation.Status}, direction=True ) get_user_status_response: Final = ZCLCommandDef( id=0x0A, schema={"user_id": t.uint16_t, "user_status": UserStatus}, direction=True, ) set_week_day_schedule_response: Final = ZCLCommandDef( id=0x0B, schema={"status": foundation.Status}, direction=True ) get_week_day_schedule_response: Final = ZCLCommandDef( id=0x0C, schema={ "schedule_id": t.uint8_t, "user_id": t.uint16_t, "status": foundation.Status, "days_mask?": t.uint8_t, "start_hour?": t.uint8_t, "start_minute?": t.uint8_t, "end_hour?": t.uint8_t, "end_minute?": t.uint8_t, }, direction=True, ) clear_week_day_schedule_response: Final = ZCLCommandDef( id=0x0D, schema={"status": foundation.Status}, direction=True ) set_year_day_schedule_response: Final = ZCLCommandDef( id=0x0E, schema={"status": foundation.Status}, direction=True ) get_year_day_schedule_response: Final = ZCLCommandDef( id=0x0F, schema={ "schedule_id": t.uint8_t, "user_id": t.uint16_t, "status": foundation.Status, "local_start_time?": t.LocalTime, "local_end_time?": t.LocalTime, }, 
direction=True, ) clear_year_day_schedule_response: Final = ZCLCommandDef( id=0x10, schema={"status": foundation.Status}, direction=True ) set_holiday_schedule_response: Final = ZCLCommandDef( id=0x11, schema={"status": foundation.Status}, direction=True ) get_holiday_schedule_response: Final = ZCLCommandDef( id=0x12, schema={ "holiday_schedule_id": t.uint8_t, "status": foundation.Status, "local_start_time?": t.LocalTime, "local_end_time?": t.LocalTime, "operating_mode_during_holiday?": t.uint8_t, }, direction=True, ) clear_holiday_schedule_response: Final = ZCLCommandDef( id=0x13, schema={"status": foundation.Status}, direction=True ) set_user_type_response: Final = ZCLCommandDef( id=0x14, schema={"status": foundation.Status}, direction=True ) get_user_type_response: Final = ZCLCommandDef( id=0x15, schema={"user_id": t.uint16_t, "user_type": UserType}, direction=True, ) set_rfid_code_response: Final = ZCLCommandDef( id=0x16, schema={"status": foundation.Status}, direction=True ) get_rfid_code_response: Final = ZCLCommandDef( id=0x17, schema={ "user_id": t.uint16_t, "user_status": UserStatus, "user_type": UserType, "rfid_code": t.CharacterString, }, direction=True, ) clear_rfid_code_response: Final = ZCLCommandDef( id=0x18, schema={"status": foundation.Status}, direction=True ) clear_all_rfid_codes_response: Final = ZCLCommandDef( id=0x19, schema={"status": foundation.Status}, direction=True ) operation_event_notification: Final = ZCLCommandDef( id=0x20, schema={ "operation_event_source": OperationEventSource, "operation_event_code": OperationEvent, "user_id": t.uint16_t, "pin": t.CharacterString, "local_time": t.LocalTime, "data?": t.CharacterString, }, direction=False, ) programming_event_notification: Final = ZCLCommandDef( id=0x21, schema={ "program_event_source": OperationEventSource, "program_event_code": ProgrammingEvent, "user_id": t.uint16_t, "pin": t.CharacterString, "user_type": UserType, "user_status": UserStatus, "local_time": t.LocalTime, "data?": 
class WindowCoveringType(t.enum8):
    """Kind of covering hardware (``window_covering_type`` attribute)."""

    Rollershade = 0x00
    Rollershade_two_motors = 0x01
    Rollershade_exterior = 0x02
    Rollershade_exterior_two_motors = 0x03
    Drapery = 0x04
    Awning = 0x05
    Shutter = 0x06
    Tilt_blind_tilt_only = 0x07
    Tilt_blind_tilt_and_lift = 0x08
    Projector_screen = 0x09


class ConfigStatus(t.bitmap8):
    """Capability/health flags reported in the ``config_status`` attribute."""

    Operational = 0b00000001
    Online = 0b00000010
    Open_up_commands_reversed = 0b00000100
    Closed_loop_lift_control = 0b00001000
    Closed_loop_tilt_control = 0b00010000
    Encoder_controlled_lift = 0b00100000
    Encoder_controlled_tilt = 0b01000000


class WindowCoveringMode(t.bitmap8):
    """Writable mode flags (``window_covering_mode`` attribute)."""

    Motor_direction_reversed = 0b00000001
    Run_in_calibration_mode = 0b00000010
    Motor_in_maintenance_mode = 0b00000100
    LEDs_display_feedback = 0b00001000
id=0x0008, type=t.uint8_t, access="rps" ) current_position_tilt_percentage: Final = ZCLAttributeDef( id=0x0009, type=t.uint8_t, access="rps" ) # Window Covering Settings installed_open_limit_lift: Final = ZCLAttributeDef( id=0x0010, type=t.uint16_t, access="r" ) installed_closed_limit_lift: Final = ZCLAttributeDef( id=0x0011, type=t.uint16_t, access="r" ) installed_open_limit_tilt: Final = ZCLAttributeDef( id=0x0012, type=t.uint16_t, access="r" ) installed_closed_limit_tilt: Final = ZCLAttributeDef( id=0x0013, type=t.uint16_t, access="r" ) velocity_lift: Final = ZCLAttributeDef(id=0x0014, type=t.uint16_t, access="rw") acceleration_time_lift: Final = ZCLAttributeDef( id=0x0015, type=t.uint16_t, access="rw" ) deceleration_time_lift: Final = ZCLAttributeDef( id=0x0016, type=t.uint16_t, access="rw" ) window_covering_mode: Final = ZCLAttributeDef( id=0x0017, type=WindowCoveringMode, access="rw", mandatory=True ) intermediate_setpoints_lift: Final = ZCLAttributeDef( id=0x0018, type=t.LVBytes, access="rw" ) intermediate_setpoints_tilt: Final = ZCLAttributeDef( id=0x0019, type=t.LVBytes, access="rw" ) class ServerCommandDefs(BaseCommandDefs): up_open: Final = ZCLCommandDef(id=0x00, schema={}, direction=False) down_close: Final = ZCLCommandDef(id=0x01, schema={}, direction=False) stop: Final = ZCLCommandDef(id=0x02, schema={}, direction=False) go_to_lift_value: Final = ZCLCommandDef( id=0x04, schema={"lift_value": t.uint16_t}, direction=False ) go_to_lift_percentage: Final = ZCLCommandDef( id=0x05, schema={"percentage_lift_value": t.uint8_t}, direction=False ) go_to_tilt_value: Final = ZCLCommandDef( id=0x07, schema={"tilt_value": t.uint16_t}, direction=False ) go_to_tilt_percentage: Final = ZCLCommandDef( id=0x08, schema={"percentage_tilt_value": t.uint8_t}, direction=False ) zigpy-0.62.3/zigpy/zcl/clusters/general.py000066400000000000000000002541021456054056700205510ustar00rootroot00000000000000"""General Functional Domain""" from __future__ import annotations from 
datetime import datetime from typing import Any, Final import zigpy.types as t from zigpy.typing import AddressingMode from zigpy.zcl import Cluster, foundation from zigpy.zcl.foundation import ( BaseAttributeDefs, BaseCommandDefs, ZCLAttributeDef, ZCLCommandDef, ) class PowerSource(t.enum8): """Power source enum.""" Unknown = 0x00 Mains_single_phase = 0x01 Mains_three_phase = 0x02 Battery = 0x03 DC_Source = 0x04 Emergency_Mains_Always_On = 0x05 Emergency_Mains_Transfer_Switch = 0x06 def __init__(self, *args, **kwargs): self.battery_backup = False @classmethod def deserialize(cls, data: bytes) -> tuple[bytes, bytes]: val, data = t.uint8_t.deserialize(data) r = cls(val & 0x7F) r.battery_backup = bool(val & 0x80) return r, data class PhysicalEnvironment(t.enum8): Unspecified_environment = 0x00 # Mirror Capacity Available: for 0x0109 Profile Id only; use 0x71 moving forward # Atrium: defined for legacy devices with non-0x0109 Profile Id; use 0x70 moving # forward # Note: This value is deprecated for Profile Id 0x0104. The value 0x01 is # maintained for historical purposes and SHOULD only be used for backwards # compatibility with devices developed before this specification. The 0x01 # value MUST be interpreted using the Profile Id of the endpoint upon # which it is implemented. For endpoints with the Smart Energy Profile Id # (0x0109) the value 0x01 has a meaning of Mirror. For endpoints with any # other profile identifier, the value 0x01 has a meaning of Atrium. 
Mirror_or_atrium_legacy = 0x01 Bar = 0x02 Courtyard = 0x03 Bathroom = 0x04 Bedroom = 0x05 Billiard_Room = 0x06 Utility_Room = 0x07 Cellar = 0x08 Storage_Closet = 0x09 Theater = 0x0A Office = 0x0B Deck = 0x0C Den = 0x0D Dining_Room = 0x0E Electrical_Room = 0x0F Elevator = 0x10 Entry = 0x11 Family_Room = 0x12 Main_Floor = 0x13 Upstairs = 0x14 Downstairs = 0x15 Basement = 0x16 Gallery = 0x17 Game_Room = 0x18 Garage = 0x19 Gym = 0x1A Hallway = 0x1B House = 0x1C Kitchen = 0x1D Laundry_Room = 0x1E Library = 0x1F Master_Bedroom = 0x20 Mud_Room_small_room_for_coats_and_boots = 0x21 Nursery = 0x22 Pantry = 0x23 Office_2 = 0x24 Outside = 0x25 Pool = 0x26 Porch = 0x27 Sewing_Room = 0x28 Sitting_Room = 0x29 Stairway = 0x2A Yard = 0x2B Attic = 0x2C Hot_Tub = 0x2D Living_Room = 0x2E Sauna = 0x2F Workshop = 0x30 Guest_Bedroom = 0x31 Guest_Bath = 0x32 Back_Yard = 0x34 Front_Yard = 0x35 Patio = 0x36 Driveway = 0x37 Sun_Room = 0x38 Grand_Room = 0x39 Spa = 0x3A Whirlpool = 0x3B Shed = 0x3C Equipment_Storage = 0x3D Craft_Room = 0x3E Fountain = 0x3F Pond = 0x40 Reception_Room = 0x41 Breakfast_Room = 0x42 Nook = 0x43 Garden = 0x44 Balcony = 0x45 Panic_Room = 0x46 Terrace = 0x47 Roof = 0x48 Toilet = 0x49 Toilet_Main = 0x4A Outside_Toilet = 0x4B Shower_room = 0x4C Study = 0x4D Front_Garden = 0x4E Back_Garden = 0x4F Kettle = 0x50 Television = 0x51 Stove = 0x52 Microwave = 0x53 Toaster = 0x54 Vacuum = 0x55 Appliance = 0x56 Front_Door = 0x57 Back_Door = 0x58 Fridge_Door = 0x59 Medication_Cabinet_Door = 0x60 Wardrobe_Door = 0x61 Front_Cupboard_Door = 0x62 Other_Door = 0x63 Waiting_Room = 0x64 Triage_Room = 0x65 Doctors_Office = 0x66 Patients_Private_Room = 0x67 Consultation_Room = 0x68 Nurse_Station = 0x69 Ward = 0x6A Corridor = 0x6B Operating_Theatre = 0x6C Dental_Surgery_Room = 0x6D Medical_Imaging_Room = 0x6E Decontamination_Room = 0x6F Atrium = 0x70 Mirror = 0x71 Unknown_environment = 0xFF class AlarmMask(t.bitmap8): General_hardware_fault = 0x01 General_software_fault = 0x02 class 
DisableLocalConfig(t.bitmap8): Reset = 0x01 Device_Configuration = 0x02 class GenericDeviceClass(t.enum8): Lighting = 0x00 class GenericLightingDeviceType(t.enum8): Incandescent = 0x00 Spotlight_Halogen = 0x01 Halogen_bulb = 0x02 CFL = 0x03 Linear_Fluorescent = 0x04 LED_bulb = 0x05 Spotlight_LED = 0x06 LED_strip = 0x07 LED_tube = 0x08 Generic_indoor_luminaire = 0x09 Generic_outdoor_luminaire = 0x0A Pendant_luminaire = 0x0B Floor_standing_luminaire = 0x0C Generic_Controller = 0xE0 Wall_Switch = 0xE1 Portable_remote_controller = 0xE2 Motion_sensor = 0xE3 # 0xe4 to 0xef Reserved Generic_actuator = 0xF0 Wall_socket = 0xF1 Gateway_Bridge = 0xF2 Plug_in_unit = 0xF3 Retrofit_actuator = 0xF4 Unspecified = 0xFF class Basic(Cluster): """Attributes for determining basic information about a device, setting user device information such as location, and enabling a device. """ PowerSource: Final = PowerSource PhysicalEnvironment: Final = PhysicalEnvironment AlarmMask: Final = AlarmMask DisableLocalConfig: Final = DisableLocalConfig GenericDeviceClass: Final = GenericDeviceClass GenericLightingDeviceType: Final = GenericLightingDeviceType cluster_id: Final = 0x0000 ep_attribute: Final = "basic" class AttributeDefs(BaseAttributeDefs): # Basic Device Information zcl_version: Final = ZCLAttributeDef( id=0x0000, type=t.uint8_t, access="r", mandatory=True ) app_version: Final = ZCLAttributeDef(id=0x0001, type=t.uint8_t, access="r") stack_version: Final = ZCLAttributeDef(id=0x0002, type=t.uint8_t, access="r") hw_version: Final = ZCLAttributeDef(id=0x0003, type=t.uint8_t, access="r") manufacturer: Final = ZCLAttributeDef( id=0x0004, type=t.LimitedCharString(32), access="r" ) model: Final = ZCLAttributeDef( id=0x0005, type=t.LimitedCharString(32), access="r" ) date_code: Final = ZCLAttributeDef( id=0x0006, type=t.LimitedCharString(16), access="r" ) power_source: Final = ZCLAttributeDef( id=0x0007, type=PowerSource, access="r", mandatory=True ) generic_device_class: Final = 
ZCLAttributeDef( id=0x0008, type=GenericDeviceClass, access="r" ) # Lighting is the only non-reserved device type generic_device_type: Final = ZCLAttributeDef( id=0x0009, type=GenericLightingDeviceType, access="r" ) product_code: Final = ZCLAttributeDef(id=0x000A, type=t.LVBytes, access="r") product_url: Final = ZCLAttributeDef( id=0x000B, type=t.CharacterString, access="r" ) manufacturer_version_details: Final = ZCLAttributeDef( id=0x000C, type=t.CharacterString, access="r" ) serial_number: Final = ZCLAttributeDef( id=0x000D, type=t.CharacterString, access="r" ) product_label: Final = ZCLAttributeDef( id=0x000E, type=t.CharacterString, access="r" ) # Basic Device Settings location_desc: Final = ZCLAttributeDef( id=0x0010, type=t.LimitedCharString(16), access="rw" ) physical_env: Final = ZCLAttributeDef( id=0x0011, type=PhysicalEnvironment, access="rw" ) device_enabled: Final = ZCLAttributeDef(id=0x0012, type=t.Bool, access="rw") alarm_mask: Final = ZCLAttributeDef(id=0x0013, type=AlarmMask, access="rw") disable_local_config: Final = ZCLAttributeDef( id=0x0014, type=DisableLocalConfig, access="rw" ) sw_build_id: Final = ZCLAttributeDef( id=0x4000, type=t.CharacterString, access="r" ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class ServerCommandDefs(BaseCommandDefs): reset_fact_default: Final = ZCLCommandDef(id=0x00, schema={}, direction=False) class MainsAlarmMask(t.bitmap8): Voltage_Too_Low = 0b00000001 Voltage_Too_High = 0b00000010 Power_Supply_Unavailable = 0b00000100 class BatterySize(t.enum8): No_battery = 0x00 Built_in = 0x01 Other = 0x02 AA = 0x03 AAA = 0x04 C = 0x05 D = 0x06 CR2 = 0x07 CR123A = 0x08 Unknown = 0xFF class PowerConfiguration(Cluster): """Attributes for determining more detailed information about a device’s power source(s), and for configuring under/over voltage alarms. 
""" MainsAlarmMask: Final = MainsAlarmMask BatterySize: Final = BatterySize cluster_id: Final = 0x0001 name: Final = "Power Configuration" ep_attribute: Final = "power" class AttributeDefs(BaseAttributeDefs): # Mains Information mains_voltage: Final = ZCLAttributeDef(id=0x0000, type=t.uint16_t, access="r") mains_frequency: Final = ZCLAttributeDef(id=0x0001, type=t.uint8_t, access="r") # Mains Settings mains_alarm_mask: Final = ZCLAttributeDef( id=0x0010, type=MainsAlarmMask, access="rw" ) mains_volt_min_thres: Final = ZCLAttributeDef( id=0x0011, type=t.uint16_t, access="rw" ) mains_volt_max_thres: Final = ZCLAttributeDef( id=0x0012, type=t.uint16_t, access="rw" ) mains_voltage_dwell_trip_point: Final = ZCLAttributeDef( id=0x0013, type=t.uint16_t, access="rw" ) # Battery Information battery_voltage: Final = ZCLAttributeDef(id=0x0020, type=t.uint8_t, access="r") battery_percentage_remaining: Final = ZCLAttributeDef( id=0x0021, type=t.uint8_t, access="rp" ) # Battery Settings battery_manufacturer: Final = ZCLAttributeDef( id=0x0030, type=t.LimitedCharString(16), access="rw" ) battery_size: Final = ZCLAttributeDef(id=0x0031, type=BatterySize, access="rw") battery_a_hr_rating: Final = ZCLAttributeDef( id=0x0032, type=t.uint16_t, access="rw" ) # measured in units of 10mAHr battery_quantity: Final = ZCLAttributeDef( id=0x0033, type=t.uint8_t, access="rw" ) battery_rated_voltage: Final = ZCLAttributeDef( id=0x0034, type=t.uint8_t, access="rw" ) # measured in units of 100mV battery_alarm_mask: Final = ZCLAttributeDef( id=0x0035, type=t.bitmap8, access="rw" ) battery_volt_min_thres: Final = ZCLAttributeDef( id=0x0036, type=t.uint8_t, access="rw" ) battery_volt_thres1: Final = ZCLAttributeDef( id=0x0037, type=t.uint16_t, access="r*w" ) battery_volt_thres2: Final = ZCLAttributeDef( id=0x0038, type=t.uint16_t, access="r*w" ) battery_volt_thres3: Final = ZCLAttributeDef( id=0x0039, type=t.uint16_t, access="r*w" ) battery_percent_min_thres: Final = ZCLAttributeDef( id=0x003A, 
type=t.uint8_t, access="r*w" ) battery_percent_thres1: Final = ZCLAttributeDef( id=0x003B, type=t.uint8_t, access="r*w" ) battery_percent_thres2: Final = ZCLAttributeDef( id=0x003C, type=t.uint8_t, access="r*w" ) battery_percent_thres3: Final = ZCLAttributeDef( id=0x003D, type=t.uint8_t, access="r*w" ) battery_alarm_state: Final = ZCLAttributeDef( id=0x003E, type=t.bitmap32, access="rp" ) # Battery 2 Information battery_2_voltage: Final = ZCLAttributeDef( id=0x0040, type=t.uint8_t, access="r" ) battery_2_percentage_remaining: Final = ZCLAttributeDef( id=0x0041, type=t.uint8_t, access="rp" ) # Battery 2 Settings battery_2_manufacturer: Final = ZCLAttributeDef( id=0x0050, type=t.CharacterString, access="rw" ) battery_2_size: Final = ZCLAttributeDef( id=0x0051, type=BatterySize, access="rw" ) battery_2_a_hr_rating: Final = ZCLAttributeDef( id=0x0052, type=t.uint16_t, access="rw" ) battery_2_quantity: Final = ZCLAttributeDef( id=0x0053, type=t.uint8_t, access="rw" ) battery_2_rated_voltage: Final = ZCLAttributeDef( id=0x0054, type=t.uint8_t, access="rw" ) battery_2_alarm_mask: Final = ZCLAttributeDef( id=0x0055, type=t.bitmap8, access="rw" ) battery_2_volt_min_thres: Final = ZCLAttributeDef( id=0x0056, type=t.uint8_t, access="rw" ) battery_2_volt_thres1: Final = ZCLAttributeDef( id=0x0057, type=t.uint16_t, access="r*w" ) battery_2_volt_thres2: Final = ZCLAttributeDef( id=0x0058, type=t.uint16_t, access="r*w" ) battery_2_volt_thres3: Final = ZCLAttributeDef( id=0x0059, type=t.uint16_t, access="r*w" ) battery_2_percent_min_thres: Final = ZCLAttributeDef( id=0x005A, type=t.uint8_t, access="r*w" ) battery_2_percent_thres1: Final = ZCLAttributeDef( id=0x005B, type=t.uint8_t, access="r*w" ) battery_2_percent_thres2: Final = ZCLAttributeDef( id=0x005C, type=t.uint8_t, access="r*w" ) battery_2_percent_thres3: Final = ZCLAttributeDef( id=0x005D, type=t.uint8_t, access="r*w" ) battery_2_alarm_state: Final = ZCLAttributeDef( id=0x005E, type=t.bitmap32, access="rp" ) # Battery 3 
Information battery_3_voltage: Final = ZCLAttributeDef( id=0x0060, type=t.uint8_t, access="r" ) battery_3_percentage_remaining: Final = ZCLAttributeDef( id=0x0061, type=t.uint8_t, access="rp" ) # Battery 3 Settings battery_3_manufacturer: Final = ZCLAttributeDef( id=0x0070, type=t.CharacterString, access="rw" ) battery_3_size: Final = ZCLAttributeDef( id=0x0071, type=BatterySize, access="rw" ) battery_3_a_hr_rating: Final = ZCLAttributeDef( id=0x0072, type=t.uint16_t, access="rw" ) battery_3_quantity: Final = ZCLAttributeDef( id=0x0073, type=t.uint8_t, access="rw" ) battery_3_rated_voltage: Final = ZCLAttributeDef( id=0x0074, type=t.uint8_t, access="rw" ) battery_3_alarm_mask: Final = ZCLAttributeDef( id=0x0075, type=t.bitmap8, access="rw" ) battery_3_volt_min_thres: Final = ZCLAttributeDef( id=0x0076, type=t.uint8_t, access="rw" ) battery_3_volt_thres1: Final = ZCLAttributeDef( id=0x0077, type=t.uint16_t, access="r*w" ) battery_3_volt_thres2: Final = ZCLAttributeDef( id=0x0078, type=t.uint16_t, access="r*w" ) battery_3_volt_thres3: Final = ZCLAttributeDef( id=0x0079, type=t.uint16_t, access="r*w" ) battery_3_percent_min_thres: Final = ZCLAttributeDef( id=0x007A, type=t.uint8_t, access="r*w" ) battery_3_percent_thres1: Final = ZCLAttributeDef( id=0x007B, type=t.uint8_t, access="r*w" ) battery_3_percent_thres2: Final = ZCLAttributeDef( id=0x007C, type=t.uint8_t, access="r*w" ) battery_3_percent_thres3: Final = ZCLAttributeDef( id=0x007D, type=t.uint8_t, access="r*w" ) battery_3_alarm_state: Final = ZCLAttributeDef( id=0x007E, type=t.bitmap32, access="rp" ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class DeviceTempAlarmMask(t.bitmap8): Temp_too_low = 0b00000001 Temp_too_high = 0b00000010 class DeviceTemperature(Cluster): """Attributes for determining information about a device’s internal temperature, and for configuring under/over temperature alarms. 
""" DeviceTempAlarmMask: Final = DeviceTempAlarmMask cluster_id: Final = 0x0002 name: Final = "Device Temperature" ep_attribute: Final = "device_temperature" class AttributeDefs(BaseAttributeDefs): # Device Temperature Information current_temperature: Final = ZCLAttributeDef( id=0x0000, type=t.int16s, access="r", mandatory=True ) min_temp_experienced: Final = ZCLAttributeDef( id=0x0001, type=t.int16s, access="r" ) max_temp_experienced: Final = ZCLAttributeDef( id=0x0002, type=t.int16s, access="r" ) over_temp_total_dwell: Final = ZCLAttributeDef( id=0x0003, type=t.uint16_t, access="r" ) # Device Temperature Settings dev_temp_alarm_mask: Final = ZCLAttributeDef( id=0x0010, type=DeviceTempAlarmMask, access="rw" ) low_temp_thres: Final = ZCLAttributeDef(id=0x0011, type=t.int16s, access="rw") high_temp_thres: Final = ZCLAttributeDef(id=0x0012, type=t.int16s, access="rw") low_temp_dwell_trip_point: Final = ZCLAttributeDef( id=0x0013, type=t.uint24_t, access="rw" ) high_temp_dwell_trip_point: Final = ZCLAttributeDef( id=0x0014, type=t.uint24_t, access="rw" ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class EffectIdentifier(t.enum8): Blink = 0x00 Breathe = 0x01 Okay = 0x02 Channel_change = 0x03 Finish_effect = 0xFE Stop_effect = 0xFF class EffectVariant(t.enum8): Default = 0x00 class Identify(Cluster): """Attributes and commands for putting a device into Identification mode (e.g. 
flashing a light) """ EffectIdentifier: Final = EffectIdentifier EffectVariant: Final = EffectVariant cluster_id: Final = 0x0003 ep_attribute: Final = "identify" class AttributeDefs(BaseAttributeDefs): identify_time: Final = ZCLAttributeDef( id=0x0000, type=t.uint16_t, access="rw", mandatory=True ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class ServerCommandDefs(BaseCommandDefs): identify: Final = ZCLCommandDef( id=0x00, schema={"identify_time": t.uint16_t}, direction=False ) identify_query: Final = ZCLCommandDef(id=0x01, schema={}, direction=False) # 0x02: ("ezmode_invoke", (t.bitmap8,), False), # 0x03: ("update_commission_state", (t.bitmap8,), False), trigger_effect: Final = ZCLCommandDef( id=0x40, schema={"effect_id": EffectIdentifier, "effect_variant": EffectVariant}, direction=False, ) class ClientCommandDefs(BaseCommandDefs): identify_query_response: Final = ZCLCommandDef( id=0x00, schema={"timeout": t.uint16_t}, direction=True ) class NameSupport(t.bitmap8): Supported = 0b10000000 class Groups(Cluster): """Attributes and commands for group configuration and manipulation. 
""" NameSupport: Final = NameSupport cluster_id: Final = 0x0004 ep_attribute: Final = "groups" class AttributeDefs(BaseAttributeDefs): name_support: Final = ZCLAttributeDef( id=0x0000, type=NameSupport, access="r", mandatory=True ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class ServerCommandDefs(BaseCommandDefs): add: Final = ZCLCommandDef( id=0x00, schema={"group_id": t.Group, "group_name": t.LimitedCharString(16)}, direction=False, ) view: Final = ZCLCommandDef( id=0x01, schema={"group_id": t.Group}, direction=False ) get_membership: Final = ZCLCommandDef( id=0x02, schema={"groups": t.LVList[t.Group]}, direction=False ) remove: Final = ZCLCommandDef( id=0x03, schema={"group_id": t.Group}, direction=False ) remove_all: Final = ZCLCommandDef(id=0x04, schema={}, direction=False) add_if_identifying: Final = ZCLCommandDef( id=0x05, schema={"group_id": t.Group, "group_name": t.LimitedCharString(16)}, direction=False, ) class ClientCommandDefs(BaseCommandDefs): add_response: Final = ZCLCommandDef( id=0x00, schema={"status": foundation.Status, "group_id": t.Group}, direction=True, ) view_response: Final = ZCLCommandDef( id=0x01, schema={ "status": foundation.Status, "group_id": t.Group, "group_name": t.LimitedCharString(16), }, direction=True, ) get_membership_response: Final = ZCLCommandDef( id=0x02, schema={"capacity": t.uint8_t, "groups": t.LVList[t.Group]}, direction=True, ) remove_response: Final = ZCLCommandDef( id=0x03, schema={"status": foundation.Status, "group_id": t.Group}, direction=True, ) class Scenes(Cluster): """Attributes and commands for scene configuration and manipulation. 
""" NameSupport: Final = NameSupport cluster_id: Final = 0x0005 ep_attribute: Final = "scenes" class AttributeDefs(BaseAttributeDefs): # Scene Management Information count: Final = ZCLAttributeDef( id=0x0000, type=t.uint8_t, access="r", mandatory=True ) current_scene: Final = ZCLAttributeDef( id=0x0001, type=t.uint8_t, access="r", mandatory=True ) current_group: Final = ZCLAttributeDef( id=0x0002, type=t.uint16_t, access="r", mandatory=True ) scene_valid: Final = ZCLAttributeDef( id=0x0003, type=t.Bool, access="r", mandatory=True ) name_support: Final = ZCLAttributeDef( id=0x0004, type=NameSupport, access="r", mandatory=True ) last_configured_by: Final = ZCLAttributeDef(id=0x0005, type=t.EUI64, access="r") cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class ServerCommandDefs(BaseCommandDefs): add: Final = ZCLCommandDef( id=0x00, schema={ "group_id": t.Group, "scene_id": t.uint8_t, "transition_time": t.uint16_t, "scene_name": t.LimitedCharString(16), }, direction=False, ) # TODO: + extension field sets view: Final = ZCLCommandDef( id=0x01, schema={"group_id": t.Group, "scene_id": t.uint8_t}, direction=False, ) remove: Final = ZCLCommandDef( id=0x02, schema={"group_id": t.Group, "scene_id": t.uint8_t}, direction=False, ) remove_all: Final = ZCLCommandDef( id=0x03, schema={"group_id": t.Group}, direction=False ) store: Final = ZCLCommandDef( id=0x04, schema={"group_id": t.Group, "scene_id": t.uint8_t}, direction=False, ) recall: Final = ZCLCommandDef( id=0x05, schema={ "group_id": t.Group, "scene_id": t.uint8_t, "transition_time?": t.uint16_t, }, direction=False, ) get_scene_membership: Final = ZCLCommandDef( id=0x06, schema={"group_id": t.Group}, direction=False ) enhanced_add: Final = ZCLCommandDef( id=0x40, schema={ "group_id": t.Group, "scene_id": t.uint8_t, "transition_time": t.uint16_t, "scene_name": t.LimitedCharString(16), }, direction=False, ) enhanced_view: Final = ZCLCommandDef( 
id=0x41, schema={"group_id": t.Group, "scene_id": t.uint8_t}, direction=False, ) copy: Final = ZCLCommandDef( id=0x42, schema={ "mode": t.uint8_t, "group_id_from": t.uint16_t, "scene_id_from": t.uint8_t, "group_id_to": t.uint16_t, "scene_id_to": t.uint8_t, }, direction=False, ) class ClientCommandDefs(BaseCommandDefs): add_scene_response: Final = ZCLCommandDef( id=0x00, schema={ "status": foundation.Status, "group_id": t.Group, "scene_id": t.uint8_t, }, direction=True, ) view_response: Final = ZCLCommandDef( id=0x01, schema={ "status": foundation.Status, "group_id": t.Group, "scene_id": t.uint8_t, "transition_time?": t.uint16_t, "scene_name?": t.LimitedCharString(16), }, direction=True, ) # TODO: + extension field sets remove_scene_response: Final = ZCLCommandDef( id=0x02, schema={ "status": foundation.Status, "group_id": t.Group, "scene_id": t.uint8_t, }, direction=True, ) remove_all_scenes_response: Final = ZCLCommandDef( id=0x03, schema={"status": foundation.Status, "group_id": t.Group}, direction=True, ) store_scene_response: Final = ZCLCommandDef( id=0x04, schema={ "status": foundation.Status, "group_id": t.Group, "scene_id": t.uint8_t, }, direction=True, ) get_scene_membership_response: Final = ZCLCommandDef( id=0x06, schema={ "status": foundation.Status, "capacity": t.uint8_t, "group_id": t.Group, "scenes?": t.LVList[t.uint8_t], }, direction=True, ) enhanced_add_response: Final = ZCLCommandDef( id=0x40, schema={ "status": foundation.Status, "group_id": t.Group, "scene_id": t.uint8_t, }, direction=True, ) enhanced_view_response: Final = ZCLCommandDef( id=0x41, schema={ "status": foundation.Status, "group_id": t.Group, "scene_id": t.uint8_t, "transition_time?": t.uint16_t, "scene_name?": t.LimitedCharString(16), }, direction=True, ) # TODO: + extension field sets copy_response: Final = ZCLCommandDef( id=0x42, schema={ "status": foundation.Status, "group_id": t.Group, "scene_id": t.uint8_t, }, direction=True, ) class StartUpOnOff(t.enum8): Off = 0x00 On = 0x01 
Toggle = 0x02 PreviousValue = 0xFF class OffEffectIdentifier(t.enum8): Delayed_All_Off = 0x00 Dying_Light = 0x01 class OnOffControl(t.bitmap8): Accept_Only_When_On = 0b00000001 class OnOff(Cluster): """Attributes and commands for switching devices between ‘On’ and ‘Off’ states. """ StartUpOnOff: Final = StartUpOnOff OffEffectIdentifier: Final = OffEffectIdentifier OnOffControl: Final = OnOffControl DELAYED_ALL_OFF_FADE_TO_OFF = 0x00 DELAYED_ALL_OFF_NO_FADE = 0x01 DELAYED_ALL_OFF_DIM_THEN_FADE_TO_OFF = 0x02 DYING_LIGHT_DIM_UP_THEN_FADE_TO_OFF = 0x00 cluster_id: Final = 0x0006 name: Final = "On/Off" ep_attribute: Final = "on_off" class AttributeDefs(BaseAttributeDefs): on_off: Final = ZCLAttributeDef( id=0x0000, type=t.Bool, access="rps", mandatory=True ) global_scene_control: Final = ZCLAttributeDef( id=0x4000, type=t.Bool, access="r" ) on_time: Final = ZCLAttributeDef(id=0x4001, type=t.uint16_t, access="rw") off_wait_time: Final = ZCLAttributeDef(id=0x4002, type=t.uint16_t, access="rw") start_up_on_off: Final = ZCLAttributeDef( id=0x4003, type=StartUpOnOff, access="rw" ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class ServerCommandDefs(BaseCommandDefs): off: Final = ZCLCommandDef(id=0x00, schema={}, direction=False) on: Final = ZCLCommandDef(id=0x01, schema={}, direction=False) toggle: Final = ZCLCommandDef(id=0x02, schema={}, direction=False) off_with_effect: Final = ZCLCommandDef( id=0x40, schema={"effect_id": OffEffectIdentifier, "effect_variant": t.uint8_t}, direction=False, ) on_with_recall_global_scene: Final = ZCLCommandDef( id=0x41, schema={}, direction=False ) on_with_timed_off: Final = ZCLCommandDef( id=0x42, schema={ "on_off_control": OnOffControl, "on_time": t.uint16_t, "off_wait_time": t.uint16_t, }, direction=False, ) class SwitchType(t.enum8): Toggle = 0x00 Momentary = 0x01 Multifunction = 0x02 class SwitchActions(t.enum8): OnOff = 0x00 OffOn = 0x01 ToggleToggle = 
0x02 class OnOffConfiguration(Cluster): """Attributes and commands for configuring On/Off switching devices""" SwitchType: Final = SwitchType SwitchActions: Final = SwitchActions cluster_id: Final = 0x0007 name: Final = "On/Off Switch Configuration" ep_attribute: Final = "on_off_config" class AttributeDefs(BaseAttributeDefs): switch_type: Final = ZCLAttributeDef( id=0x0000, type=SwitchType, access="r", mandatory=True ) switch_actions: Final = ZCLAttributeDef( id=0x0010, type=SwitchActions, access="rw", mandatory=True ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class MoveMode(t.enum8): Up = 0x00 Down = 0x01 class StepMode(t.enum8): Up = 0x00 Down = 0x01 class Options(t.bitmap8): Execute_if_off = 0b00000001 Couple_color_temp_to_level = 0b00000010 class LevelControl(Cluster): """Attributes and commands for controlling devices that can be set to a level between fully ‘On’ and fully ‘Off’. """ MoveMode: Final = MoveMode StepMode: Final = StepMode Options: Final = Options cluster_id: Final = 0x0008 name: Final = "Level control" ep_attribute: Final = "level" class AttributeDefs(BaseAttributeDefs): current_level: Final = ZCLAttributeDef( id=0x0000, type=t.uint8_t, access="rps", mandatory=True ) remaining_time: Final = ZCLAttributeDef(id=0x0001, type=t.uint16_t, access="r") min_level: Final = ZCLAttributeDef(id=0x0002, type=t.uint8_t, access="r") max_level: Final = ZCLAttributeDef(id=0x0003, type=t.uint8_t, access="r") current_frequency: Final = ZCLAttributeDef( id=0x0004, type=t.uint16_t, access="rps" ) min_frequency: Final = ZCLAttributeDef(id=0x0005, type=t.uint16_t, access="r") max_frequency: Final = ZCLAttributeDef(id=0x0006, type=t.uint16_t, access="r") options: Final = ZCLAttributeDef(id=0x000F, type=t.bitmap8, access="rw") on_off_transition_time: Final = ZCLAttributeDef( id=0x0010, type=t.uint16_t, access="rw" ) on_level: Final = ZCLAttributeDef(id=0x0011, type=t.uint8_t, 
access="rw") on_transition_time: Final = ZCLAttributeDef( id=0x0012, type=t.uint16_t, access="rw" ) off_transition_time: Final = ZCLAttributeDef( id=0x0013, type=t.uint16_t, access="rw" ) default_move_rate: Final = ZCLAttributeDef( id=0x0014, type=t.uint8_t, access="rw" ) start_up_current_level: Final = ZCLAttributeDef( id=0x4000, type=t.uint8_t, access="rw" ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class ServerCommandDefs(BaseCommandDefs): move_to_level: Final = ZCLCommandDef( id=0x00, schema={ "level": t.uint8_t, "transition_time": t.uint16_t, "options_mask?": t.bitmap8, "options_override?": t.bitmap8, }, direction=False, ) move: Final = ZCLCommandDef( id=0x01, schema={ "move_mode": MoveMode, "rate": t.uint8_t, "options_mask?": t.bitmap8, "options_override?": t.bitmap8, }, direction=False, ) step: Final = ZCLCommandDef( id=0x02, schema={ "step_mode": StepMode, "step_size": t.uint8_t, "transition_time": t.uint16_t, "options_mask?": t.bitmap8, "options_override?": t.bitmap8, }, direction=False, ) stop: Final = ZCLCommandDef( id=0x03, schema={ "options_mask?": t.bitmap8, "options_override?": t.bitmap8, }, direction=False, ) move_to_level_with_on_off: Final = ZCLCommandDef( id=0x04, schema={"level": t.uint8_t, "transition_time": t.uint16_t}, direction=False, ) move_with_on_off: Final = ZCLCommandDef( id=0x05, schema={"move_mode": MoveMode, "rate": t.uint8_t}, direction=False, ) step_with_on_off: Final = ZCLCommandDef( id=0x06, schema={ "step_mode": StepMode, "step_size": t.uint8_t, "transition_time": t.uint16_t, }, direction=False, ) stop_with_on_off: Final = ZCLCommandDef(id=0x07, schema={}, direction=False) move_to_closest_frequency: Final = ZCLCommandDef( id=0x08, schema={"frequency": t.uint16_t}, direction=False ) class Alarms(Cluster): """Attributes and commands for sending notifications and configuring alarm functionality. 
""" cluster_id: Final = 0x0009 ep_attribute: Final = "alarms" class AttributeDefs(BaseAttributeDefs): alarm_count: Final = ZCLAttributeDef(id=0x0000, type=t.uint16_t, access="r") cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class ServerCommandDefs(BaseCommandDefs): reset_alarm: Final = ZCLCommandDef( id=0x00, schema={"alarm_code": t.uint8_t, "cluster_id": t.uint16_t}, direction=False, ) reset_all_alarms: Final = ZCLCommandDef(id=0x01, schema={}, direction=False) get_alarm: Final = ZCLCommandDef(id=0x02, schema={}, direction=False) reset_alarm_log: Final = ZCLCommandDef(id=0x03, schema={}, direction=False) # 0x04: ("publish_event_log", {}, False), class ClientCommandDefs(BaseCommandDefs): alarm: Final = ZCLCommandDef( id=0x00, schema={"alarm_code": t.uint8_t, "cluster_id": t.uint16_t}, direction=False, ) get_alarm_response: Final = ZCLCommandDef( id=0x01, schema={ "status": foundation.Status, "alarm_code?": t.uint8_t, "cluster_id?": t.uint16_t, "timestamp?": t.uint32_t, }, direction=True, ) # 0x02: ("get_event_log", {}, False), class TimeStatus(t.bitmap8): Master = 0b00000001 Synchronized = 0b00000010 Master_for_Zone_and_DST = 0b00000100 Superseding = 0b00001000 class Time(Cluster): """Attributes and commands that provide a basic interface to a real-time clock. 
""" TimeStatus: Final = TimeStatus cluster_id: Final = 0x000A ep_attribute: Final = "time" class AttributeDefs(BaseAttributeDefs): time: Final = ZCLAttributeDef( id=0x0000, type=t.UTCTime, access="r*w", mandatory=True ) time_status: Final = ZCLAttributeDef( id=0x0001, type=t.bitmap8, access="r*w", mandatory=True ) time_zone: Final = ZCLAttributeDef(id=0x0002, type=t.int32s, access="rw") dst_start: Final = ZCLAttributeDef(id=0x0003, type=t.uint32_t, access="rw") dst_end: Final = ZCLAttributeDef(id=0x0004, type=t.uint32_t, access="rw") dst_shift: Final = ZCLAttributeDef(id=0x0005, type=t.int32s, access="rw") standard_time: Final = ZCLAttributeDef( id=0x0006, type=t.StandardTime, access="r" ) local_time: Final = ZCLAttributeDef(id=0x0007, type=t.LocalTime, access="r") last_set_time: Final = ZCLAttributeDef(id=0x0008, type=t.UTCTime, access="r") valid_until_time: Final = ZCLAttributeDef( id=0x0009, type=t.UTCTime, access="rw" ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR def handle_cluster_general_request( self, hdr: foundation.ZCLHeader, *args: list[Any], dst_addressing: AddressingMode | None = None, ): if hdr.command_id == foundation.GeneralCommand.Read_Attributes: data = {} for attr in args[0][0]: if attr == 0: epoch = datetime(2000, 1, 1, 0, 0, 0, 0) diff = datetime.utcnow() - epoch data[attr] = diff.total_seconds() elif attr == 1: data[attr] = 7 elif attr == 2: diff = datetime.fromtimestamp(86400) - datetime.utcfromtimestamp( 86400 ) data[attr] = diff.total_seconds() elif attr == 7: epoch = datetime(2000, 1, 1, 0, 0, 0, 0) diff = datetime.now() - epoch data[attr] = diff.total_seconds() else: data[attr] = None self.create_catching_task(self.read_attributes_rsp(data, tsn=hdr.tsn)) class LocationMethod(t.enum8): Lateration = 0x00 Signposting = 0x01 RF_fingerprinting = 0x02 Out_of_band = 0x03 Centralized = 0x04 class NeighborInfo(t.Struct): neighbor: t.EUI64 x: t.int16s y: t.int16s z: 
t.int16s rssi: t.int8s num_measurements: t.uint8_t class RSSILocation(Cluster): """Attributes and commands that provide a means for exchanging location information and channel parameters among devices. """ LocationMethod: Final = LocationMethod NeighborInfo: Final = NeighborInfo cluster_id: Final = 0x000B ep_attribute: Final = "rssi_location" class AttributeDefs(BaseAttributeDefs): # Location Information type: Final = ZCLAttributeDef( id=0x0000, type=t.uint8_t, access="rw", mandatory=True ) method: Final = ZCLAttributeDef( id=0x0001, type=LocationMethod, access="rw", mandatory=True ) age: Final = ZCLAttributeDef(id=0x0002, type=t.uint16_t, access="r") quality_measure: Final = ZCLAttributeDef(id=0x0003, type=t.uint8_t, access="r") num_of_devices: Final = ZCLAttributeDef(id=0x0004, type=t.uint8_t, access="r") # Location Settings coordinate1: Final = ZCLAttributeDef( id=0x0010, type=t.int16s, access="rw", mandatory=True ) coordinate2: Final = ZCLAttributeDef( id=0x0011, type=t.int16s, access="rw", mandatory=True ) coordinate3: Final = ZCLAttributeDef(id=0x0012, type=t.int16s, access="rw") power: Final = ZCLAttributeDef( id=0x0013, type=t.int16s, access="rw", mandatory=True ) path_loss_exponent: Final = ZCLAttributeDef( id=0x0014, type=t.uint16_t, access="rw", mandatory=True ) reporting_period: Final = ZCLAttributeDef( id=0x0015, type=t.uint16_t, access="rw" ) calculation_period: Final = ZCLAttributeDef( id=0x0016, type=t.uint16_t, access="rw" ) number_rssi_measurements: Final = ZCLAttributeDef( id=0x0017, type=t.uint8_t, access="rw", mandatory=True ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class ServerCommandDefs(BaseCommandDefs): set_absolute_location: Final = ZCLCommandDef( id=0x00, schema={ "coordinate1": t.int16s, "coordinate2": t.int16s, "coordinate3": t.int16s, "power": t.int16s, "path_loss_exponent": t.uint16_t, }, direction=False, ) set_dev_config: Final = ZCLCommandDef( 
id=0x01, schema={ "power": t.int16s, "path_loss_exponent": t.uint16_t, "calculation_period": t.uint16_t, "num_rssi_measurements": t.uint8_t, "reporting_period": t.uint16_t, }, direction=False, ) get_dev_config: Final = ZCLCommandDef( id=0x02, schema={"target_addr": t.EUI64}, direction=False ) get_location_data: Final = ZCLCommandDef( id=0x03, schema={ "packed": t.bitmap8, "num_responses": t.uint8_t, "target_addr": t.EUI64, }, direction=False, ) rssi_response: Final = ZCLCommandDef( id=0x04, schema={ "replying_device": t.EUI64, "x": t.int16s, "y": t.int16s, "z": t.int16s, "rssi": t.int8s, "num_rssi_measurements": t.uint8_t, }, direction=True, ) send_pings: Final = ZCLCommandDef( id=0x05, schema={ "target_addr": t.EUI64, "num_rssi_measurements": t.uint8_t, "calculation_period": t.uint16_t, }, direction=False, ) anchor_node_announce: Final = ZCLCommandDef( id=0x06, schema={ "anchor_node_ieee_addr": t.EUI64, "x": t.int16s, "y": t.int16s, "z": t.int16s, }, direction=False, ) class ClientCommandDefs(BaseCommandDefs): dev_config_response: Final = ZCLCommandDef( id=0x00, schema={ "status": foundation.Status, "power?": t.int16s, "path_loss_exponent?": t.uint16_t, "calculation_period?": t.uint16_t, "num_rssi_measurements?": t.uint8_t, "reporting_period?": t.uint16_t, }, direction=True, ) location_data_response: Final = ZCLCommandDef( id=0x01, schema={ "status": foundation.Status, "location_type?": t.uint8_t, "coordinate1?": t.int16s, "coordinate2?": t.int16s, "coordinate3?": t.int16s, "power?": t.uint16_t, "path_loss_exponent?": t.uint8_t, "location_method?": t.uint8_t, "quality_measure?": t.uint8_t, "location_age?": t.uint16_t, }, direction=True, ) location_data_notification: Final = ZCLCommandDef( id=0x02, schema={}, direction=False ) compact_location_data_notification: Final = ZCLCommandDef( id=0x03, schema={}, direction=False ) rssi_ping: Final = ZCLCommandDef( id=0x04, schema={"location_type": t.uint8_t}, direction=False ) rssi_req: Final = ZCLCommandDef(id=0x05, 
schema={}, direction=False) report_rssi_measurements: Final = ZCLCommandDef( id=0x06, schema={ "measuring_device": t.EUI64, "neighbors": t.LVList[NeighborInfo], }, direction=False, ) request_own_location: Final = ZCLCommandDef( id=0x07, schema={"ieee_of_blind_node": t.EUI64}, direction=False ) class Reliability(t.enum8): No_fault_detected = 0 No_sensor = 1 Over_range = 2 Under_range = 3 Open_loop = 4 Shorted_loop = 5 No_output = 6 Unreliable_other = 7 Process_error = 8 Multi_state_fault = 9 Configuration_error = 10 class AnalogInput(Cluster): Reliability: Final = Reliability cluster_id: Final = 0x000C ep_attribute: Final = "analog_input" class AttributeDefs(BaseAttributeDefs): description: Final = ZCLAttributeDef( id=0x001C, type=t.CharacterString, access="r*w" ) max_present_value: Final = ZCLAttributeDef( id=0x0041, type=t.Single, access="r*w" ) min_present_value: Final = ZCLAttributeDef( id=0x0045, type=t.Single, access="r*w" ) out_of_service: Final = ZCLAttributeDef( id=0x0051, type=t.Bool, access="r*w", mandatory=True ) present_value: Final = ZCLAttributeDef( id=0x0055, type=t.Single, access="rwp", mandatory=True ) reliability: Final = ZCLAttributeDef(id=0x0067, type=Reliability, access="r*w") resolution: Final = ZCLAttributeDef(id=0x006A, type=t.Single, access="r*w") status_flags: Final = ZCLAttributeDef( id=0x006F, type=t.bitmap8, access="rp", mandatory=True ) engineering_units: Final = ZCLAttributeDef( id=0x0075, type=t.enum16, access="r*w" ) application_type: Final = ZCLAttributeDef( id=0x0100, type=t.uint32_t, access="r" ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class AnalogOutput(Cluster): cluster_id: Final = 0x000D ep_attribute: Final = "analog_output" class AttributeDefs(BaseAttributeDefs): description: Final = ZCLAttributeDef( id=0x001C, type=t.CharacterString, access="r*w" ) max_present_value: Final = ZCLAttributeDef( id=0x0041, type=t.Single, access="r*w" ) 
min_present_value: Final = ZCLAttributeDef( id=0x0045, type=t.Single, access="r*w" ) out_of_service: Final = ZCLAttributeDef( id=0x0051, type=t.Bool, access="r*w", mandatory=True ) present_value: Final = ZCLAttributeDef( id=0x0055, type=t.Single, access="rwp", mandatory=True ) # 0x0057: ZCLAttributeDef('priority_array', type=TODO.array), # Array of 16 structures of (boolean, # single precision) reliability: Final = ZCLAttributeDef(id=0x0067, type=t.enum8, access="r*w") relinquish_default: Final = ZCLAttributeDef( id=0x0068, type=t.Single, access="r*w" ) resolution: Final = ZCLAttributeDef(id=0x006A, type=t.Single, access="r*w") status_flags: Final = ZCLAttributeDef( id=0x006F, type=t.bitmap8, access="rp", mandatory=True ) engineering_units: Final = ZCLAttributeDef( id=0x0075, type=t.enum16, access="r*w" ) application_type: Final = ZCLAttributeDef( id=0x0100, type=t.uint32_t, access="r" ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class AnalogValue(Cluster): cluster_id: Final = 0x000E ep_attribute: Final = "analog_value" class AttributeDefs(BaseAttributeDefs): description: Final = ZCLAttributeDef( id=0x001C, type=t.CharacterString, access="r*w" ) out_of_service: Final = ZCLAttributeDef( id=0x0051, type=t.Bool, access="r*w", mandatory=True ) present_value: Final = ZCLAttributeDef( id=0x0055, type=t.Single, access="rw", mandatory=True ) # 0x0057: ('priority_array', TODO.array), # Array of 16 structures of (boolean, # single precision) reliability: Final = ZCLAttributeDef(id=0x0067, type=t.enum8, access="r*w") relinquish_default: Final = ZCLAttributeDef( id=0x0068, type=t.Single, access="r*w" ) status_flags: Final = ZCLAttributeDef( id=0x006F, type=t.bitmap8, access="r", mandatory=True ) engineering_units: Final = ZCLAttributeDef( id=0x0075, type=t.enum16, access="r*w" ) application_type: Final = ZCLAttributeDef( id=0x0100, type=t.uint32_t, access="r" ) cluster_revision: Final = 
foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class BinaryInput(Cluster): cluster_id: Final = 0x000F name: Final = "Binary Input (Basic)" ep_attribute: Final = "binary_input" class AttributeDefs(BaseAttributeDefs): active_text: Final = ZCLAttributeDef( id=0x0004, type=t.CharacterString, access="r*w" ) description: Final = ZCLAttributeDef( id=0x001C, type=t.CharacterString, access="r*w" ) inactive_text: Final = ZCLAttributeDef( id=0x002E, type=t.CharacterString, access="r*w" ) out_of_service: Final = ZCLAttributeDef( id=0x0051, type=t.Bool, access="r*w", mandatory=True ) polarity: Final = ZCLAttributeDef(id=0x0054, type=t.enum8, access="r") present_value: Final = ZCLAttributeDef( id=0x0055, type=t.Bool, access="r*w", mandatory=True ) reliability: Final = ZCLAttributeDef(id=0x0067, type=t.enum8, access="r*w") status_flags: Final = ZCLAttributeDef( id=0x006F, type=t.bitmap8, access="r", mandatory=True ) application_type: Final = ZCLAttributeDef( id=0x0100, type=t.uint32_t, access="r" ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class BinaryOutput(Cluster): cluster_id: Final = 0x0010 ep_attribute: Final = "binary_output" class AttributeDefs(BaseAttributeDefs): active_text: Final = ZCLAttributeDef( id=0x0004, type=t.CharacterString, access="r*w" ) description: Final = ZCLAttributeDef( id=0x001C, type=t.CharacterString, access="r*w" ) inactive_text: Final = ZCLAttributeDef( id=0x002E, type=t.CharacterString, access="r*w" ) minimum_off_time: Final = ZCLAttributeDef( id=0x0042, type=t.uint32_t, access="r*w" ) minimum_on_time: Final = ZCLAttributeDef( id=0x0043, type=t.uint32_t, access="r*w" ) out_of_service: Final = ZCLAttributeDef( id=0x0051, type=t.Bool, access="r*w", mandatory=True ) polarity: Final = ZCLAttributeDef(id=0x0054, type=t.enum8, access="r") present_value: Final = ZCLAttributeDef( id=0x0055, type=t.Bool, access="r*w", 
mandatory=True ) # 0x0057: ('priority_array', TODO.array), # Array of 16 structures of (boolean, # single precision) reliability: Final = ZCLAttributeDef(id=0x0067, type=t.enum8, access="r*w") relinquish_default: Final = ZCLAttributeDef( id=0x0068, type=t.Bool, access="r*w" ) resolution: Final = ZCLAttributeDef( id=0x006A, type=t.Single, access="r" ) # Does not seem to be in binary_output status_flags: Final = ZCLAttributeDef( id=0x006F, type=t.bitmap8, access="r", mandatory=True ) engineering_units: Final = ZCLAttributeDef( id=0x0075, type=t.enum16, access="r" ) # Does not seem to be in binary_output application_type: Final = ZCLAttributeDef( id=0x0100, type=t.uint32_t, access="r" ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class BinaryValue(Cluster): cluster_id: Final = 0x0011 ep_attribute: Final = "binary_value" class AttributeDefs(BaseAttributeDefs): active_text: Final = ZCLAttributeDef( id=0x0004, type=t.CharacterString, access="r*w" ) description: Final = ZCLAttributeDef( id=0x001C, type=t.CharacterString, access="r*w" ) inactive_text: Final = ZCLAttributeDef( id=0x002E, type=t.CharacterString, access="r*w" ) minimum_off_time: Final = ZCLAttributeDef( id=0x0042, type=t.uint32_t, access="r*w" ) minimum_on_time: Final = ZCLAttributeDef( id=0x0043, type=t.uint32_t, access="r*w" ) out_of_service: Final = ZCLAttributeDef( id=0x0051, type=t.Bool, access="r*w", mandatory=True ) present_value: Final = ZCLAttributeDef( id=0x0055, type=t.Single, access="r*w", mandatory=True ) # 0x0057: ZCLAttributeDef('priority_array', type=TODO.array), # Array of 16 structures of (boolean, # single precision) reliability: Final = ZCLAttributeDef(id=0x0067, type=t.enum8, access="r*w") relinquish_default: Final = ZCLAttributeDef( id=0x0068, type=t.Single, access="r*w" ) status_flags: Final = ZCLAttributeDef( id=0x006F, type=t.bitmap8, access="r", mandatory=True ) application_type: Final = 
ZCLAttributeDef( id=0x0100, type=t.uint32_t, access="r" ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class MultistateInput(Cluster): cluster_id: Final = 0x0012 ep_attribute: Final = "multistate_input" class AttributeDefs(BaseAttributeDefs): state_text: Final = ZCLAttributeDef( id=0x000E, type=t.List[t.CharacterString], access="r*w" ) description: Final = ZCLAttributeDef( id=0x001C, type=t.CharacterString, access="r*w" ) number_of_states: Final = ZCLAttributeDef( id=0x004A, type=t.uint16_t, access="r*w" ) out_of_service: Final = ZCLAttributeDef( id=0x0051, type=t.Bool, access="r*w", mandatory=True ) present_value: Final = ZCLAttributeDef( id=0x0055, type=t.Single, access="r*w", mandatory=True ) # 0x0057: ('priority_array', TODO.array), # Array of 16 structures of (boolean, # single precision) reliability: Final = ZCLAttributeDef(id=0x0067, type=t.enum8, access="r*w") status_flags: Final = ZCLAttributeDef( id=0x006F, type=t.bitmap8, access="r", mandatory=True ) application_type: Final = ZCLAttributeDef( id=0x0100, type=t.uint32_t, access="r" ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class MultistateOutput(Cluster): cluster_id: Final = 0x0013 ep_attribute: Final = "multistate_output" class AttributeDefs(BaseAttributeDefs): state_text: Final = ZCLAttributeDef( id=0x000E, type=t.List[t.CharacterString], access="r*w" ) description: Final = ZCLAttributeDef( id=0x001C, type=t.CharacterString, access="r*w" ) number_of_states: Final = ZCLAttributeDef( id=0x004A, type=t.uint16_t, access="r*w", mandatory=True ) out_of_service: Final = ZCLAttributeDef( id=0x0051, type=t.Bool, access="r*w", mandatory=True ) present_value: Final = ZCLAttributeDef( id=0x0055, type=t.Single, access="r*w", mandatory=True ) # 0x0057: ZCLAttributeDef('priority_array', type=TODO.array), # Array of 16 structures of (boolean, # single 
precision) reliability: Final = ZCLAttributeDef(id=0x0067, type=t.enum8, access="r*w") relinquish_default: Final = ZCLAttributeDef( id=0x0068, type=t.Single, access="r*w" ) status_flags: Final = ZCLAttributeDef( id=0x006F, type=t.bitmap8, access="r", mandatory=True ) application_type: Final = ZCLAttributeDef( id=0x0100, type=t.uint32_t, access="r" ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class MultistateValue(Cluster): cluster_id: Final = 0x0014 ep_attribute: Final = "multistate_value" class AttributeDefs(BaseAttributeDefs): state_text: Final = ZCLAttributeDef( id=0x000E, type=t.List[t.CharacterString], access="r*w" ) description: Final = ZCLAttributeDef( id=0x001C, type=t.CharacterString, access="r*w" ) number_of_states: Final = ZCLAttributeDef( id=0x004A, type=t.uint16_t, access="r*w", mandatory=True ) out_of_service: Final = ZCLAttributeDef( id=0x0051, type=t.Bool, access="r*w", mandatory=True ) present_value: Final = ZCLAttributeDef( id=0x0055, type=t.Single, access="r*w", mandatory=True ) # 0x0057: ZCLAttributeDef('priority_array', type=TODO.array), # Array of 16 structures of (boolean, # single precision) reliability: Final = ZCLAttributeDef(id=0x0067, type=t.enum8, access="r*w") relinquish_default: Final = ZCLAttributeDef( id=0x0068, type=t.Single, access="r*w" ) status_flags: Final = ZCLAttributeDef( id=0x006F, type=t.bitmap8, access="r", mandatory=True ) application_type: Final = ZCLAttributeDef( id=0x0100, type=t.uint32_t, access="r" ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class StartupControl(t.enum8): Part_of_network = 0x00 Form_network = 0x01 Rejoin_network = 0x02 Start_from_scratch = 0x03 class NetworkKeyType(t.enum8): Standard = 0x01 class Commissioning(Cluster): """Attributes and commands for commissioning and managing a ZigBee device. 
""" StartupControl: Final = StartupControl NetworkKeyType: Final = NetworkKeyType cluster_id: Final = 0x0015 ep_attribute: Final = "commissioning" class AttributeDefs(BaseAttributeDefs): # Startup Parameters short_address: Final = ZCLAttributeDef( id=0x0000, type=t.uint16_t, access="rw", mandatory=True ) extended_pan_id: Final = ZCLAttributeDef( id=0x0001, type=t.EUI64, access="rw", mandatory=True ) pan_id: Final = ZCLAttributeDef( id=0x0002, type=t.uint16_t, access="rw", mandatory=True ) channel_mask: Final = ZCLAttributeDef( id=0x0003, type=t.Channels, access="rw", mandatory=True ) protocol_version: Final = ZCLAttributeDef( id=0x0004, type=t.uint8_t, access="rw", mandatory=True ) stack_profile: Final = ZCLAttributeDef( id=0x0005, type=t.uint8_t, access="rw", mandatory=True ) startup_control: Final = ZCLAttributeDef( id=0x0006, type=StartupControl, access="rw", mandatory=True ) trust_center_address: Final = ZCLAttributeDef( id=0x0010, type=t.EUI64, access="rw", mandatory=True ) trust_center_master_key: Final = ZCLAttributeDef( id=0x0011, type=t.KeyData, access="rw" ) network_key: Final = ZCLAttributeDef( id=0x0012, type=t.KeyData, access="rw", mandatory=True ) use_insecure_join: Final = ZCLAttributeDef( id=0x0013, type=t.Bool, access="rw", mandatory=True ) preconfigured_link_key: Final = ZCLAttributeDef( id=0x0014, type=t.KeyData, access="rw", mandatory=True ) network_key_seq_num: Final = ZCLAttributeDef( id=0x0015, type=t.uint8_t, access="rw", mandatory=True ) network_key_type: Final = ZCLAttributeDef( id=0x0016, type=NetworkKeyType, access="rw", mandatory=True ) network_manager_address: Final = ZCLAttributeDef( id=0x0017, type=t.uint16_t, access="rw", mandatory=True ) # Join Parameters scan_attempts: Final = ZCLAttributeDef(id=0x0020, type=t.uint8_t, access="rw") time_between_scans: Final = ZCLAttributeDef( id=0x0021, type=t.uint16_t, access="rw" ) rejoin_interval: Final = ZCLAttributeDef( id=0x0022, type=t.uint16_t, access="rw" ) max_rejoin_interval: Final = 
ZCLAttributeDef( id=0x0023, type=t.uint16_t, access="rw" ) # End Device Parameters indirect_poll_rate: Final = ZCLAttributeDef( id=0x0030, type=t.uint16_t, access="rw" ) parent_retry_threshold: Final = ZCLAttributeDef( id=0x0031, type=t.uint8_t, access="r" ) # Concentrator Parameters concentrator_flag: Final = ZCLAttributeDef(id=0x0040, type=t.Bool, access="rw") concentrator_radius: Final = ZCLAttributeDef( id=0x0041, type=t.uint8_t, access="rw" ) concentrator_discovery_time: Final = ZCLAttributeDef( id=0x0042, type=t.uint8_t, access="rw" ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class ServerCommandDefs(BaseCommandDefs): restart_device: Final = ZCLCommandDef( id=0x00, schema={"options": t.bitmap8, "delay": t.uint8_t, "jitter": t.uint8_t}, direction=False, ) save_startup_parameters: Final = ZCLCommandDef( id=0x01, schema={"options": t.bitmap8, "index": t.uint8_t}, direction=False, ) restore_startup_parameters: Final = ZCLCommandDef( id=0x02, schema={"options": t.bitmap8, "index": t.uint8_t}, direction=False, ) reset_startup_parameters: Final = ZCLCommandDef( id=0x03, schema={"options": t.bitmap8, "index": t.uint8_t}, direction=False, ) class ClientCommandDefs(BaseCommandDefs): restart_device_response: Final = ZCLCommandDef( id=0x00, schema={"status": foundation.Status}, direction=True ) save_startup_params_response: Final = ZCLCommandDef( id=0x01, schema={"status": foundation.Status}, direction=True ) restore_startup_params_response: Final = ZCLCommandDef( id=0x02, schema={"status": foundation.Status}, direction=True ) reset_startup_params_response: Final = ZCLCommandDef( id=0x03, schema={"status": foundation.Status}, direction=True ) class Partition(Cluster): cluster_id: Final = 0x0016 ep_attribute: Final = "partition" class AttributeDefs(BaseAttributeDefs): maximum_incoming_transfer_size: Final = ZCLAttributeDef( id=0x0000, type=t.uint16_t, access="r", mandatory=True, ) 
maximum_outgoing_transfer_size: Final = ZCLAttributeDef( id=0x0001, type=t.uint16_t, access="r", mandatory=True, ) partitioned_frame_size: Final = ZCLAttributeDef( id=0x0002, type=t.uint8_t, access="rw", mandatory=True ) large_frame_size: Final = ZCLAttributeDef( id=0x0003, type=t.uint16_t, access="rw", mandatory=True ) number_of_ack_frame: Final = ZCLAttributeDef( id=0x0004, type=t.uint8_t, access="rw", mandatory=True ) nack_timeout: Final = ZCLAttributeDef( id=0x0005, type=t.uint16_t, access="r", mandatory=True ) interframe_delay: Final = ZCLAttributeDef( id=0x0006, type=t.uint8_t, access="rw", mandatory=True ) number_of_send_retries: Final = ZCLAttributeDef( id=0x0007, type=t.uint8_t, access="r", mandatory=True ) sender_timeout: Final = ZCLAttributeDef( id=0x0008, type=t.uint16_t, access="r", mandatory=True ) receiver_timeout: Final = ZCLAttributeDef( id=0x0009, type=t.uint16_t, access="r", mandatory=True ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class ImageUpgradeStatus(t.enum8): Normal = 0x00 Download_in_progress = 0x01 Download_complete = 0x02 Waiting_to_upgrade = 0x03 Count_down = 0x04 Wait_for_more = 0x05 Waiting_to_Upgrade_via_External_Event = 0x06 class UpgradeActivationPolicy(t.enum8): OTA_server_allowed = 0x00 Out_of_band_allowed = 0x01 class UpgradeTimeoutPolicy(t.enum8): Apply_after_timeout = 0x00 Do_not_apply_after_timeout = 0x01 class ImageNotifyCommand(foundation.CommandSchema): class PayloadType(t.enum8): QueryJitter = 0x00 QueryJitter_ManufacturerCode = 0x01 QueryJitter_ManufacturerCode_ImageType = 0x02 QueryJitter_ManufacturerCode_ImageType_NewFileVersion = 0x03 payload_type: None = t.StructField(type=PayloadType) query_jitter: t.uint8_t manufacturer_code: t.uint16_t = t.StructField( requires=( lambda s: s.payload_type >= s.PayloadType.QueryJitter_ManufacturerCode ) ) image_type: t.uint16_t = t.StructField( requires=( lambda s: s.payload_type >= 
s.PayloadType.QueryJitter_ManufacturerCode_ImageType ) ) new_file_version: t.uint32_t = t.StructField( requires=( lambda s: s.payload_type >= s.PayloadType.QueryJitter_ManufacturerCode_ImageType_NewFileVersion ) ) class QueryNextImageCommand(foundation.CommandSchema): class FieldControl(t.bitmap8): HardwareVersion = 0b00000001 field_control: None = t.StructField(type=FieldControl) manufacturer_code: t.uint16_t image_type: t.uint16_t current_file_version: t.uint32_t hardware_version: t.uint16_t = t.StructField( requires=(lambda s: s.field_control & s.FieldControl.HardwareVersion) ) class ImageBlockCommand(foundation.CommandSchema): class FieldControl(t.bitmap8): RequestNodeAddr = 0b00000001 MinimumBlockPeriod = 0b00000010 field_control: None = t.StructField(type=FieldControl) manufacturer_code: t.uint16_t image_type: t.uint16_t file_version: t.uint32_t file_offset: t.uint32_t maximum_data_size: t.uint8_t request_node_addr: t.EUI64 = t.StructField( requires=(lambda s: s.field_control & s.FieldControl.RequestNodeAddr) ) minimum_block_period: t.uint16_t = t.StructField( requires=(lambda s: s.field_control & s.FieldControl.MinimumBlockPeriod) ) class ImagePageCommand(foundation.CommandSchema): class FieldControl(t.bitmap8): RequestNodeAddr = 0b00000001 field_control: None = t.StructField(type=FieldControl) manufacturer_code: t.uint16_t image_type: t.uint16_t file_version: t.uint32_t file_offset: t.uint32_t maximum_data_size: t.uint8_t page_size: t.uint16_t response_spacing: t.uint16_t request_node_addr: t.EUI64 = t.StructField( requires=lambda s: s.field_control & s.FieldControl.RequestNodeAddr ) class ImageBlockResponseCommand(foundation.CommandSchema): # All responses contain at least a status status: foundation.Status # Payload with `SUCCESS` status manufacturer_code: t.uint16_t = t.StructField( requires=lambda s: s.status == foundation.Status.SUCCESS ) image_type: t.uint16_t = t.StructField( requires=lambda s: s.status == foundation.Status.SUCCESS ) file_version: 
t.uint32_t = t.StructField( requires=lambda s: s.status == foundation.Status.SUCCESS ) file_offset: t.uint32_t = t.StructField( requires=lambda s: s.status == foundation.Status.SUCCESS ) image_data: t.LVBytes = t.StructField( requires=lambda s: s.status == foundation.Status.SUCCESS ) # Payload with `WAIT_FOR_DATA` status current_time: t.UTCTime = t.StructField( requires=lambda s: s.status == foundation.Status.WAIT_FOR_DATA ) request_time: t.UTCTime = t.StructField( requires=lambda s: s.status == foundation.Status.WAIT_FOR_DATA ) minimum_block_period: t.uint16_t = t.StructField( requires=lambda s: s.status == foundation.Status.WAIT_FOR_DATA ) class Ota(Cluster): ImageUpgradeStatus: Final = ImageUpgradeStatus UpgradeActivationPolicy: Final = UpgradeActivationPolicy UpgradeTimeoutPolicy: Final = UpgradeTimeoutPolicy ImageNotifyCommand: Final = ImageNotifyCommand QueryNextImageCommand: Final = QueryNextImageCommand ImageBlockCommand: Final = ImageBlockCommand ImagePageCommand: Final = ImagePageCommand ImageBlockResponseCommand: Final = ImageBlockResponseCommand cluster_id: Final = 0x0019 ep_attribute: Final = "ota" class AttributeDefs(BaseAttributeDefs): upgrade_server_id: Final = ZCLAttributeDef( id=0x0000, type=t.EUI64, access="r", mandatory=True ) file_offset: Final = ZCLAttributeDef(id=0x0001, type=t.uint32_t, access="r") current_file_version: Final = ZCLAttributeDef( id=0x0002, type=t.uint32_t, access="r" ) current_zigbee_stack_version: Final = ZCLAttributeDef( id=0x0003, type=t.uint16_t, access="r" ) downloaded_file_version: Final = ZCLAttributeDef( id=0x0004, type=t.uint32_t, access="r" ) downloaded_zigbee_stack_version: Final = ZCLAttributeDef( id=0x0005, type=t.uint16_t, access="r" ) image_upgrade_status: Final = ZCLAttributeDef( id=0x0006, type=ImageUpgradeStatus, access="r", mandatory=True ) manufacturer_id: Final = ZCLAttributeDef(id=0x0007, type=t.uint16_t, access="r") image_type_id: Final = ZCLAttributeDef(id=0x0008, type=t.uint16_t, access="r") 
minimum_block_req_delay: Final = ZCLAttributeDef( id=0x0009, type=t.uint16_t, access="r" ) image_stamp: Final = ZCLAttributeDef(id=0x000A, type=t.uint32_t, access="r") upgrade_activation_policy: Final = ZCLAttributeDef( id=0x000B, type=UpgradeActivationPolicy, access="r" ) upgrade_timeout_policy: Final = ZCLAttributeDef( id=0x000C, type=UpgradeTimeoutPolicy, access="r" ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class ServerCommandDefs(BaseCommandDefs): query_next_image: Final = ZCLCommandDef( id=0x01, schema=QueryNextImageCommand, direction=False ) image_block: Final = ZCLCommandDef( id=0x03, schema=ImageBlockCommand, direction=False ) image_page: Final = ZCLCommandDef( id=0x04, schema=ImagePageCommand, direction=False ) upgrade_end: Final = ZCLCommandDef( id=0x06, schema={ "status": foundation.Status, "manufacturer_code": t.uint16_t, "image_type": t.uint16_t, "file_version": t.uint32_t, }, direction=False, ) query_specific_file: Final = ZCLCommandDef( id=0x08, schema={ "request_node_addr": t.EUI64, "manufacturer_code": t.uint16_t, "image_type": t.uint16_t, "file_version": t.uint32_t, "current_zigbee_stack_version": t.uint16_t, }, direction=False, ) class ClientCommandDefs(BaseCommandDefs): image_notify: Final = ZCLCommandDef( id=0x00, schema=ImageNotifyCommand, direction=False ) query_next_image_response: Final = ZCLCommandDef( id=0x02, schema={ "status": foundation.Status, "manufacturer_code?": t.uint16_t, "image_type?": t.uint16_t, "file_version?": t.uint32_t, "image_size?": t.uint32_t, }, direction=True, ) image_block_response: Final = ZCLCommandDef( id=0x05, schema=ImageBlockResponseCommand, direction=True, ) upgrade_end_response: Final = ZCLCommandDef( id=0x07, schema={ "manufacturer_code": t.uint16_t, "image_type": t.uint16_t, "file_version": t.uint32_t, "current_time": t.UTCTime, "upgrade_time": t.UTCTime, }, direction=True, ) query_specific_file_response: Final = 
ZCLCommandDef( id=0x09, schema={ "status": foundation.Status, "manufacturer_code?": t.uint16_t, "image_type?": t.uint16_t, "file_version?": t.uint32_t, "image_size?": t.uint32_t, }, direction=True, ) def handle_cluster_request( self, hdr: foundation.ZCLHeader, args: list[Any], *, dst_addressing: AddressingMode | None = None, ): self.create_catching_task( self._handle_cluster_request(hdr, args, dst_addressing=dst_addressing), ) async def _handle_cluster_request( self, hdr: foundation.ZCLHeader, args: list[Any], *, dst_addressing: AddressingMode | None = None, ): """Parse OTA commands.""" tsn, command_id = hdr.tsn, hdr.command_id try: cmd_name = self.server_commands[command_id].name except KeyError: self.warning("Unknown OTA command id %d (%s)", command_id, args) return if cmd_name == "query_next_image": await self._handle_query_next_image( *args, tsn=tsn, model=self.endpoint.model ) else: self.debug( "no '%s' OTA command handler for '%s %s': %s", cmd_name, self.endpoint.manufacturer, self.endpoint.model, args, ) async def _handle_query_next_image( self, field_ctrl, manufacturer_id, image_type, current_file_version, hardware_version, *, tsn, model=None, ): # we don't want the cluster to do anything here because it would interfere with the OTA manager if self.endpoint.device.ota_in_progress: return self.debug( ( "OTA query_next_image handler for '%s %s': " "field_control=%s, manufacturer_id=%s, image_type=%s, " "current_file_version=%s, hardware_version=%s, model=%r" ), self.endpoint.manufacturer, self.endpoint.model, field_ctrl, manufacturer_id, image_type, current_file_version, hardware_version, model, ) img = await self.endpoint.device.application.ota.get_ota_image( manufacturer_id, image_type, model ) if img is not None: should_update = img.should_update( manufacturer_id, image_type, current_file_version, hardware_version ) self.debug( "OTA image version: %s, size: %s. 
Update needed: %s", img.version, img.header.image_size, should_update, ) if should_update: # send an event to listener(s) to let them know that an image is available self.endpoint.device.listener_event("device_ota_update_available", img) else: self.debug("No OTA image is available") # always send no image available response so that the device doesn't update autonomously await self.query_next_image_response( foundation.Status.NO_IMAGE_AVAILABLE, tsn=tsn ) class ScheduleRecord(t.Struct): phase_id: t.uint8_t scheduled_time: t.uint16_t class PowerProfilePhase(t.Struct): energy_phase_id: t.uint8_t macro_phase_id: t.uint8_t expected_duration: t.uint16_t peak_power: t.uint16_t energy: t.uint16_t class PowerProfileType(t.Struct): power_profile_id: t.uint8_t energy_phase_id: t.uint8_t power_profile_remote_control: t.Bool power_profile_state: t.uint8_t class PowerProfile(Cluster): ScheduleRecord: Final = ScheduleRecord PowerProfilePhase: Final = PowerProfilePhase PowerProfile: Final = PowerProfileType cluster_id: Final = 0x001A ep_attribute: Final = "power_profile" class AttributeDefs(BaseAttributeDefs): total_profile_num: Final = ZCLAttributeDef( id=0x0000, type=t.uint8_t, access="r", mandatory=True ) multiple_scheduling: Final = ZCLAttributeDef( id=0x0001, type=t.Bool, access="r", mandatory=True ) energy_formatting: Final = ZCLAttributeDef( id=0x0002, type=t.bitmap8, access="r", mandatory=True ) energy_remote: Final = ZCLAttributeDef( id=0x0003, type=t.Bool, access="r", mandatory=True ) schedule_mode: Final = ZCLAttributeDef( id=0x0004, type=t.bitmap8, access="rwp", mandatory=True ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class ServerCommandDefs(BaseCommandDefs): power_profile_request: Final = ZCLCommandDef( id=0x00, schema={"power_profile_id": t.uint8_t}, direction=False ) power_profile_state_request: Final = ZCLCommandDef( id=0x01, schema={}, direction=False ) 
get_power_profile_price_response: Final = ZCLCommandDef( id=0x02, schema={ "power_profile_id": t.uint8_t, "currency": t.uint16_t, "price": t.uint32_t, "price_trailing_digit": t.uint8_t, }, direction=True, ) get_overall_schedule_price_response: Final = ZCLCommandDef( id=0x03, schema={ "currency": t.uint16_t, "price": t.uint32_t, "price_trailing_digit": t.uint8_t, }, direction=True, ) energy_phases_schedule_notification: Final = ZCLCommandDef( id=0x04, schema={ "power_profile_id": t.uint8_t, "scheduled_phases": t.LVList[ScheduleRecord], }, direction=False, ) energy_phases_schedule_response: Final = ZCLCommandDef( id=0x05, schema={ "power_profile_id": t.uint8_t, "scheduled_phases": t.LVList[ScheduleRecord], }, direction=True, ) power_profile_schedule_constraints_request: Final = ZCLCommandDef( id=0x06, schema={"power_profile_id": t.uint8_t}, direction=False, ) energy_phases_schedule_state_request: Final = ZCLCommandDef( id=0x07, schema={"power_profile_id": t.uint8_t}, direction=False, ) get_power_profile_price_extended_response: Final = ZCLCommandDef( id=0x08, schema={ "power_profile_id": t.uint8_t, "currency": t.uint16_t, "price": t.uint32_t, "price_trailing_digit": t.uint8_t, }, direction=True, ) class ClientCommandDefs(BaseCommandDefs): power_profile_notification: Final = ZCLCommandDef( id=0x00, schema={ "total_profile_num": t.uint8_t, "power_profile_id": t.uint8_t, "transfer_phases": t.LVList[PowerProfilePhase], }, direction=False, ) power_profile_response: Final = ZCLCommandDef( id=0x01, schema={ "total_profile_num": t.uint8_t, "power_profile_id": t.uint8_t, "transfer_phases": t.LVList[PowerProfilePhase], }, direction=True, ) power_profile_state_response: Final = ZCLCommandDef( id=0x02, schema={"power_profiles": t.LVList[PowerProfileType]}, direction=True, ) get_power_profile_price: Final = ZCLCommandDef( id=0x03, schema={"power_profile_id": t.uint8_t}, direction=False ) power_profile_state_notification: Final = ZCLCommandDef( id=0x04, schema={"power_profiles": 
t.LVList[PowerProfileType]}, direction=False, ) get_overall_schedule_price: Final = ZCLCommandDef( id=0x05, schema={}, direction=False ) energy_phases_schedule_request: Final = ZCLCommandDef( id=0x06, schema={"power_profile_id": t.uint8_t}, direction=False, ) energy_phases_schedule_state_response: Final = ZCLCommandDef( id=0x07, schema={ "power_profile_id": t.uint8_t, "num_scheduled_energy_phases": t.uint8_t, }, direction=True, ) energy_phases_schedule_state_notification: Final = ZCLCommandDef( id=0x08, schema={ "power_profile_id": t.uint8_t, "num_scheduled_energy_phases": t.uint8_t, }, direction=False, ) power_profile_schedule_constraints_notification: Final = ZCLCommandDef( id=0x09, schema={ "power_profile_id": t.uint8_t, "start_after": t.uint16_t, "stop_before": t.uint16_t, }, direction=False, ) power_profile_schedule_constraints_response: Final = ZCLCommandDef( id=0x0A, schema={ "power_profile_id": t.uint8_t, "start_after": t.uint16_t, "stop_before": t.uint16_t, }, direction=True, ) get_power_profile_price_extended: Final = ZCLCommandDef( id=0x0B, schema={ "options": t.bitmap8, "power_profile_id": t.uint8_t, "power_profile_start_time?": t.uint16_t, }, direction=False, ) class ApplianceControl(Cluster): cluster_id: Final = 0x001B ep_attribute: Final = "appliance_control" class AttributeDefs(BaseAttributeDefs): start_time: Final = ZCLAttributeDef( id=0x0000, type=t.uint16_t, access="rp", mandatory=True ) finish_time: Final = ZCLAttributeDef( id=0x0001, type=t.uint16_t, access="rp", mandatory=True ) remaining_time: Final = ZCLAttributeDef(id=0x0002, type=t.uint16_t, access="rp") cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class PollControl(Cluster): cluster_id: Final = 0x0020 name: Final = "Poll Control" ep_attribute: Final = "poll_control" class AttributeDefs(BaseAttributeDefs): checkin_interval: Final = ZCLAttributeDef( id=0x0000, type=t.uint32_t, access="rw", mandatory=True ) 
long_poll_interval: Final = ZCLAttributeDef( id=0x0001, type=t.uint32_t, access="r", mandatory=True ) short_poll_interval: Final = ZCLAttributeDef( id=0x0002, type=t.uint16_t, access="r", mandatory=True ) fast_poll_timeout: Final = ZCLAttributeDef( id=0x0003, type=t.uint16_t, access="rw", mandatory=True ) checkin_interval_min: Final = ZCLAttributeDef( id=0x0004, type=t.uint32_t, access="r" ) long_poll_interval_min: Final = ZCLAttributeDef( id=0x0005, type=t.uint32_t, access="r" ) fast_poll_timeout_max: Final = ZCLAttributeDef( id=0x0006, type=t.uint16_t, access="r" ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class ServerCommandDefs(BaseCommandDefs): checkin_response: Final = ZCLCommandDef( id=0x00, schema={"start_fast_polling": t.Bool, "fast_poll_timeout": t.uint16_t}, direction=True, ) fast_poll_stop: Final = ZCLCommandDef(id=0x01, schema={}, direction=False) set_long_poll_interval: Final = ZCLCommandDef( id=0x02, schema={"new_long_poll_interval": t.uint32_t}, direction=False ) set_short_poll_interval: Final = ZCLCommandDef( id=0x03, schema={"new_short_poll_interval": t.uint16_t}, direction=False, ) class ClientCommandDefs(BaseCommandDefs): checkin: Final = ZCLCommandDef(id=0x0000, schema={}, direction=False) class GreenPowerProxy(Cluster): cluster_id: Final = 0x0021 ep_attribute: Final = "green_power" zigpy-0.62.3/zigpy/zcl/clusters/homeautomation.py000066400000000000000000000630011456054056700221610ustar00rootroot00000000000000from __future__ import annotations from typing import Final import zigpy.types as t from zigpy.zcl import Cluster, foundation from zigpy.zcl.foundation import ( BaseAttributeDefs, BaseCommandDefs, ZCLAttributeDef, ZCLCommandDef, ) class ApplianceIdentification(Cluster): cluster_id: Final = 0x0B00 name: Final = "Appliance Identification" ep_attribute: Final = "appliance_id" class AttributeDefs(BaseAttributeDefs): basic_identification: Final = 
ZCLAttributeDef( id=0x0000, type=t.uint56_t, access="r", mandatory=True ) company_name: Final = ZCLAttributeDef( id=0x0010, type=t.LimitedCharString(16), access="r" ) company_id: Final = ZCLAttributeDef(id=0x0011, type=t.uint16_t, access="r") brand_name: Final = ZCLAttributeDef( id=0x0012, type=t.LimitedCharString(16), access="r" ) brand_id: Final = ZCLAttributeDef(id=0x0013, type=t.uint16_t, access="r") model: Final = ZCLAttributeDef(id=0x0014, type=t.LimitedLVBytes(16), access="r") part_number: Final = ZCLAttributeDef( id=0x0015, type=t.LimitedLVBytes(16), access="r" ) product_revision: Final = ZCLAttributeDef( id=0x0016, type=t.LimitedLVBytes(6), access="r" ) software_revision: Final = ZCLAttributeDef( id=0x0017, type=t.LimitedLVBytes(6), access="r" ) product_type_name: Final = ZCLAttributeDef( id=0x0018, type=t.LVBytesSize2, access="r" ) product_type_id: Final = ZCLAttributeDef(id=0x0019, type=t.uint16_t, access="r") ceced_specification_version: Final = ZCLAttributeDef( id=0x001A, type=t.uint8_t, access="r" ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class MeterIdentification(Cluster): cluster_id: Final = 0x0B01 name: Final = "Meter Identification" ep_attribute: Final = "meter_id" class AttributeDefs(BaseAttributeDefs): company_name: Final = ZCLAttributeDef( id=0x0000, type=t.LimitedCharString(16), access="r", mandatory=True ) meter_type_id: Final = ZCLAttributeDef( id=0x0001, type=t.uint16_t, access="r", mandatory=True ) data_quality_id: Final = ZCLAttributeDef( id=0x0004, type=t.uint16_t, access="r", mandatory=True ) customer_name: Final = ZCLAttributeDef( id=0x0005, type=t.LimitedCharString(16), access="rw" ) model: Final = ZCLAttributeDef(id=0x0006, type=t.LimitedLVBytes(16), access="r") part_number: Final = ZCLAttributeDef( id=0x0007, type=t.LimitedLVBytes(16), access="r" ) product_revision: Final = ZCLAttributeDef( id=0x0008, type=t.LimitedLVBytes(6), access="r" ) 
software_revision: Final = ZCLAttributeDef( id=0x000A, type=t.LimitedLVBytes(6), access="r" ) utility_name: Final = ZCLAttributeDef( id=0x000B, type=t.LimitedCharString(16), access="r" ) pod: Final = ZCLAttributeDef( id=0x000C, type=t.LimitedCharString(16), access="r", mandatory=True ) available_power: Final = ZCLAttributeDef( id=0x000D, type=t.int24s, access="r", mandatory=True ) power_threshold: Final = ZCLAttributeDef( id=0x000E, type=t.int24s, access="r", mandatory=True ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class ApplianceEventAlerts(Cluster): cluster_id: Final = 0x0B02 name: Final = "Appliance Event Alerts" ep_attribute: Final = "appliance_event" class AttributeDefs(BaseAttributeDefs): cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class ServerCommandDefs(BaseCommandDefs): get_alerts: Final = ZCLCommandDef(id=0x00, schema={}, direction=False) class ClientCommandDefs(BaseCommandDefs): get_alerts_response: Final = ZCLCommandDef(id=0x00, schema={}, direction=True) alerts_notification: Final = ZCLCommandDef(id=0x01, schema={}, direction=False) event_notification: Final = ZCLCommandDef(id=0x02, schema={}, direction=False) class ApplianceStatistics(Cluster): cluster_id: Final = 0x0B03 name: Final = "Appliance Statistics" ep_attribute: Final = "appliance_stats" class AttributeDefs(BaseAttributeDefs): log_max_size: Final = ZCLAttributeDef( id=0x0000, type=t.uint32_t, access="r", mandatory=True ) log_queue_max_size: Final = ZCLAttributeDef( id=0x0001, type=t.uint8_t, access="r", mandatory=True ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class ServerCommandDefs(BaseCommandDefs): log: Final = ZCLCommandDef(id=0x00, schema={}, direction=False) log_queue: Final = ZCLCommandDef(id=0x01, schema={}, direction=False) class 
ClientCommandDefs(BaseCommandDefs): log_notification: Final = ZCLCommandDef(id=0x00, schema={}, direction=False) log_response: Final = ZCLCommandDef(id=0x01, schema={}, direction=True) log_queue_response: Final = ZCLCommandDef(id=0x02, schema={}, direction=True) statistics_available: Final = ZCLCommandDef(id=0x03, schema={}, direction=False) class MeasurementType(t.bitmap32): Active_measurement_AC = 2 << 0 Reactive_measurement_AC = 2 << 1 Apparent_measurement_AC = 2 << 2 Phase_A_measurement = 2 << 3 Phase_B_measurement = 2 << 4 Phase_C_measurement = 2 << 5 DC_measurement = 2 << 6 Harmonics_measurement = 2 << 7 Power_quality_measurement = 2 << 8 class DCOverloadAlarmMark(t.bitmap8): Voltage_Overload = 0b00000001 Current_Overload = 0b00000010 class ACAlarmsMask(t.bitmap16): Voltage_Overload = 2 << 0 Current_Overload = 2 << 1 Active_Power_Overload = 2 << 2 Reactive_Power_Overload = 2 << 3 Average_RMS_Over_Voltage = 2 << 4 Average_RMS_Under_Voltage = 2 << 5 RMS_Extreme_Over_Voltage = 2 << 6 RMS_Extreme_Under_Voltage = 2 << 7 RMS_Voltage_Sag = 2 << 8 RMS_Voltage_Swell = 2 << 9 class ElectricalMeasurement(Cluster): cluster_id: Final = 0x0B04 name: Final = "Electrical Measurement" ep_attribute: Final = "electrical_measurement" MeasurementType: Final = MeasurementType DCOverloadAlarmMark: Final = DCOverloadAlarmMark ACAlarmsMask: Final = ACAlarmsMask class AttributeDefs(BaseAttributeDefs): # Basic Information measurement_type: Final = ZCLAttributeDef( id=0x0000, type=MeasurementType, access="r", mandatory=True ) # DC Measurement dc_voltage: Final = ZCLAttributeDef(id=0x0100, type=t.int16s, access="rp") dc_voltage_min: Final = ZCLAttributeDef(id=0x0101, type=t.int16s, access="r") dc_voltage_max: Final = ZCLAttributeDef(id=0x0102, type=t.int16s, access="r") dc_current: Final = ZCLAttributeDef(id=0x0103, type=t.int16s, access="rp") dc_current_min: Final = ZCLAttributeDef(id=0x0104, type=t.int16s, access="r") dc_current_max: Final = ZCLAttributeDef(id=0x0105, type=t.int16s, 
access="r") dc_power: Final = ZCLAttributeDef(id=0x0106, type=t.int16s, access="rp") dc_power_min: Final = ZCLAttributeDef(id=0x0107, type=t.int16s, access="r") dc_power_max: Final = ZCLAttributeDef(id=0x0108, type=t.int16s, access="r") # DC Formatting dc_voltage_multiplier: Final = ZCLAttributeDef( id=0x0200, type=t.uint16_t, access="rp" ) dc_voltage_divisor: Final = ZCLAttributeDef( id=0x0201, type=t.uint16_t, access="rp" ) dc_current_multiplier: Final = ZCLAttributeDef( id=0x0202, type=t.uint16_t, access="rp" ) dc_current_divisor: Final = ZCLAttributeDef( id=0x0203, type=t.uint16_t, access="rp" ) dc_power_multiplier: Final = ZCLAttributeDef( id=0x0204, type=t.uint16_t, access="rp" ) dc_power_divisor: Final = ZCLAttributeDef( id=0x0205, type=t.uint16_t, access="rp" ) # AC (Non-phase Specific) Measurements ac_frequency: Final = ZCLAttributeDef(id=0x0300, type=t.uint16_t, access="rp") ac_frequency_min: Final = ZCLAttributeDef( id=0x0301, type=t.uint16_t, access="r" ) ac_frequency_max: Final = ZCLAttributeDef( id=0x0302, type=t.uint16_t, access="r" ) neutral_current: Final = ZCLAttributeDef( id=0x0303, type=t.uint16_t, access="rp" ) total_active_power: Final = ZCLAttributeDef( id=0x0304, type=t.int32s, access="rp" ) total_reactive_power: Final = ZCLAttributeDef( id=0x0305, type=t.int32s, access="rp" ) total_apparent_power: Final = ZCLAttributeDef( id=0x0306, type=t.uint32_t, access="rp" ) meas1st_harmonic_current: Final = ZCLAttributeDef( id=0x0307, type=t.int16s, access="rp" ) meas3rd_harmonic_current: Final = ZCLAttributeDef( id=0x0308, type=t.int16s, access="rp" ) meas5th_harmonic_current: Final = ZCLAttributeDef( id=0x0309, type=t.int16s, access="rp" ) meas7th_harmonic_current: Final = ZCLAttributeDef( id=0x030A, type=t.int16s, access="rp" ) meas9th_harmonic_current: Final = ZCLAttributeDef( id=0x030B, type=t.int16s, access="rp" ) meas11th_harmonic_current: Final = ZCLAttributeDef( id=0x030C, type=t.int16s, access="rp" ) meas_phase1st_harmonic_current: Final = 
ZCLAttributeDef( id=0x030D, type=t.int16s, access="rp" ) meas_phase3rd_harmonic_current: Final = ZCLAttributeDef( id=0x030E, type=t.int16s, access="rp" ) meas_phase5th_harmonic_current: Final = ZCLAttributeDef( id=0x030F, type=t.int16s, access="rp" ) meas_phase7th_harmonic_current: Final = ZCLAttributeDef( id=0x0310, type=t.int16s, access="rp" ) meas_phase9th_harmonic_current: Final = ZCLAttributeDef( id=0x0311, type=t.int16s, access="rp" ) meas_phase11th_harmonic_current: Final = ZCLAttributeDef( id=0x0312, type=t.int16s, access="rp" ) # AC (Non-phase specific) Formatting ac_frequency_multiplier: Final = ZCLAttributeDef( id=0x0400, type=t.uint16_t, access="rp" ) ac_frequency_divisor: Final = ZCLAttributeDef( id=0x0401, type=t.uint16_t, access="rp" ) power_multiplier: Final = ZCLAttributeDef( id=0x0402, type=t.uint32_t, access="rp" ) power_divisor: Final = ZCLAttributeDef(id=0x0403, type=t.uint32_t, access="rp") harmonic_current_multiplier: Final = ZCLAttributeDef( id=0x0404, type=t.int8s, access="rp" ) phase_harmonic_current_multiplier: Final = ZCLAttributeDef( id=0x0405, type=t.int8s, access="rp" ) # AC (Single Phase or Phase A) Measurements instantaneous_voltage: Final = ZCLAttributeDef( id=0x0500, type=t.int16s, access="rp" ) instantaneous_line_current: Final = ZCLAttributeDef( id=0x0501, type=t.uint16_t, access="rp" ) instantaneous_active_current: Final = ZCLAttributeDef( id=0x0502, type=t.int16s, access="rp" ) instantaneous_reactive_current: Final = ZCLAttributeDef( id=0x0503, type=t.int16s, access="rp" ) instantaneous_power: Final = ZCLAttributeDef( id=0x0504, type=t.int16s, access="rp" ) rms_voltage: Final = ZCLAttributeDef(id=0x0505, type=t.uint16_t, access="rp") rms_voltage_min: Final = ZCLAttributeDef(id=0x0506, type=t.uint16_t, access="r") rms_voltage_max: Final = ZCLAttributeDef(id=0x0507, type=t.uint16_t, access="r") rms_current: Final = ZCLAttributeDef(id=0x0508, type=t.uint16_t, access="rp") rms_current_min: Final = ZCLAttributeDef(id=0x0509, 
type=t.uint16_t, access="r") rms_current_max: Final = ZCLAttributeDef(id=0x050A, type=t.uint16_t, access="r") active_power: Final = ZCLAttributeDef(id=0x050B, type=t.int16s, access="rp") active_power_min: Final = ZCLAttributeDef(id=0x050C, type=t.int16s, access="r") active_power_max: Final = ZCLAttributeDef(id=0x050D, type=t.int16s, access="r") reactive_power: Final = ZCLAttributeDef(id=0x050E, type=t.int16s, access="rp") apparent_power: Final = ZCLAttributeDef(id=0x050F, type=t.uint16_t, access="rp") power_factor: Final = ZCLAttributeDef(id=0x0510, type=t.int8s, access="r") average_rms_voltage_meas_period: Final = ZCLAttributeDef( id=0x0511, type=t.uint16_t, access="rw" ) average_rms_over_voltage_counter: Final = ZCLAttributeDef( id=0x0512, type=t.uint16_t, access="rw" ) average_rms_under_voltage_counter: Final = ZCLAttributeDef( id=0x0513, type=t.uint16_t, access="rw" ) rms_extreme_over_voltage_period: Final = ZCLAttributeDef( id=0x0514, type=t.uint16_t, access="rw" ) rms_extreme_under_voltage_period: Final = ZCLAttributeDef( id=0x0515, type=t.uint16_t, access="rw" ) rms_voltage_sag_period: Final = ZCLAttributeDef( id=0x0516, type=t.uint16_t, access="rw" ) rms_voltage_swell_period: Final = ZCLAttributeDef( id=0x0517, type=t.uint16_t, access="rw" ) # AC Formatting ac_voltage_multiplier: Final = ZCLAttributeDef( id=0x0600, type=t.uint16_t, access="rp" ) ac_voltage_divisor: Final = ZCLAttributeDef( id=0x0601, type=t.uint16_t, access="rp" ) ac_current_multiplier: Final = ZCLAttributeDef( id=0x0602, type=t.uint16_t, access="rp" ) ac_current_divisor: Final = ZCLAttributeDef( id=0x0603, type=t.uint16_t, access="rp" ) ac_power_multiplier: Final = ZCLAttributeDef( id=0x0604, type=t.uint16_t, access="rp" ) ac_power_divisor: Final = ZCLAttributeDef( id=0x0605, type=t.uint16_t, access="rp" ) # DC Manufacturer Threshold Alarms dc_overload_alarms_mask: Final = ZCLAttributeDef( id=0x0700, type=DCOverloadAlarmMark, access="rp" ) dc_voltage_overload: Final = ZCLAttributeDef( 
id=0x0701, type=t.int16s, access="rp" ) dc_current_overload: Final = ZCLAttributeDef( id=0x0702, type=t.int16s, access="rp" ) # AC Manufacturer Threshold Alarms ac_alarms_mask: Final = ZCLAttributeDef( id=0x0800, type=ACAlarmsMask, access="rw" ) ac_voltage_overload: Final = ZCLAttributeDef( id=0x0801, type=t.int16s, access="r" ) ac_current_overload: Final = ZCLAttributeDef( id=0x0802, type=t.int16s, access="r" ) ac_active_power_overload: Final = ZCLAttributeDef( id=0x0803, type=t.int16s, access="r" ) ac_reactive_power_overload: Final = ZCLAttributeDef( id=0x0804, type=t.int16s, access="r" ) average_rms_over_voltage: Final = ZCLAttributeDef( id=0x0805, type=t.int16s, access="r" ) average_rms_under_voltage: Final = ZCLAttributeDef( id=0x0806, type=t.int16s, access="r" ) rms_extreme_over_voltage: Final = ZCLAttributeDef( id=0x0807, type=t.int16s, access="rw" ) rms_extreme_under_voltage: Final = ZCLAttributeDef( id=0x0808, type=t.int16s, access="rw" ) rms_voltage_sag: Final = ZCLAttributeDef(id=0x0809, type=t.int16s, access="rw") rms_voltage_swell: Final = ZCLAttributeDef( id=0x080A, type=t.int16s, access="rw" ) # AC Phase B Measurements line_current_ph_b: Final = ZCLAttributeDef( id=0x0901, type=t.uint16_t, access="rp" ) active_current_ph_b: Final = ZCLAttributeDef( id=0x0902, type=t.int16s, access="rp" ) reactive_current_ph_b: Final = ZCLAttributeDef( id=0x0903, type=t.int16s, access="rp" ) rms_voltage_ph_b: Final = ZCLAttributeDef( id=0x0905, type=t.uint16_t, access="rp" ) rms_voltage_min_ph_b: Final = ZCLAttributeDef( id=0x0906, type=t.uint16_t, access="r" ) rms_voltage_max_ph_b: Final = ZCLAttributeDef( id=0x0907, type=t.uint16_t, access="r" ) rms_current_ph_b: Final = ZCLAttributeDef( id=0x0908, type=t.uint16_t, access="rp" ) rms_current_min_ph_b: Final = ZCLAttributeDef( id=0x0909, type=t.uint16_t, access="r" ) rms_current_max_ph_b: Final = ZCLAttributeDef( id=0x090A, type=t.uint16_t, access="r" ) active_power_ph_b: Final = ZCLAttributeDef( id=0x090B, 
type=t.int16s, access="rp" ) active_power_min_ph_b: Final = ZCLAttributeDef( id=0x090C, type=t.int16s, access="r" ) active_power_max_ph_b: Final = ZCLAttributeDef( id=0x090D, type=t.int16s, access="r" ) reactive_power_ph_b: Final = ZCLAttributeDef( id=0x090E, type=t.int16s, access="rp" ) apparent_power_ph_b: Final = ZCLAttributeDef( id=0x090F, type=t.uint16_t, access="rp" ) power_factor_ph_b: Final = ZCLAttributeDef(id=0x0910, type=t.int8s, access="r") average_rms_voltage_measure_period_ph_b: Final = ZCLAttributeDef( id=0x0911, type=t.uint16_t, access="rw" ) average_rms_over_voltage_counter_ph_b: Final = ZCLAttributeDef( id=0x0912, type=t.uint16_t, access="rw" ) average_under_voltage_counter_ph_b: Final = ZCLAttributeDef( id=0x0913, type=t.uint16_t, access="rw" ) rms_extreme_over_voltage_period_ph_b: Final = ZCLAttributeDef( id=0x0914, type=t.uint16_t, access="rw" ) rms_extreme_under_voltage_period_ph_b: Final = ZCLAttributeDef( id=0x0915, type=t.uint16_t, access="rw" ) rms_voltage_sag_period_ph_b: Final = ZCLAttributeDef( id=0x0916, type=t.uint16_t, access="rw" ) rms_voltage_swell_period_ph_b: Final = ZCLAttributeDef( id=0x0917, type=t.uint16_t, access="rw" ) # AC Phase C Measurements line_current_ph_c: Final = ZCLAttributeDef( id=0x0A01, type=t.uint16_t, access="rp" ) active_current_ph_c: Final = ZCLAttributeDef( id=0x0A02, type=t.int16s, access="rp" ) reactive_current_ph_c: Final = ZCLAttributeDef( id=0x0A03, type=t.int16s, access="rp" ) rms_voltage_ph_c: Final = ZCLAttributeDef( id=0x0A05, type=t.uint16_t, access="rp" ) rms_voltage_min_ph_c: Final = ZCLAttributeDef( id=0x0A06, type=t.uint16_t, access="r" ) rms_voltage_max_ph_c: Final = ZCLAttributeDef( id=0x0A07, type=t.uint16_t, access="r" ) rms_current_ph_c: Final = ZCLAttributeDef( id=0x0A08, type=t.uint16_t, access="rp" ) rms_current_min_ph_c: Final = ZCLAttributeDef( id=0x0A09, type=t.uint16_t, access="r" ) rms_current_max_ph_c: Final = ZCLAttributeDef( id=0x0A0A, type=t.uint16_t, access="r" ) 
active_power_ph_c: Final = ZCLAttributeDef( id=0x0A0B, type=t.int16s, access="rp" ) active_power_min_ph_c: Final = ZCLAttributeDef( id=0x0A0C, type=t.int16s, access="r" ) active_power_max_ph_c: Final = ZCLAttributeDef( id=0x0A0D, type=t.int16s, access="r" ) reactive_power_ph_c: Final = ZCLAttributeDef( id=0x0A0E, type=t.int16s, access="rp" ) apparent_power_ph_c: Final = ZCLAttributeDef( id=0x0A0F, type=t.uint16_t, access="rp" ) power_factor_ph_c: Final = ZCLAttributeDef(id=0x0A10, type=t.int8s, access="r") average_rms_voltage_meas_period_ph_c: Final = ZCLAttributeDef( id=0x0A11, type=t.uint16_t, access="rw" ) average_rms_over_voltage_counter_ph_c: Final = ZCLAttributeDef( id=0x0A12, type=t.uint16_t, access="rw" ) average_under_voltage_counter_ph_c: Final = ZCLAttributeDef( id=0x0A13, type=t.uint16_t, access="rw" ) rms_extreme_over_voltage_period_ph_c: Final = ZCLAttributeDef( id=0x0A14, type=t.uint16_t, access="rw" ) rms_extreme_under_voltage_period_ph_c: Final = ZCLAttributeDef( id=0x0A15, type=t.uint16_t, access="rw" ) rms_voltage_sag_period_ph_c: Final = ZCLAttributeDef( id=0x0A16, type=t.uint16_t, access="rw" ) rms_voltage_swell_period_ph_c: Final = ZCLAttributeDef( id=0x0A17, type=t.uint16_t, access="rw" ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class ServerCommandDefs(BaseCommandDefs): get_profile_info: Final = ZCLCommandDef(id=0x00, schema={}, direction=False) get_measurement_profile: Final = ZCLCommandDef( id=0x01, schema={}, direction=False ) class ClientCommandDefs(BaseCommandDefs): get_profile_info_response: Final = ZCLCommandDef( id=0x00, schema={}, direction=True ) get_measurement_profile_response: Final = ZCLCommandDef( id=0x01, schema={}, direction=True ) class Diagnostic(Cluster): cluster_id: Final = 0x0B05 ep_attribute: Final = "diagnostic" class AttributeDefs(BaseAttributeDefs): # Hardware Information number_of_resets: Final = ZCLAttributeDef( id=0x0000, 
type=t.uint16_t, access="r" ) persistent_memory_writes: Final = ZCLAttributeDef( id=0x0001, type=t.uint16_t, access="r" ) # Stack/Network Information mac_rx_bcast: Final = ZCLAttributeDef(id=0x0100, type=t.uint32_t, access="r") mac_tx_bcast: Final = ZCLAttributeDef(id=0x0101, type=t.uint32_t, access="r") mac_rx_ucast: Final = ZCLAttributeDef(id=0x0102, type=t.uint32_t, access="r") mac_tx_ucast: Final = ZCLAttributeDef(id=0x0103, type=t.uint32_t, access="r") mac_tx_ucast_retry: Final = ZCLAttributeDef( id=0x0104, type=t.uint16_t, access="r" ) mac_tx_ucast_fail: Final = ZCLAttributeDef( id=0x0105, type=t.uint16_t, access="r" ) aps_rx_bcast: Final = ZCLAttributeDef(id=0x0106, type=t.uint16_t, access="r") aps_tx_bcast: Final = ZCLAttributeDef(id=0x0107, type=t.uint16_t, access="r") aps_rx_ucast: Final = ZCLAttributeDef(id=0x0108, type=t.uint16_t, access="r") aps_tx_ucast_success: Final = ZCLAttributeDef( id=0x0109, type=t.uint16_t, access="r" ) aps_tx_ucast_retry: Final = ZCLAttributeDef( id=0x010A, type=t.uint16_t, access="r" ) aps_tx_ucast_fail: Final = ZCLAttributeDef( id=0x010B, type=t.uint16_t, access="r" ) route_disc_initiated: Final = ZCLAttributeDef( id=0x010C, type=t.uint16_t, access="r" ) neighbor_added: Final = ZCLAttributeDef(id=0x010D, type=t.uint16_t, access="r") neighbor_removed: Final = ZCLAttributeDef( id=0x010E, type=t.uint16_t, access="r" ) neighbor_stale: Final = ZCLAttributeDef(id=0x010F, type=t.uint16_t, access="r") join_indication: Final = ZCLAttributeDef(id=0x0110, type=t.uint16_t, access="r") child_moved: Final = ZCLAttributeDef(id=0x0111, type=t.uint16_t, access="r") nwk_fc_failure: Final = ZCLAttributeDef(id=0x0112, type=t.uint16_t, access="r") aps_fc_failure: Final = ZCLAttributeDef(id=0x0113, type=t.uint16_t, access="r") aps_unauthorized_key: Final = ZCLAttributeDef( id=0x0114, type=t.uint16_t, access="r" ) nwk_decrypt_failures: Final = ZCLAttributeDef( id=0x0115, type=t.uint16_t, access="r" ) aps_decrypt_failures: Final = ZCLAttributeDef( 
id=0x0116, type=t.uint16_t, access="r" ) packet_buffer_allocate_failures: Final = ZCLAttributeDef( id=0x0117, type=t.uint16_t, access="r" ) relayed_ucast: Final = ZCLAttributeDef(id=0x0118, type=t.uint16_t, access="r") phy_to_mac_queue_limit_reached: Final = ZCLAttributeDef( id=0x0119, type=t.uint16_t, access="r" ) packet_validate_drop_count: Final = ZCLAttributeDef( id=0x011A, type=t.uint16_t, access="r" ) average_mac_retry_per_aps_message_sent: Final = ZCLAttributeDef( id=0x011B, type=t.uint16_t, access="r" ) last_message_lqi: Final = ZCLAttributeDef(id=0x011C, type=t.uint8_t, access="r") last_message_rssi: Final = ZCLAttributeDef(id=0x011D, type=t.int8s, access="r") cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR zigpy-0.62.3/zigpy/zcl/clusters/hvac.py000066400000000000000000000512331456054056700200550ustar00rootroot00000000000000"""HVAC Functional Domain""" from __future__ import annotations from typing import Final import zigpy.types as t from zigpy.zcl import Cluster from zigpy.zcl.foundation import ( BaseAttributeDefs, BaseCommandDefs, ZCLAttributeDef, ZCLCommandDef, ) class PumpAlarmMask(t.bitmap16): Supply_voltage_too_low = 0x0001 Supply_voltage_too_high = 0x0002 Power_missing_phase = 0x0004 System_pressure_too_low = 0x0008 System_pressure_too_high = 0x0010 Dry_running = 0x0020 Motor_temperature_too_high = 0x0040 Pump_motor_has_fatal_failure = 0x0080 Electronic_temperature_too_high = 0x0100 Pump_blocked = 0x0200 Sensor_failure = 0x0400 Electronic_non_fatal_failure = 0x0800 Electronic_fatal_failure = 0x1000 General_fault = 0x2000 class ControlMode(t.enum8): Constant_speed = 0x00 Constant_pressure = 0x01 Proportional_pressure = 0x02 Constant_flow = 0x03 Constant_temperature = 0x05 Automatic = 0x07 class OperationMode(t.enum8): Normal = 0x00 Minimum = 0x01 Maximum = 0x02 Local = 0x03 class PumpStatus(t.bitmap16): Device_fault = 0x0001 Supply_fault = 0x0002 Speed_low = 0x0004 
Speed_high = 0x0008 Local_override = 0x0010 Running = 0x0020 Remote_Pressure = 0x0040 Remote_Flow = 0x0080 Remote_Temperature = 0x0100 class Pump(Cluster): """An interface for configuring and controlling pumps.""" AlarmMask: Final = PumpAlarmMask ControlMode: Final = ControlMode OperationMode: Final = OperationMode PumpStatus: Final = PumpStatus cluster_id: Final = 0x0200 name: Final = "Pump Configuration and Control" ep_attribute: Final = "pump" class AttributeDefs(BaseAttributeDefs): # Pump Information max_pressure: Final = ZCLAttributeDef( id=0x0000, type=t.int16s, access="r", mandatory=True ) max_speed: Final = ZCLAttributeDef( id=0x0001, type=t.uint16_t, access="r", mandatory=True ) max_flow: Final = ZCLAttributeDef( id=0x0002, type=t.uint16_t, access="r", mandatory=True ) min_const_pressure: Final = ZCLAttributeDef( id=0x0003, type=t.int16s, access="r" ) max_const_pressure: Final = ZCLAttributeDef( id=0x0004, type=t.int16s, access="r" ) min_comp_pressure: Final = ZCLAttributeDef(id=0x0005, type=t.int16s, access="r") max_comp_pressure: Final = ZCLAttributeDef(id=0x0006, type=t.int16s, access="r") min_const_speed: Final = ZCLAttributeDef(id=0x0007, type=t.uint16_t, access="r") max_const_speed: Final = ZCLAttributeDef(id=0x0008, type=t.uint16_t, access="r") min_const_flow: Final = ZCLAttributeDef(id=0x0009, type=t.uint16_t, access="r") max_const_flow: Final = ZCLAttributeDef(id=0x000A, type=t.uint16_t, access="r") min_const_temp: Final = ZCLAttributeDef(id=0x000B, type=t.int16s, access="r") max_const_temp: Final = ZCLAttributeDef(id=0x000C, type=t.int16s, access="r") # Pump Dynamic Information pump_status: Final = ZCLAttributeDef(id=0x0010, type=PumpStatus, access="rp") effective_operation_mode: Final = ZCLAttributeDef( id=0x0011, type=OperationMode, access="r", mandatory=True ) effective_control_mode: Final = ZCLAttributeDef( id=0x0012, type=ControlMode, access="r", mandatory=True ) capacity: Final = ZCLAttributeDef( id=0x0013, type=t.int16s, access="rp", 
mandatory=True ) speed: Final = ZCLAttributeDef(id=0x0014, type=t.uint16_t, access="r") lifetime_running_hours: Final = ZCLAttributeDef( id=0x0015, type=t.uint24_t, access="rw" ) power: Final = ZCLAttributeDef(id=0x0016, type=t.uint24_t, access="rw") lifetime_energy_consumed: Final = ZCLAttributeDef( id=0x0017, type=t.uint32_t, access="r" ) # Pump Settings operation_mode: Final = ZCLAttributeDef( id=0x0020, type=OperationMode, access="rw", mandatory=True ) control_mode: Final = ZCLAttributeDef(id=0x0021, type=ControlMode, access="rw") alarm_mask: Final = ZCLAttributeDef(id=0x0022, type=PumpAlarmMask, access="r") class CoolingSystemStage(t.enum8): Cool_Stage_1 = 0x00 Cool_Stage_2 = 0x01 Cool_Stage_3 = 0x02 Reserved = 0x03 class HeatingSystemStage(t.enum8): Heat_Stage_1 = 0x00 Heat_Stage_2 = 0x01 Heat_Stage_3 = 0x02 Reserved = 0x03 class HeatingSystemType(t.enum8): Conventional = 0x00 Heat_Pump = 0x01 class HeatingFuelSource(t.enum8): Electric = 0x00 Gas = 0x01 class ACCapacityFormat(t.enum8): BTUh = 0x00 class ACCompressorType(t.enum8): Reserved = 0x00 T1_max_working_43C = 0x01 T2_max_working_35C = 0x02 T3_max_working_52C = 0x03 class ACType(t.enum8): Reserved = 0x00 Cooling_fixed_speed = 0x01 Heat_Pump_fixed_speed = 0x02 Cooling_Inverter = 0x03 Heat_Pump_Inverter = 0x04 class ACRefrigerantType(t.enum8): Reserved = 0x00 R22 = 0x01 R410a = 0x02 R407c = 0x03 class ACErrorCode(t.bitmap32): No_Errors = 0x00000000 Commpressor_Failure = 0x00000001 Room_Temperature_Sensor_Failure = 0x00000002 Outdoor_Temperature_Sensor_Failure = 0x00000004 Indoor_Coil_Temperature_Sensor_Failure = 0x00000008 Fan_Failure = 0x00000010 class ACLouverPosition(t.enum8): Closed = 0x01 Open = 0x02 Qurter_Open = 0x03 Half_Open = 0x04 Three_Quarters_Open = 0x05 class AlarmMask(t.bitmap8): No_Alarms = 0x00 Initialization_failure = 0x01 Hardware_failure = 0x02 Self_calibration_failure = 0x04 class ControlSequenceOfOperation(t.enum8): Cooling_Only = 0x00 Cooling_With_Reheat = 0x01 Heating_Only = 0x02 
Heating_With_Reheat = 0x03 Cooling_and_Heating = 0x04 Cooling_and_Heating_with_Reheat = 0x05 class SeqDayOfWeek(t.bitmap8): Sunday = 0x01 Monday = 0x02 Tuesday = 0x04 Wednesday = 0x08 Thursday = 0x10 Friday = 0x20 Saturday = 0x40 Away = 0x80 class SeqMode(t.bitmap8): Heat = 0x01 Cool = 0x02 class Occupancy(t.bitmap8): Unoccupied = 0x00 Occupied = 0x01 class ProgrammingOperationMode(t.bitmap8): Simple = 0x00 Schedule_programming_mode = 0x01 Auto_recovery_mode = 0x02 Economy_mode = 0x04 class RemoteSensing(t.bitmap8): all_local = 0x00 local_temperature_sensed_remotely = 0x01 outdoor_temperature_sensed_remotely = 0x02 occupancy_sensed_remotely = 0x04 class SetpointChangeSource(t.enum8): Manual = 0x00 Schedule = 0x01 External = 0x02 class SetpointMode(t.enum8): Heat = 0x00 Cool = 0x01 Both = 0x02 class StartOfWeek(t.enum8): Sunday = 0x00 Monday = 0x01 Tuesday = 0x02 Wednesday = 0x03 Thursday = 0x04 Friday = 0x05 Saturday = 0x06 class SystemMode(t.enum8): Off = 0x00 Auto = 0x01 Cool = 0x03 Heat = 0x04 Emergency_Heating = 0x05 Pre_cooling = 0x06 Fan_only = 0x07 Dry = 0x08 Sleep = 0x09 class SystemType(t.bitmap8): Heat_and_or_Cool_Stage_1 = 0x00 Cool_Stage_1 = 0x01 Cool_Stage_2 = 0x02 Heat_Stage_1 = 0x04 Heat_Stage_2 = 0x08 Heat_Pump = 0x10 Gas = 0x20 @property def cooling_system_stage(self) -> CoolingSystemStage: return CoolingSystemStage(self & 0x03) @property def heating_system_stage(self) -> HeatingSystemStage: return HeatingSystemStage((self >> 2) & 0x03) @property def heating_system_type(self) -> HeatingSystemType: return HeatingSystemType((self >> 4) & 0x01) @property def heating_fuel_source(self) -> HeatingFuelSource: return HeatingFuelSource((self >> 5) & 0x01) class TemperatureSetpointHold(t.enum8): Setpoint_Hold_Off = 0x00 Setpoint_Hold_On = 0x01 class RunningMode(t.enum8): Off = 0x00 Cool = 0x03 Heat = 0x04 class RunningState(t.bitmap16): Idle = 0x0000 Heat_State_On = 0x0001 Cool_State_On = 0x0002 Fan_State_On = 0x0004 Heat_2nd_Stage_On = 0x0008 
Cool_2nd_Stage_On = 0x0010 Fan_2nd_Stage_On = 0x0020 Fan_3rd_Stage_On = 0x0040 class Thermostat(Cluster): """An interface for configuring and controlling the functionality of a thermostat. """ ACCapacityFormat: Final = ACCapacityFormat ACErrorCode: Final = ACErrorCode ACLouverPosition: Final = ACLouverPosition AlarmMask: Final = AlarmMask ControlSequenceOfOperation: Final = ControlSequenceOfOperation SeqDayOfWeek: Final = SeqDayOfWeek SeqMode: Final = SeqMode Occupancy: Final = Occupancy ProgrammingOperationMode: Final = ProgrammingOperationMode RemoteSensing: Final = RemoteSensing SetpointChangeSource: Final = SetpointChangeSource SetpointMode: Final = SetpointMode StartOfWeek: Final = StartOfWeek SystemMode: Final = SystemMode SystemType: Final = SystemType TemperatureSetpointHold: Final = TemperatureSetpointHold RunningMode: Final = RunningMode RunningState: Final = RunningState cluster_id: Final = 0x0201 ep_attribute: Final = "thermostat" class AttributeDefs(BaseAttributeDefs): # Thermostat Information local_temperature: Final = ZCLAttributeDef( id=0x0000, type=t.int16s, access="rp", mandatory=True ) outdoor_temperature: Final = ZCLAttributeDef( id=0x0001, type=t.int16s, access="r" ) occupancy: Final = ZCLAttributeDef(id=0x0002, type=Occupancy, access="r") abs_min_heat_setpoint_limit: Final = ZCLAttributeDef( id=0x0003, type=t.int16s, access="r" ) abs_max_heat_setpoint_limit: Final = ZCLAttributeDef( id=0x0004, type=t.int16s, access="r" ) abs_min_cool_setpoint_limit: Final = ZCLAttributeDef( id=0x0005, type=t.int16s, access="r" ) abs_max_cool_setpoint_limit: Final = ZCLAttributeDef( id=0x0006, type=t.int16s, access="r" ) pi_cooling_demand: Final = ZCLAttributeDef( id=0x0007, type=t.uint8_t, access="rp" ) pi_heating_demand: Final = ZCLAttributeDef( id=0x0008, type=t.uint8_t, access="rp" ) system_type_config: Final = ZCLAttributeDef( id=0x0009, type=SystemType, access="r*w" ) # Thermostat Settings local_temperature_calibration: Final = ZCLAttributeDef( id=0x0010, 
type=t.int8s, access="rw" ) # At least one of these two attribute sets will be available occupied_cooling_setpoint: Final = ZCLAttributeDef( id=0x0011, type=t.int16s, access="rws" ) occupied_heating_setpoint: Final = ZCLAttributeDef( id=0x0012, type=t.int16s, access="rws" ) unoccupied_cooling_setpoint: Final = ZCLAttributeDef( id=0x0013, type=t.int16s, access="rw" ) unoccupied_heating_setpoint: Final = ZCLAttributeDef( id=0x0014, type=t.int16s, access="rw" ) min_heat_setpoint_limit: Final = ZCLAttributeDef( id=0x0015, type=t.int16s, access="rw" ) max_heat_setpoint_limit: Final = ZCLAttributeDef( id=0x0016, type=t.int16s, access="rw" ) min_cool_setpoint_limit: Final = ZCLAttributeDef( id=0x0017, type=t.int16s, access="rw" ) max_cool_setpoint_limit: Final = ZCLAttributeDef( id=0x0018, type=t.int16s, access="rw" ) min_setpoint_dead_band: Final = ZCLAttributeDef( id=0x0019, type=t.int8s, access="r*w" ) remote_sensing: Final = ZCLAttributeDef( id=0x001A, type=RemoteSensing, access="rw" ) ctrl_sequence_of_oper: Final = ZCLAttributeDef( id=0x001B, type=ControlSequenceOfOperation, access="rw", mandatory=True, ) system_mode: Final = ZCLAttributeDef( id=0x001C, type=SystemMode, access="rws", mandatory=True ) alarm_mask: Final = ZCLAttributeDef(id=0x001D, type=AlarmMask, access="r") running_mode: Final = ZCLAttributeDef(id=0x001E, type=RunningMode, access="r") # Schedule start_of_week: Final = ZCLAttributeDef(id=0x0020, type=StartOfWeek, access="r") number_of_weekly_transitions: Final = ZCLAttributeDef( id=0x0021, type=t.uint8_t, access="r" ) number_of_daily_transitions: Final = ZCLAttributeDef( id=0x0022, type=t.uint8_t, access="r" ) temp_setpoint_hold: Final = ZCLAttributeDef( id=0x0023, type=TemperatureSetpointHold, access="rw" ) temp_setpoint_hold_duration: Final = ZCLAttributeDef( id=0x0024, type=t.uint16_t, access="rw" ) programing_oper_mode: Final = ZCLAttributeDef( id=0x0025, type=ProgrammingOperationMode, access="rwp" ) # HVAC Relay running_state: Final = 
ZCLAttributeDef(id=0x0029, type=RunningState, access="r") # Thermostat Setpoint Change Tracking setpoint_change_source: Final = ZCLAttributeDef( id=0x0030, type=SetpointChangeSource, access="r" ) setpoint_change_amount: Final = ZCLAttributeDef( id=0x0031, type=t.int16s, access="r" ) setpoint_change_source_timestamp: Final = ZCLAttributeDef( id=0x0032, type=t.UTCTime, access="r" ) occupied_setback: Final = ZCLAttributeDef( id=0x0034, type=t.uint8_t, access="rw" ) occupied_setback_min: Final = ZCLAttributeDef( id=0x0035, type=t.uint8_t, access="r" ) occupied_setback_max: Final = ZCLAttributeDef( id=0x0036, type=t.uint8_t, access="r" ) unoccupied_setback: Final = ZCLAttributeDef( id=0x0037, type=t.uint8_t, access="rw" ) unoccupied_setback_min: Final = ZCLAttributeDef( id=0x0038, type=t.uint8_t, access="r" ) unoccupied_setback_max: Final = ZCLAttributeDef( id=0x0039, type=t.uint8_t, access="r" ) emergency_heat_delta: Final = ZCLAttributeDef( id=0x003A, type=t.uint8_t, access="rw" ) # AC Information ac_type: Final = ZCLAttributeDef(id=0x0040, type=ACType, access="rw") ac_capacity: Final = ZCLAttributeDef(id=0x0041, type=t.uint16_t, access="rw") ac_refrigerant_type: Final = ZCLAttributeDef( id=0x0042, type=ACRefrigerantType, access="rw" ) ac_compressor_type: Final = ZCLAttributeDef( id=0x0043, type=ACCompressorType, access="rw" ) ac_error_code: Final = ZCLAttributeDef(id=0x0044, type=ACErrorCode, access="rw") ac_louver_position: Final = ZCLAttributeDef( id=0x0045, type=ACLouverPosition, access="rw" ) ac_coil_temperature: Final = ZCLAttributeDef( id=0x0046, type=t.int16s, access="r" ) ac_capacity_format: Final = ZCLAttributeDef( id=0x0047, type=ACCapacityFormat, access="rw" ) class ServerCommandDefs(BaseCommandDefs): setpoint_raise_lower: Final = ZCLCommandDef( id=0x00, schema={"mode": SetpointMode, "amount": t.int8s}, direction=False ) set_weekly_schedule: Final = ZCLCommandDef( id=0x01, schema={ "num_transitions_for_sequence": t.enum8, "day_of_week_for_sequence": 
SeqDayOfWeek, "mode_for_sequence": SeqMode, "values": t.List[t.int16s], }, direction=False, ) get_weekly_schedule: Final = ZCLCommandDef( id=0x02, schema={"days_to_return": SeqDayOfWeek, "mode_to_return": SeqMode}, direction=False, ) clear_weekly_schedule: Final = ZCLCommandDef( id=0x03, schema={}, direction=False ) get_relay_status_log: Final = ZCLCommandDef(id=0x04, schema={}, direction=False) class ClientCommandDefs(BaseCommandDefs): get_weekly_schedule_response: Final = ZCLCommandDef( id=0x00, schema={ "num_transitions_for_sequence": t.enum8, "day_of_week_for_sequence": SeqDayOfWeek, "mode_for_sequence": SeqMode, "values": t.List[t.int16s], }, direction=True, ) get_relay_status_log_response: Final = ZCLCommandDef( id=0x01, schema={ "time_of_day": t.uint16_t, "relay_status": t.bitmap8, "local_temperature": t.int16s, "humidity_in_percentage": t.uint8_t, "set_point": t.int16s, "unread_entries": t.uint16_t, }, direction=True, ) class FanMode(t.enum8): Off = 0x00 Low = 0x01 Medium = 0x02 High = 0x03 On = 0x04 Auto = 0x05 Smart = 0x06 class FanModeSequence(t.enum8): Low_Med_High = 0x00 Low_High = 0x01 Low_Med_High_Auto = 0x02 Low_High_Auto = 0x03 On_Auto = 0x04 class Fan(Cluster): """An interface for controlling a fan in a heating / cooling system. 
""" FanMode: Final = FanMode FanModeSequence: Final = FanModeSequence cluster_id: Final = 0x0202 name: Final = "Fan Control" ep_attribute: Final = "fan" class AttributeDefs(BaseAttributeDefs): fan_mode: Final = ZCLAttributeDef(id=0x0000, type=FanMode, access="") fan_mode_sequence: Final = ZCLAttributeDef( id=0x0001, type=FanModeSequence, access="" ) class RelativeHumidityMode(t.enum8): RH_measured_locally = 0x00 RH_measured_remotely = 0x01 class DehumidificationLockout(t.enum8): Dehumidification_not_allowed = 0x00 Dehumidification_is_allowed = 0x01 class RelativeHumidityDisplay(t.enum8): RH_not_displayed = 0x00 RH_is_displayed = 0x01 class Dehumidification(Cluster): """An interface for controlling dehumidification.""" RelativeHumidityMode: Final = RelativeHumidityMode DehumidificationLockout: Final = DehumidificationLockout RelativeHumidityDisplay: Final = RelativeHumidityDisplay cluster_id: Final = 0x0203 ep_attribute: Final = "dehumidification" class AttributeDefs(BaseAttributeDefs): # Dehumidification Information relative_humidity: Final = ZCLAttributeDef( id=0x0000, type=t.uint8_t, access="r" ) dehumidification_cooling: Final = ZCLAttributeDef( id=0x0001, type=t.uint8_t, access="rp", mandatory=True ) # Dehumidification Settings rh_dehumidification_setpoint: Final = ZCLAttributeDef( id=0x0010, type=t.uint8_t, access="rw", mandatory=True ) relative_humidity_mode: Final = ZCLAttributeDef( id=0x0011, type=RelativeHumidityMode, access="rw" ) dehumidification_lockout: Final = ZCLAttributeDef( id=0x0012, type=DehumidificationLockout, access="rw" ) dehumidification_hysteresis: Final = ZCLAttributeDef( id=0x0013, type=t.uint8_t, access="rw", mandatory=True ) dehumidification_max_cool: Final = ZCLAttributeDef( id=0x0014, type=t.uint8_t, access="rw", mandatory=True ) relative_humidity_display: Final = ZCLAttributeDef( id=0x0015, type=RelativeHumidityDisplay, access="rw" ) class TemperatureDisplayMode(t.enum8): Metric = 0x00 Imperial = 0x01 class KeypadLockout(t.enum8): 
No_lockout = 0x00 Level_1_lockout = 0x01 Level_2_lockout = 0x02 Level_3_lockout = 0x03 Level_4_lockout = 0x04 Level_5_lockout = 0x05 class ScheduleProgrammingVisibility(t.enum8): Enabled = 0x00 Disabled = 0x02 class UserInterface(Cluster): """An interface for configuring the user interface of a thermostat (which may be remote from the thermostat). """ TemperatureDisplayMode: Final = TemperatureDisplayMode KeypadLockout: Final = KeypadLockout ScheduleProgrammingVisibility: Final = ScheduleProgrammingVisibility cluster_id: Final = 0x0204 name: Final = "Thermostat User Interface Configuration" ep_attribute: Final = "thermostat_ui" class AttributeDefs(BaseAttributeDefs): temperature_display_mode: Final = ZCLAttributeDef( id=0x0000, type=TemperatureDisplayMode, access="rw", mandatory=True, ) keypad_lockout: Final = ZCLAttributeDef( id=0x0001, type=KeypadLockout, access="rw", mandatory=True ) schedule_programming_visibility: Final = ZCLAttributeDef( id=0x0002, type=ScheduleProgrammingVisibility, access="rw", ) zigpy-0.62.3/zigpy/zcl/clusters/lighting.py000066400000000000000000000416641456054056700207500ustar00rootroot00000000000000"""Lighting Functional Domain""" from __future__ import annotations from typing import Final import zigpy.types as t from zigpy.zcl import Cluster, foundation from zigpy.zcl.foundation import ( BaseAttributeDefs, BaseCommandDefs, ZCLAttributeDef, ZCLCommandDef, ) class ColorMode(t.enum8): Hue_and_saturation = 0x00 X_and_Y = 0x01 Color_temperature = 0x02 class EnhancedColorMode(t.enum8): Hue_and_saturation = 0x00 X_and_Y = 0x01 Color_temperature = 0x02 Enhanced_hue_and_saturation = 0x03 class ColorCapabilities(t.bitmap16): Hue_and_saturation = 0b00000000_00000001 Enhanced_hue = 0b00000000_00000010 Color_loop = 0b00000000_00000100 XY_attributes = 0b00000000_00001000 Color_temperature = 0b00000000_00010000 class Direction(t.enum8): Shortest_distance = 0x00 Longest_distance = 0x01 Up = 0x02 Down = 0x03 class MoveMode(t.enum8): Stop = 0x00 Up = 0x01 
Down = 0x03 class StepMode(t.enum8): Up = 0x01 Down = 0x03 class ColorLoopUpdateFlags(t.bitmap8): Action = 0b0000_0001 Direction = 0b0000_0010 Time = 0b0000_0100 Start_Hue = 0b0000_1000 class ColorLoopAction(t.enum8): Deactivate = 0x00 Activate_from_color_loop_hue = 0x01 Activate_from_current_hue = 0x02 class ColorLoopDirection(t.enum8): Decrement = 0x00 Increment = 0x01 class DriftCompensation(t.enum8): NONE = 0x00 Other_or_unknown = 0x01 Temperature_monitoring = 0x02 Luminance_monitoring = 0x03 Color_monitoring = 0x03 class Options(t.bitmap8): Execute_if_off = 0b00000001 class Color(Cluster): """Attributes and commands for controlling the color properties of a color-capable light """ ColorMode: Final = ColorMode EnhancedColorMode: Final = EnhancedColorMode ColorCapabilities: Final = ColorCapabilities Direction: Final = Direction MoveMode: Final = MoveMode StepMode: Final = StepMode ColorLoopUpdateFlags: Final = ColorLoopUpdateFlags ColorLoopAction: Final = ColorLoopAction ColorLoopDirection: Final = ColorLoopDirection DriftCompensation: Final = DriftCompensation Options: Final = Options cluster_id: Final = 0x0300 name: Final = "Color Control" ep_attribute: Final = "light_color" class AttributeDefs(BaseAttributeDefs): current_hue: Final = ZCLAttributeDef(id=0x0000, type=t.uint8_t, access="rp") current_saturation: Final = ZCLAttributeDef( id=0x0001, type=t.uint8_t, access="rps" ) remaining_time: Final = ZCLAttributeDef(id=0x0002, type=t.uint16_t, access="r") current_x: Final = ZCLAttributeDef(id=0x0003, type=t.uint16_t, access="rps") current_y: Final = ZCLAttributeDef(id=0x0004, type=t.uint16_t, access="rps") drift_compensation: Final = ZCLAttributeDef( id=0x0005, type=DriftCompensation, access="r" ) compensation_text: Final = ZCLAttributeDef( id=0x0006, type=t.CharacterString, access="r" ) color_temperature: Final = ZCLAttributeDef( id=0x0007, type=t.uint16_t, access="rps" ) color_mode: Final = ZCLAttributeDef( id=0x0008, type=ColorMode, access="r", mandatory=True 
) options: Final = ZCLAttributeDef( id=0x000F, type=Options, access="rw", mandatory=True ) # Defined Primaries Information num_primaries: Final = ZCLAttributeDef(id=0x0010, type=t.uint8_t, access="r") primary1_x: Final = ZCLAttributeDef(id=0x0011, type=t.uint16_t, access="r") primary1_y: Final = ZCLAttributeDef(id=0x0012, type=t.uint16_t, access="r") primary1_intensity: Final = ZCLAttributeDef( id=0x0013, type=t.uint8_t, access="r" ) primary2_x: Final = ZCLAttributeDef(id=0x0015, type=t.uint16_t, access="r") primary2_y: Final = ZCLAttributeDef(id=0x0016, type=t.uint16_t, access="r") primary2_intensity: Final = ZCLAttributeDef( id=0x0017, type=t.uint8_t, access="r" ) primary3_x: Final = ZCLAttributeDef(id=0x0019, type=t.uint16_t, access="r") primary3_y: Final = ZCLAttributeDef(id=0x001A, type=t.uint16_t, access="r") primary3_intensity: Final = ZCLAttributeDef( id=0x001B, type=t.uint8_t, access="r" ) # Additional Defined Primaries Information primary4_x: Final = ZCLAttributeDef(id=0x0020, type=t.uint16_t, access="r") primary4_y: Final = ZCLAttributeDef(id=0x0021, type=t.uint16_t, access="r") primary4_intensity: Final = ZCLAttributeDef( id=0x0022, type=t.uint8_t, access="r" ) primary5_x: Final = ZCLAttributeDef(id=0x0024, type=t.uint16_t, access="r") primary5_y: Final = ZCLAttributeDef(id=0x0025, type=t.uint16_t, access="r") primary5_intensity: Final = ZCLAttributeDef( id=0x0026, type=t.uint8_t, access="r" ) primary6_x: Final = ZCLAttributeDef(id=0x0028, type=t.uint16_t, access="r") primary6_y: Final = ZCLAttributeDef(id=0x0029, type=t.uint16_t, access="r") primary6_intensity: Final = ZCLAttributeDef( id=0x002A, type=t.uint8_t, access="r" ) # Defined Color Point Settings white_point_x: Final = ZCLAttributeDef(id=0x0030, type=t.uint16_t, access="r") white_point_y: Final = ZCLAttributeDef(id=0x0031, type=t.uint16_t, access="r") color_point_r_x: Final = ZCLAttributeDef(id=0x0032, type=t.uint16_t, access="r") color_point_r_y: Final = ZCLAttributeDef(id=0x0033, 
type=t.uint16_t, access="r") color_point_r_intensity: Final = ZCLAttributeDef( id=0x0034, type=t.uint8_t, access="r" ) color_point_g_x: Final = ZCLAttributeDef(id=0x0036, type=t.uint16_t, access="r") color_point_g_y: Final = ZCLAttributeDef(id=0x0037, type=t.uint16_t, access="r") color_point_g_intensity: Final = ZCLAttributeDef( id=0x0038, type=t.uint8_t, access="r" ) color_point_b_x: Final = ZCLAttributeDef(id=0x003A, type=t.uint16_t, access="r") color_point_b_y: Final = ZCLAttributeDef(id=0x003B, type=t.uint16_t, access="r") color_point_b_intensity: Final = ZCLAttributeDef( id=0x003C, type=t.uint8_t, access="r" ) # ... enhanced_current_hue: Final = ZCLAttributeDef( id=0x4000, type=t.uint16_t, access="rs" ) enhanced_color_mode: Final = ZCLAttributeDef( id=0x4001, type=EnhancedColorMode, access="r", mandatory=True ) color_loop_active: Final = ZCLAttributeDef( id=0x4002, type=t.uint8_t, access="rs" ) color_loop_direction: Final = ZCLAttributeDef( id=0x4003, type=t.uint8_t, access="rs" ) color_loop_time: Final = ZCLAttributeDef( id=0x4004, type=t.uint16_t, access="rs" ) color_loop_start_enhanced_hue: Final = ZCLAttributeDef( id=0x4005, type=t.uint16_t, access="r" ) color_loop_stored_enhanced_hue: Final = ZCLAttributeDef( id=0x4006, type=t.uint16_t, access="r" ) color_capabilities: Final = ZCLAttributeDef( id=0x400A, type=ColorCapabilities, access="r", mandatory=True ) color_temp_physical_min: Final = ZCLAttributeDef( id=0x400B, type=t.uint16_t, access="r" ) color_temp_physical_max: Final = ZCLAttributeDef( id=0x400C, type=t.uint16_t, access="r" ) couple_color_temp_to_level_min: Final = ZCLAttributeDef( id=0x400D, type=t.uint16_t, access="r" ) start_up_color_temperature: Final = ZCLAttributeDef( id=0x4010, type=t.uint16_t, access="rw" ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class ServerCommandDefs(BaseCommandDefs): move_to_hue: Final = ZCLCommandDef( id=0x00, schema={ "hue": 
t.uint8_t, "direction": Direction, "transition_time": t.uint16_t, "options_mask?": t.bitmap8, "options_override?": t.bitmap8, }, direction=False, ) move_hue: Final = ZCLCommandDef( id=0x01, schema={ "move_mode": MoveMode, "rate": t.uint8_t, "options_mask?": t.bitmap8, "options_override?": t.bitmap8, }, direction=False, ) step_hue: Final = ZCLCommandDef( id=0x02, schema={ "step_mode": StepMode, "step_size": t.uint8_t, "transition_time": t.uint8_t, "options_mask?": t.bitmap8, "options_override?": t.bitmap8, }, direction=False, ) move_to_saturation: Final = ZCLCommandDef( id=0x03, schema={ "saturation": t.uint8_t, "transition_time": t.uint16_t, "options_mask?": t.bitmap8, "options_override?": t.bitmap8, }, direction=False, ) move_saturation: Final = ZCLCommandDef( id=0x04, schema={ "move_mode": MoveMode, "rate": t.uint8_t, "options_mask?": t.bitmap8, "options_override?": t.bitmap8, }, direction=False, ) step_saturation: Final = ZCLCommandDef( id=0x05, schema={ "step_mode": StepMode, "step_size": t.uint8_t, "transition_time": t.uint8_t, "options_mask?": t.bitmap8, "options_override?": t.bitmap8, }, direction=False, ) move_to_hue_and_saturation: Final = ZCLCommandDef( id=0x06, schema={ "hue": t.uint8_t, "saturation": t.uint8_t, "transition_time": t.uint16_t, "options_mask?": t.bitmap8, "options_override?": t.bitmap8, }, direction=False, ) move_to_color: Final = ZCLCommandDef( id=0x07, schema={ "color_x": t.uint16_t, "color_y": t.uint16_t, "transition_time": t.uint16_t, "options_mask?": t.bitmap8, "options_override?": t.bitmap8, }, direction=False, ) move_color: Final = ZCLCommandDef( id=0x08, schema={ "rate_x": t.uint16_t, "rate_y": t.uint16_t, "options_mask?": t.bitmap8, "options_override?": t.bitmap8, }, direction=False, ) step_color: Final = ZCLCommandDef( id=0x09, schema={ "step_x": t.uint16_t, "step_y": t.uint16_t, "duration": t.uint16_t, "options_mask?": t.bitmap8, "options_override?": t.bitmap8, }, direction=False, ) move_to_color_temp: Final = ZCLCommandDef( 
id=0x0A, schema={ "color_temp_mireds": t.uint16_t, "transition_time": t.uint16_t, "options_mask?": t.bitmap8, "options_override?": t.bitmap8, }, direction=False, ) enhanced_move_to_hue: Final = ZCLCommandDef( id=0x40, schema={ "enhanced_hue": t.uint16_t, "direction": Direction, "transition_time": t.uint16_t, "options_mask?": t.bitmap8, "options_override?": t.bitmap8, }, direction=False, ) enhanced_move_hue: Final = ZCLCommandDef( id=0x41, schema={ "move_mode": MoveMode, "rate": t.uint16_t, "options_mask?": t.bitmap8, "options_override?": t.bitmap8, }, direction=False, ) enhanced_step_hue: Final = ZCLCommandDef( id=0x42, schema={ "step_mode": StepMode, "step_size": t.uint16_t, "transition_time": t.uint16_t, "options_mask?": t.bitmap8, "options_override?": t.bitmap8, }, direction=False, ) enhanced_move_to_hue_and_saturation: Final = ZCLCommandDef( id=0x43, schema={ "enhanced_hue": t.uint16_t, "saturation": t.uint8_t, "transition_time": t.uint16_t, "options_mask?": t.bitmap8, "options_override?": t.bitmap8, }, direction=False, ) color_loop_set: Final = ZCLCommandDef( id=0x44, schema={ "update_flags": ColorLoopUpdateFlags, "action": ColorLoopAction, "direction": ColorLoopDirection, "time": t.uint16_t, "start_hue": t.uint16_t, "options_mask?": t.bitmap8, "options_override?": t.bitmap8, }, direction=False, ) stop_move_step: Final = ZCLCommandDef( id=0x47, schema={ "options_mask?": t.bitmap8, "options_override?": t.bitmap8, }, direction=False, ) move_color_temp: Final = ZCLCommandDef( id=0x4B, schema={ "move_mode": MoveMode, "rate": t.uint16_t, "color_temp_min_mireds": t.uint16_t, "color_temp_max_mireds": t.uint16_t, "options_mask?": t.bitmap8, "options_override?": t.bitmap8, }, direction=False, ) step_color_temp: Final = ZCLCommandDef( id=0x4C, schema={ "step_mode": StepMode, "step_size": t.uint16_t, "transition_time": t.uint16_t, "color_temp_min_mireds": t.uint16_t, "color_temp_max_mireds": t.uint16_t, "options_mask?": t.bitmap8, "options_override?": t.bitmap8, }, 
direction=False, ) class BallastStatus(t.bitmap8): Non_operational = 0b00000001 Lamp_failure = 0b00000010 class LampAlarmMode(t.bitmap8): Lamp_burn_hours = 0b00000001 class Ballast(Cluster): """Attributes and commands for configuring a lighting ballast """ BallastStatus: Final = BallastStatus LampAlarmMode: Final = LampAlarmMode cluster_id: Final = 0x0301 ep_attribute: Final = "light_ballast" class AttributeDefs(BaseAttributeDefs): physical_min_level: Final = ZCLAttributeDef( id=0x0000, type=t.uint8_t, access="r", mandatory=True ) physical_max_level: Final = ZCLAttributeDef( id=0x0001, type=t.uint8_t, access="r", mandatory=True ) ballast_status: Final = ZCLAttributeDef( id=0x0002, type=BallastStatus, access="r" ) # Ballast Settings min_level: Final = ZCLAttributeDef( id=0x0010, type=t.uint8_t, access="rw", mandatory=True ) max_level: Final = ZCLAttributeDef( id=0x0011, type=t.uint8_t, access="rw", mandatory=True ) power_on_level: Final = ZCLAttributeDef(id=0x0012, type=t.uint8_t, access="rw") power_on_fade_time: Final = ZCLAttributeDef( id=0x0013, type=t.uint16_t, access="rw" ) intrinsic_ballast_factor: Final = ZCLAttributeDef( id=0x0014, type=t.uint8_t, access="rw" ) ballast_factor_adjustment: Final = ZCLAttributeDef( id=0x0015, type=t.uint8_t, access="rw" ) # Lamp Information lamp_quantity: Final = ZCLAttributeDef(id=0x0020, type=t.uint8_t, access="r") # Lamp Settings lamp_type: Final = ZCLAttributeDef( id=0x0030, type=t.LimitedCharString(16), access="rw" ) lamp_manufacturer: Final = ZCLAttributeDef( id=0x0031, type=t.LimitedCharString(16), access="rw" ) lamp_rated_hours: Final = ZCLAttributeDef( id=0x0032, type=t.uint24_t, access="rw" ) lamp_burn_hours: Final = ZCLAttributeDef( id=0x0033, type=t.uint24_t, access="rw" ) lamp_alarm_mode: Final = ZCLAttributeDef( id=0x0034, type=LampAlarmMode, access="rw" ) lamp_burn_hours_trip_point: Final = ZCLAttributeDef( id=0x0035, type=t.uint24_t, access="rw" ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR 
reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR zigpy-0.62.3/zigpy/zcl/clusters/lightlink.py000066400000000000000000000223151456054056700211200ustar00rootroot00000000000000from __future__ import annotations from typing import Final import zigpy.types as t from zigpy.zcl import Cluster from zigpy.zcl.foundation import BaseCommandDefs, ZCLCommandDef class LogicalType(t.enum2): Coordinator = 0b00 Router = 0b01 EndDevice = 0b10 class ZigbeeInformation(t.Struct): logical_type: LogicalType rx_on_when_idle: t.uint1_t reserved: t.uint5_t class ScanRequestInformation(t.Struct): # whether the device is factory new factory_new: t.uint1_t # whether the device is capable of assigning addresses address_assignment: t.uint1_t reserved1: t.uint2_t # indicate the device is capable of initiating a link (i.e., it supports the # touchlink commissioning cluster at the client side) or 0 otherwise (i.e., it does # not support the touchlink commissioning cluster at the client side). touchlink_initiator: t.uint1_t undefined: t.uint1_t reserved2: t.uint1_t # If the ZLL profile is implemented, this bit shall be set to 0. 
In all other case # (Profile Interop / ZigBee 3.0), this bit shall be set to 1 profile_interop: t.uint1_t class ScanResponseInformation(t.Struct): factory_new: t.uint1_t address_assignment: t.uint1_t reserved1: t.uint2_t touchlink_initiator: t.uint1_t touchlink_priority_request: t.uint1_t reserved2: t.uint1_t profile_interop: t.uint1_t class DeviceInfoRecord(t.Struct): ieee: t.EUI64 endpoint_id: t.uint8_t profile_id: t.uint8_t device_id: t.uint16_t version: t.uint8_t group_id_count: t.uint8_t sort: t.uint8_t class Status(t.enum8): Success = 0x00 Failure = 0x01 class GroupInfoRecord(t.Struct): group_id: t.Group group_type: t.uint8_t class EndpointInfoRecord(t.Struct): nwk_addr: t.NWK endpoint_id: t.uint8_t profile_id: t.uint16_t device_id: t.uint16_t version: t.uint8_t class LightLink(Cluster): cluster_id: Final = 0x1000 ep_attribute: Final = "lightlink" class ServerCommandDefs(BaseCommandDefs): scan: Final = ZCLCommandDef( id=0x00, schema={ "inter_pan_transaction_id": t.uint32_t, "zigbee_information": ZigbeeInformation, "touchlink_information": ScanRequestInformation, }, direction=False, ) device_info: Final = ZCLCommandDef( id=0x02, schema={"inter_pan_transaction_id": t.uint32_t, "start_index": t.uint8_t}, direction=False, ) identify: Final = ZCLCommandDef( id=0x06, schema={ "inter_pan_transaction_id": t.uint32_t, "identify_duration": t.uint16_t, }, direction=False, ) reset_to_factory_new: Final = ZCLCommandDef( id=0x07, schema={"inter_pan_transaction_id": t.uint32_t}, direction=False, ) network_start: Final = ZCLCommandDef( id=0x10, schema={ "inter_pan_transaction_id": t.uint32_t, "epid": t.EUI64, "key_index": t.uint8_t, "encrypted_network_key": t.KeyData, "logical_channel": t.uint8_t, "pan_id": t.PanId, "nwk_addr": t.NWK, "group_identifiers_begin": t.Group, "group_identifiers_end": t.Group, "free_network_addr_range_begin": t.NWK, "free_network_addr_range_end": t.NWK, "free_group_id_range_begin": t.Group, "free_group_id_range_end": t.Group, "initiator_ieee": 
t.EUI64, "initiator_nwk": t.NWK, }, direction=False, ) network_join_router: Final = ZCLCommandDef( id=0x12, schema={ "inter_pan_transaction_id": t.uint32_t, "epid": t.EUI64, "key_index": t.uint8_t, "encrypted_network_key": t.KeyData, "nwk_update_id": t.uint8_t, "logical_channel": t.uint8_t, "pan_id": t.PanId, "nwk_addr": t.NWK, "group_identifiers_begin": t.Group, "group_identifiers_end": t.Group, "free_network_addr_range_begin": t.NWK, "free_network_addr_range_end": t.NWK, "free_group_id_range_begin": t.Group, "free_group_id_range_end": t.Group, }, direction=False, ) network_join_end_device: Final = ZCLCommandDef( id=0x14, schema={ "inter_pan_transaction_id": t.uint32_t, "epid": t.EUI64, "key_index": t.uint8_t, "encrypted_network_key": t.KeyData, "nwk_update_id": t.uint8_t, "logical_channel": t.uint8_t, "pan_id": t.PanId, "nwk_addr": t.NWK, "group_identifiers_begin": t.Group, "group_identifiers_end": t.Group, "free_network_addr_range_begin": t.NWK, "free_network_addr_range_end": t.NWK, "free_group_id_range_begin": t.Group, "free_group_id_range_end": t.Group, }, direction=False, ) network_update: Final = ZCLCommandDef( id=0x16, schema={ "inter_pan_transaction_id": t.uint32_t, "epid": t.EUI64, "nwk_update_id": t.uint8_t, "logical_channel": t.uint8_t, "pan_id": t.PanId, "nwk_addr": t.NWK, }, direction=False, ) # Utility get_group_identifiers: Final = ZCLCommandDef( id=0x41, schema={ "start_index": t.uint8_t, }, direction=False, ) get_endpoint_list: Final = ZCLCommandDef( id=0x42, schema={ "start_index": t.uint8_t, }, direction=False, ) class ClientCommandDefs(BaseCommandDefs): scan_rsp: Final = ZCLCommandDef( id=0x01, schema={ "inter_pan_transaction_id": t.uint32_t, "rssi_correction": t.uint8_t, "zigbee_info": ZigbeeInformation, "touchlink_info": ScanResponseInformation, "key_bitmask": t.uint16_t, "response_id": t.uint32_t, "epid": t.EUI64, "nwk_update_id": t.uint8_t, "logical_channel": t.uint8_t, "pan_id": t.PanId, "nwk_addr": t.NWK, "num_sub_devices": t.uint8_t, 
"total_group_ids": t.uint8_t, "endpoint_id?": t.uint8_t, "profile_id?": t.uint16_t, "device_id?": t.uint16_t, "version?": t.uint8_t, "group_id_count?": t.uint8_t, }, direction=True, ) device_info_rsp: Final = ZCLCommandDef( id=0x03, schema={ "inter_pan_transaction_id": t.uint32_t, "num_sub_devices": t.uint8_t, "start_index": t.uint8_t, "device_info_records": t.LVList[DeviceInfoRecord], }, direction=True, ) network_start_rsp: Final = ZCLCommandDef( id=0x11, schema={ "inter_pan_transaction_id": t.uint32_t, "status": Status, "epid": t.EUI64, "nwk_update_id": t.uint8_t, "logical_channel": t.uint8_t, "pan_id": t.PanId, }, direction=True, ) network_join_router_rsp: Final = ZCLCommandDef( id=0x13, schema={ "inter_pan_transaction_id": t.uint32_t, "status": Status, }, direction=True, ) network_join_end_device_rsp: Final = ZCLCommandDef( id=0x15, schema={ "inter_pan_transaction_id": t.uint32_t, "status": Status, }, direction=True, ) # Utility endpoint_info: Final = ZCLCommandDef( id=0x40, schema={ "ieee_addr": t.EUI64, "nwk_addr": t.NWK, "endpoint_id": t.uint8_t, "profile_id": t.uint16_t, "device_id": t.uint16_t, "version": t.uint8_t, }, direction=True, ) get_group_identifiers_rsp: Final = ZCLCommandDef( id=0x41, schema={ "total": t.uint8_t, "start_index": t.uint8_t, "group_info_records": t.LVList[GroupInfoRecord], }, direction=True, ) get_endpoint_list_rsp: Final = ZCLCommandDef( id=0x42, schema={ "total": t.uint8_t, "start_index": t.uint8_t, "endpoint_info_records": t.LVList[EndpointInfoRecord], }, direction=True, ) zigpy-0.62.3/zigpy/zcl/clusters/manufacturer_specific.py000066400000000000000000000004161456054056700234720ustar00rootroot00000000000000from __future__ import annotations from typing import Final from zigpy.zcl import Cluster class ManufacturerSpecificCluster(Cluster): cluster_id_range = (0xFC00, 0xFFFF) ep_attribute: Final = "manufacturer_specific" name: Final = "Manufacturer Specific" 
zigpy-0.62.3/zigpy/zcl/clusters/measurement.py000066400000000000000000000472011456054056700214610ustar00rootroot00000000000000"""Measurement & Sensing Functional Domain""" from __future__ import annotations from typing import Final import zigpy.types as t from zigpy.zcl import Cluster, foundation from zigpy.zcl.foundation import BaseAttributeDefs, ZCLAttributeDef class LightSensorType(t.enum8): Photodiode = 0x00 CMOS = 0x01 Unknown = 0xFF class IlluminanceMeasurement(Cluster): LightSensorType: Final = LightSensorType cluster_id: Final = 0x0400 name: Final = "Illuminance Measurement" ep_attribute: Final = "illuminance" class AttributeDefs(BaseAttributeDefs): measured_value: Final = ZCLAttributeDef( id=0x0000, type=t.uint16_t, access="rp", mandatory=True ) min_measured_value: Final = ZCLAttributeDef( id=0x0001, type=t.uint16_t, access="r", mandatory=True ) max_measured_value: Final = ZCLAttributeDef( id=0x0002, type=t.uint16_t, access="r", mandatory=True ) tolerance: Final = ZCLAttributeDef(id=0x0003, type=t.uint16_t, access="r") light_sensor_type: Final = ZCLAttributeDef( id=0x0004, type=LightSensorType, access="r" ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class LevelStatus(t.enum8): Illuminance_On_Target = 0x00 Illuminance_Below_Target = 0x01 Illuminance_Above_Target = 0x02 class IlluminanceLevelSensing(Cluster): LevelStatus: Final = LevelStatus LightSensorType: Final = LightSensorType cluster_id: Final = 0x0401 name: Final = "Illuminance Level Sensing" ep_attribute: Final = "illuminance_level" class AttributeDefs(BaseAttributeDefs): level_status: Final = ZCLAttributeDef( id=0x0000, type=LevelStatus, access="r", mandatory=True ) light_sensor_type: Final = ZCLAttributeDef( id=0x0001, type=LightSensorType, access="r" ) # Illuminance Level Sensing Settings illuminance_target_level: Final = ZCLAttributeDef( id=0x0010, type=t.uint16_t, access="rw", mandatory=True ) cluster_revision: 
Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class TemperatureMeasurement(Cluster): cluster_id: Final = 0x0402 name: Final = "Temperature Measurement" ep_attribute: Final = "temperature" class AttributeDefs(BaseAttributeDefs): # Temperature Measurement Information measured_value: Final = ZCLAttributeDef( id=0x0000, type=t.int16s, access="rp", mandatory=True ) min_measured_value: Final = ZCLAttributeDef( id=0x0001, type=t.int16s, access="r", mandatory=True ) max_measured_value: Final = ZCLAttributeDef( id=0x0002, type=t.int16s, access="r", mandatory=True ) tolerance: Final = ZCLAttributeDef(id=0x0003, type=t.uint16_t, access="r") # 0x0010: ('min_percent_change', UNKNOWN), # 0x0011: ('min_absolute_change', UNKNOWN), cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class PressureMeasurement(Cluster): cluster_id: Final = 0x0403 name: Final = "Pressure Measurement" ep_attribute: Final = "pressure" class AttributeDefs(BaseAttributeDefs): # Pressure Measurement Information measured_value: Final = ZCLAttributeDef( id=0x0000, type=t.int16s, access="rp", mandatory=True ) min_measured_value: Final = ZCLAttributeDef( id=0x0001, type=t.int16s, access="r", mandatory=True ) max_measured_value: Final = ZCLAttributeDef( id=0x0002, type=t.int16s, access="r", mandatory=True ) tolerance: Final = ZCLAttributeDef(id=0x0003, type=t.uint16_t, access="r") # Extended attribute set scaled_value: Final = ZCLAttributeDef(id=0x0010, type=t.int16s, access="r") min_scaled_value: Final = ZCLAttributeDef(id=0x0011, type=t.int16s, access="r") max_scaled_value: Final = ZCLAttributeDef(id=0x0012, type=t.int16s, access="r") scaled_tolerance: Final = ZCLAttributeDef( id=0x0013, type=t.uint16_t, access="r" ) scale: Final = ZCLAttributeDef(id=0x0014, type=t.int8s, access="r") cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = 
foundation.ZCL_REPORTING_STATUS_ATTR class FlowMeasurement(Cluster): cluster_id: Final = 0x0404 name: Final = "Flow Measurement" ep_attribute: Final = "flow" class AttributeDefs(BaseAttributeDefs): measured_value: Final = ZCLAttributeDef( id=0x0000, type=t.uint16_t, access="rp", mandatory=True ) min_measured_value: Final = ZCLAttributeDef( id=0x0001, type=t.uint16_t, access="r", mandatory=True ) max_measured_value: Final = ZCLAttributeDef( id=0x0002, type=t.uint16_t, access="r", mandatory=True ) tolerance: Final = ZCLAttributeDef(id=0x0003, type=t.uint16_t, access="r") cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class RelativeHumidity(Cluster): cluster_id: Final = 0x0405 name: Final = "Relative Humidity Measurement" ep_attribute: Final = "humidity" class AttributeDefs(BaseAttributeDefs): measured_value: Final = ZCLAttributeDef( id=0x0000, type=t.uint16_t, access="rp", mandatory=True ) min_measured_value: Final = ZCLAttributeDef( id=0x0001, type=t.uint16_t, access="r", mandatory=True ) max_measured_value: Final = ZCLAttributeDef( id=0x0002, type=t.uint16_t, access="r", mandatory=True ) tolerance: Final = ZCLAttributeDef(id=0x0003, type=t.uint16_t, access="r") cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class Occupancy(t.bitmap8): Unoccupied = 0b00000000 Occupied = 0b00000001 class OccupancySensorType(t.enum8): PIR = 0x00 Ultrasonic = 0x01 PIR_and_Ultrasonic = 0x02 Physical_Contact = 0x03 class OccupancySensorTypeBitmap(t.bitmap8): PIR = 0b00000001 Ultrasonic = 0b00000010 Physical_Contact = 0b00000100 class OccupancySensing(Cluster): Occupancy: Final = Occupancy OccupancySensorType: Final = OccupancySensorType OccupancySensorTypeBitmap: Final = OccupancySensorTypeBitmap cluster_id: Final = 0x0406 name: Final = "Occupancy Sensing" ep_attribute: Final = "occupancy" class AttributeDefs(BaseAttributeDefs): # 
Occupancy Sensor Information occupancy: Final = ZCLAttributeDef( id=0x0000, type=Occupancy, access="rp", mandatory=True ) occupancy_sensor_type_bitmap: Final = ZCLAttributeDef( id=0x0001, type=t.bitmap8, access="r", mandatory=True ) # PIR Configuration pir_o_to_u_delay: Final = ZCLAttributeDef( id=0x0010, type=t.uint16_t, access="rw" ) pir_u_to_o_delay: Final = ZCLAttributeDef( id=0x0011, type=t.uint16_t, access="rw" ) pir_u_to_o_threshold: Final = ZCLAttributeDef( id=0x0012, type=t.uint8_t, access="rw" ) # Ultrasonic Configuration ultrasonic_o_to_u_delay: Final = ZCLAttributeDef( id=0x0020, type=t.uint16_t, access="rw" ) ultrasonic_u_to_o_delay: Final = ZCLAttributeDef( id=0x0021, type=t.uint16_t, access="rw" ) ultrasonic_u_to_o_threshold: Final = ZCLAttributeDef( id=0x0022, type=t.uint8_t, access="rw" ) # Physical Contact Configuration physical_contact_o_to_u_delay: Final = ZCLAttributeDef( id=0x0030, type=t.uint16_t, access="rw" ) physical_contact_u_to_o_delay: Final = ZCLAttributeDef( id=0x0031, type=t.uint16_t, access="rw" ) physical_contact_u_to_o_threshold: Final = ZCLAttributeDef( id=0x0032, type=t.uint8_t, access="rw" ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class LeafWetness(Cluster): cluster_id: Final = 0x0407 name: Final = "Leaf Wetness Measurement" ep_attribute: Final = "leaf_wetness" class AttributeDefs(BaseAttributeDefs): # Leaf Wetness Measurement Information measured_value: Final = ZCLAttributeDef( id=0x0000, type=t.uint16_t, access="rp", mandatory=True ) min_measured_value: Final = ZCLAttributeDef( id=0x0001, type=t.uint16_t, access="r", mandatory=True ) max_measured_value: Final = ZCLAttributeDef( id=0x0002, type=t.uint16_t, access="r", mandatory=True ) tolerance: Final = ZCLAttributeDef(id=0x0003, type=t.uint16_t, access="r") cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class 
SoilMoisture(Cluster): cluster_id: Final = 0x0408 name: Final = "Soil Moisture Measurement" ep_attribute: Final = "soil_moisture" class AttributeDefs(BaseAttributeDefs): # Soil Moisture Measurement Information measured_value: Final = ZCLAttributeDef( id=0x0000, type=t.uint16_t, access="rp", mandatory=True ) min_measured_value: Final = ZCLAttributeDef( id=0x0001, type=t.uint16_t, access="r", mandatory=True ) max_measured_value: Final = ZCLAttributeDef( id=0x0002, type=t.uint16_t, access="r", mandatory=True ) tolerance: Final = ZCLAttributeDef(id=0x0003, type=t.uint16_t, access="r") cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class PH(Cluster): cluster_id: Final = 0x0409 name: Final = "pH Measurement" ep_attribute: Final = "ph" class AttributeDefs(BaseAttributeDefs): # pH Measurement Information measured_value: Final = ZCLAttributeDef( id=0x0000, type=t.uint16_t, access="rp", mandatory=True ) min_measured_value: Final = ZCLAttributeDef( id=0x0001, type=t.uint16_t, access="r", mandatory=True ) max_measured_value: Final = ZCLAttributeDef( id=0x0002, type=t.uint16_t, access="r", mandatory=True ) tolerance: Final = ZCLAttributeDef(id=0x0003, type=t.uint16_t, access="r") cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class ElectricalConductivity(Cluster): cluster_id: Final = 0x040A name: Final = "Electrical Conductivity" ep_attribute: Final = "electrical_conductivity" class AttributeDefs(BaseAttributeDefs): measured_value: Final = ZCLAttributeDef( id=0x0000, type=t.uint16_t, access="rp", mandatory=True ) min_measured_value: Final = ZCLAttributeDef( id=0x0001, type=t.uint16_t, access="r", mandatory=True ) max_measured_value: Final = ZCLAttributeDef( id=0x0002, type=t.uint16_t, access="r", mandatory=True ) tolerance: Final = ZCLAttributeDef(id=0x0003, type=t.uint16_t, access="r") cluster_revision: Final = 
foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class WindSpeed(Cluster): cluster_id: Final = 0x040B name: Final = "Wind Speed Measurement" ep_attribute: Final = "wind_speed" class AttributeDefs(BaseAttributeDefs): # Wind Speed Measurement Information measured_value: Final = ZCLAttributeDef( id=0x0000, type=t.uint16_t, access="rp", mandatory=True ) min_measured_value: Final = ZCLAttributeDef( id=0x0001, type=t.uint16_t, access="r", mandatory=True ) max_measured_value: Final = ZCLAttributeDef( id=0x0002, type=t.uint16_t, access="r", mandatory=True ) tolerance: Final = ZCLAttributeDef(id=0x0003, type=t.uint16_t, access="r") cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class _ConcentrationMixin: """Mixin for the common attributes of the concentration measurement clusters""" class AttributeDefs(BaseAttributeDefs): measured_value: Final = ZCLAttributeDef( id=0x0000, type=t.Single, access="rp", mandatory=True ) # fraction of 1 (one) min_measured_value: Final = ZCLAttributeDef( id=0x0001, type=t.Single, access="r", mandatory=True ) max_measured_value: Final = ZCLAttributeDef( id=0x0002, type=t.Single, access="r", mandatory=True ) tolerance: Final = ZCLAttributeDef(id=0x0003, type=t.Single, access="r") cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class CarbonMonoxideConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x040C name: Final = "Carbon Monoxide (CO) Concentration" ep_attribute: Final = "carbon_monoxide_concentration" class CarbonDioxideConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x040D name: Final = "Carbon Dioxide (CO₂) Concentration" ep_attribute: Final = "carbon_dioxide_concentration" class EthyleneConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x040E name: Final = "Ethylene (CH₂) Concentration" 
ep_attribute: Final = "ethylene_concentration" class EthyleneOxideConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x040F name: Final = "Ethylene Oxide (C₂H₄O) Concentration" ep_attribute: Final = "ethylene_oxide_concentration" class HydrogenConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x0410 name: Final = "Hydrogen (H) Concentration" ep_attribute: Final = "hydrogen_concentration" class HydrogenSulfideConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x0411 name: Final = "Hydrogen Sulfide (H₂S) Concentration" ep_attribute: Final = "hydrogen_sulfide_concentration" class NitricOxideConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x0412 name: Final = "Nitric Oxide (NO) Concentration" ep_attribute: Final = "nitric_oxide_concentration" class NitrogenDioxideConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x0413 name: Final = "Nitrogen Dioxide (NO₂) Concentration" ep_attribute: Final = "nitrogen_dioxide_concentration" class OxygenConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x0414 name: Final = "Oxygen (O₂) Concentration" ep_attribute: Final = "oxygen_concentration" class OzoneConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x0415 name: Final = "Ozone (O₃) Concentration" ep_attribute: Final = "ozone_concentration" class SulfurDioxideConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x0416 name: Final = "Sulfur Dioxide (SO₂) Concentration" ep_attribute: Final = "sulfur_dioxide_concentration" class DissolvedOxygenConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x0417 name: Final = "Dissolved Oxygen (DO) Concentration" ep_attribute: Final = "dissolved_oxygen_concentration" class BromateConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x0418 name: Final = "Bromate Concentration" ep_attribute: Final = "bromate_concentration" class ChloraminesConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 
0x0419 name: Final = "Chloramines Concentration" ep_attribute: Final = "chloramines_concentration" class ChlorineConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x041A name: Final = "Chlorine Concentration" ep_attribute: Final = "chlorine_concentration" class FecalColiformAndEColiFraction(_ConcentrationMixin, Cluster): """Percent of positive samples""" cluster_id: Final = 0x041B name: Final = "Fecal coliform & E. Coli Fraction" ep_attribute: Final = "fecal_coliform_and_e_coli_fraction" class FluorideConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x041C # XXX: spec repeats 0x041B but this seems like a mistake name: Final = "Fluoride Concentration" ep_attribute: Final = "fluoride_concentration" class HaloaceticAcidsConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x041D name: Final = "Haloacetic Acids Concentration" ep_attribute: Final = "haloacetic_acids_concentration" class TotalTrihalomethanesConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x041E name: Final = "Total Trihalomethanes Concentration" ep_attribute: Final = "total_trihalomethanes_concentration" class TotalColiformBacteriaFraction(_ConcentrationMixin, Cluster): cluster_id: Final = 0x041F name: Final = "Total Coliform Bacteria Fraction" ep_attribute: Final = "total_coliform_bacteria_fraction" # XXX: is this a concentration? What are the units? 
class Turbidity(_ConcentrationMixin, Cluster): """Cloudiness of particles in water where an average person would notice a 5 or higher""" cluster_id: Final = 0x0420 name: Final = "Turbidity" ep_attribute: Final = "turbidity" class CopperConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x0421 name: Final = "Copper Concentration" ep_attribute: Final = "copper_concentration" class LeadConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x0422 name: Final = "Lead Concentration" ep_attribute: Final = "lead_concentration" class ManganeseConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x0423 name: Final = "Manganese Concentration" ep_attribute: Final = "manganese_concentration" class SulfateConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x0424 name: Final = "Sulfate Concentration" ep_attribute: Final = "sulfate_concentration" class BromodichloromethaneConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x0425 name: Final = "Bromodichloromethane Concentration" ep_attribute: Final = "bromodichloromethane_concentration" class BromoformConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x0426 name: Final = "Bromoform Concentration" ep_attribute: Final = "bromoform_concentration" class ChlorodibromomethaneConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x0427 name: Final = "Chlorodibromomethane Concentration" ep_attribute: Final = "chlorodibromomethane_concentration" class ChloroformConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x0428 name: Final = "Chloroform Concentration" ep_attribute: Final = "chloroform_concentration" class SodiumConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x0429 name: Final = "Sodium Concentration" ep_attribute: Final = "sodium_concentration" # XXX: is this a concentration? What are the units? 
class PM25(_ConcentrationMixin, Cluster): """Particulate Matter 2.5 microns or less""" cluster_id: Final = 0x042A name: Final = "PM2.5" ep_attribute: Final = "pm25" class FormaldehydeConcentration(_ConcentrationMixin, Cluster): cluster_id: Final = 0x042B name: Final = "Formaldehyde Concentration" ep_attribute: Final = "formaldehyde_concentration" zigpy-0.62.3/zigpy/zcl/clusters/protocol.py000066400000000000000000000403361456054056700207770ustar00rootroot00000000000000"""Protocol Interfaces Functional Domain""" from __future__ import annotations from typing import Final import zigpy.types as t from zigpy.zcl import Cluster from zigpy.zcl.foundation import ( BaseAttributeDefs, BaseCommandDefs, ZCLAttributeDef, ZCLCommandDef, ) class DateTime(t.Struct): date: t.uint32_t time: t.uint32_t class GenericTunnel(Cluster): cluster_id: Final = 0x0600 ep_attribute: Final = "generic_tunnel" class AttributeDefs(BaseAttributeDefs): max_income_trans_size: Final = ZCLAttributeDef(id=0x0001, type=t.uint16_t) max_outgo_trans_size: Final = ZCLAttributeDef(id=0x0002, type=t.uint16_t) protocol_addr: Final = ZCLAttributeDef(id=0x0003, type=t.LVBytes) class ServerCommandDefs(BaseCommandDefs): match_protocol_addr: Final = ZCLCommandDef(id=0x00, schema={}, direction=False) class ClientCommandDefs(BaseCommandDefs): match_protocol_addr_response: Final = ZCLCommandDef( id=0x00, schema={}, direction=True ) advertise_protocol_address: Final = ZCLCommandDef( id=0x01, schema={}, direction=False ) class BacnetProtocolTunnel(Cluster): cluster_id: Final = 0x0601 ep_attribute: Final = "bacnet_tunnel" class ServerCommandDefs(BaseCommandDefs): transfer_npdu: Final = ZCLCommandDef( id=0x00, schema={"npdu": t.LVBytes}, direction=False ) class AnalogInputRegular(Cluster): cluster_id: Final = 0x0602 ep_attribute: Final = "bacnet_regular_analog_input" class AttributeDefs(BaseAttributeDefs): cov_increment: Final = ZCLAttributeDef(id=0x0016, type=t.Single) device_type: Final = ZCLAttributeDef(id=0x001F, 
type=t.CharacterString) object_id: Final = ZCLAttributeDef(id=0x004B, type=t.FixedList[4, t.uint8_t]) object_name: Final = ZCLAttributeDef(id=0x004D, type=t.CharacterString) object_type: Final = ZCLAttributeDef(id=0x004F, type=t.enum16) update_interval: Final = ZCLAttributeDef(id=0x0076, type=t.uint8_t) profile_name: Final = ZCLAttributeDef(id=0x00A8, type=t.CharacterString) class AnalogInputExtended(Cluster): cluster_id: Final = 0x0603 ep_attribute: Final = "bacnet_extended_analog_input" class AttributeDefs(BaseAttributeDefs): acked_transitions: Final = ZCLAttributeDef(id=0x0000, type=t.bitmap8) notification_class: Final = ZCLAttributeDef(id=0x0011, type=t.uint16_t) deadband: Final = ZCLAttributeDef(id=0x0019, type=t.Single) event_enable: Final = ZCLAttributeDef(id=0x0023, type=t.bitmap8) event_state: Final = ZCLAttributeDef(id=0x0024, type=t.enum8) high_limit: Final = ZCLAttributeDef(id=0x002D, type=t.Single) limit_enable: Final = ZCLAttributeDef(id=0x0034, type=t.bitmap8) low_limit: Final = ZCLAttributeDef(id=0x003B, type=t.Single) notify_type: Final = ZCLAttributeDef(id=0x0048, type=t.enum8) time_delay: Final = ZCLAttributeDef(id=0x0071, type=t.uint8_t) # event_time_stamps: Final = ZCLAttributeDef(id=0x0082, type=t.Array[3, t.uint32_t]) # integer, time of day, or structure of (date, time of day)) class ServerCommandDefs(BaseCommandDefs): transfer_apdu: Final = ZCLCommandDef(id=0x00, schema={}, direction=False) connect_req: Final = ZCLCommandDef(id=0x01, schema={}, direction=False) disconnect_req: Final = ZCLCommandDef(id=0x02, schema={}, direction=False) connect_status_noti: Final = ZCLCommandDef(id=0x03, schema={}, direction=False) class AnalogOutputRegular(Cluster): cluster_id: Final = 0x0604 ep_attribute: Final = "bacnet_regular_analog_output" class AttributeDefs(BaseAttributeDefs): cov_increment: Final = ZCLAttributeDef(id=0x0016, type=t.Single) device_type: Final = ZCLAttributeDef(id=0x001F, type=t.CharacterString) object_id: Final = 
ZCLAttributeDef(id=0x004B, type=t.FixedList[4, t.uint8_t]) object_name: Final = ZCLAttributeDef(id=0x004D, type=t.CharacterString) object_type: Final = ZCLAttributeDef(id=0x004F, type=t.enum16) update_interval: Final = ZCLAttributeDef(id=0x0076, type=t.uint8_t) profile_name: Final = ZCLAttributeDef(id=0x00A8, type=t.CharacterString) class AnalogOutputExtended(Cluster): cluster_id: Final = 0x0605 ep_attribute: Final = "bacnet_extended_analog_output" class AttributeDefs(BaseAttributeDefs): acked_transitions: Final = ZCLAttributeDef(id=0x0000, type=t.bitmap8) notification_class: Final = ZCLAttributeDef(id=0x0011, type=t.uint16_t) deadband: Final = ZCLAttributeDef(id=0x0019, type=t.Single) event_enable: Final = ZCLAttributeDef(id=0x0023, type=t.bitmap8) event_state: Final = ZCLAttributeDef(id=0x0024, type=t.enum8) high_limit: Final = ZCLAttributeDef(id=0x002D, type=t.Single) limit_enable: Final = ZCLAttributeDef(id=0x0034, type=t.bitmap8) low_limit: Final = ZCLAttributeDef(id=0x003B, type=t.Single) notify_type: Final = ZCLAttributeDef(id=0x0048, type=t.enum8) time_delay: Final = ZCLAttributeDef(id=0x0071, type=t.uint8_t) # event_time_stamps: Final = ZCLAttributeDef(id=0x0082, type=t.Array[3, t.uint32_t]) # integer, time of day, or structure of (date, time of day)) class AnalogValueRegular(Cluster): cluster_id: Final = 0x0606 ep_attribute: Final = "bacnet_regular_analog_value" class AttributeDefs(BaseAttributeDefs): cov_increment: Final = ZCLAttributeDef(id=0x0016, type=t.Single) object_id: Final = ZCLAttributeDef(id=0x004B, type=t.FixedList[4, t.uint8_t]) object_name: Final = ZCLAttributeDef(id=0x004D, type=t.CharacterString) object_type: Final = ZCLAttributeDef(id=0x004F, type=t.enum16) profile_name: Final = ZCLAttributeDef(id=0x00A8, type=t.CharacterString) class AnalogValueExtended(Cluster): cluster_id: Final = 0x0607 ep_attribute: Final = "bacnet_extended_analog_value" class AttributeDefs(BaseAttributeDefs): acked_transitions: Final = ZCLAttributeDef(id=0x0000, 
type=t.bitmap8) notification_class: Final = ZCLAttributeDef(id=0x0011, type=t.uint16_t) deadband: Final = ZCLAttributeDef(id=0x0019, type=t.Single) event_enable: Final = ZCLAttributeDef(id=0x0023, type=t.bitmap8) event_state: Final = ZCLAttributeDef(id=0x0024, type=t.enum8) high_limit: Final = ZCLAttributeDef(id=0x002D, type=t.Single) limit_enable: Final = ZCLAttributeDef(id=0x0034, type=t.bitmap8) low_limit: Final = ZCLAttributeDef(id=0x003B, type=t.Single) notify_type: Final = ZCLAttributeDef(id=0x0048, type=t.enum8) time_delay: Final = ZCLAttributeDef(id=0x0071, type=t.uint8_t) class BinaryInputRegular(Cluster): cluster_id: Final = 0x0608 ep_attribute: Final = "bacnet_regular_binary_input" class AttributeDefs(BaseAttributeDefs): change_of_state_count: Final = ZCLAttributeDef(id=0x000F, type=t.uint32_t) change_of_state_time: Final = ZCLAttributeDef(id=0x0010, type=DateTime) device_type: Final = ZCLAttributeDef(id=0x001F, type=t.CharacterString) elapsed_active_time: Final = ZCLAttributeDef(id=0x0021, type=t.uint32_t) object_id: Final = ZCLAttributeDef(id=0x004B, type=t.FixedList[4, t.uint8_t]) object_name: Final = ZCLAttributeDef(id=0x004D, type=t.CharacterString) object_type: Final = ZCLAttributeDef(id=0x004F, type=t.enum16) time_of_at_reset: Final = ZCLAttributeDef(id=0x0072, type=DateTime) time_of_sc_reset: Final = ZCLAttributeDef(id=0x0073, type=DateTime) profile_name: Final = ZCLAttributeDef(id=0x00A8, type=t.CharacterString) class BinaryInputExtended(Cluster): cluster_id: Final = 0x0609 ep_attribute: Final = "bacnet_extended_binary_input" class AttributeDefs(BaseAttributeDefs): acked_transitions: Final = ZCLAttributeDef(id=0x0000, type=t.bitmap8) alarm_value: Final = ZCLAttributeDef(id=0x0006, type=t.Bool) notification_class: Final = ZCLAttributeDef(id=0x0011, type=t.uint16_t) event_enable: Final = ZCLAttributeDef(id=0x0023, type=t.bitmap8) event_state: Final = ZCLAttributeDef(id=0x0024, type=t.enum8) notify_type: Final = ZCLAttributeDef(id=0x0048, 
type=t.enum8) time_delay: Final = ZCLAttributeDef(id=0x0071, type=t.uint8_t) # 0x0082: ZCLAttributeDef('event_time_stamps', type=TODO.array), # Array[3] of (16-bit unsigned # integer, time of day, or structure of (date, time of day)) class BinaryOutputRegular(Cluster): cluster_id: Final = 0x060A ep_attribute: Final = "bacnet_regular_binary_output" class AttributeDefs(BaseAttributeDefs): change_of_state_count: Final = ZCLAttributeDef(id=0x000F, type=t.uint32_t) change_of_state_time: Final = ZCLAttributeDef(id=0x0010, type=DateTime) device_type: Final = ZCLAttributeDef(id=0x001F, type=t.CharacterString) elapsed_active_time: Final = ZCLAttributeDef(id=0x0021, type=t.uint32_t) feed_back_value: Final = ZCLAttributeDef(id=0x0028, type=t.enum8) object_id: Final = ZCLAttributeDef(id=0x004B, type=t.FixedList[4, t.uint8_t]) object_name: Final = ZCLAttributeDef(id=0x004D, type=t.CharacterString) object_type: Final = ZCLAttributeDef(id=0x004F, type=t.enum16) time_of_at_reset: Final = ZCLAttributeDef(id=0x0072, type=DateTime) time_of_sc_reset: Final = ZCLAttributeDef(id=0x0073, type=DateTime) profile_name: Final = ZCLAttributeDef(id=0x00A8, type=t.CharacterString) class BinaryOutputExtended(Cluster): cluster_id: Final = 0x060B ep_attribute: Final = "bacnet_extended_binary_output" class AttributeDefs(BaseAttributeDefs): acked_transitions: Final = ZCLAttributeDef(id=0x0000, type=t.bitmap8) notification_class: Final = ZCLAttributeDef(id=0x0011, type=t.uint16_t) event_enable: Final = ZCLAttributeDef(id=0x0023, type=t.bitmap8) event_state: Final = ZCLAttributeDef(id=0x0024, type=t.enum8) notify_type: Final = ZCLAttributeDef(id=0x0048, type=t.enum8) time_delay: Final = ZCLAttributeDef(id=0x0071, type=t.uint8_t) # 0x0082: ZCLAttributeDef('event_time_stamps', type=TODO.array), # Array[3] of (16-bit unsigned # integer, time of day, or structure of (date, time of day)) class BinaryValueRegular(Cluster): cluster_id: Final = 0x060C ep_attribute: Final = "bacnet_regular_binary_value" class 
AttributeDefs(BaseAttributeDefs): change_of_state_count: Final = ZCLAttributeDef(id=0x000F, type=t.uint32_t) change_of_state_time: Final = ZCLAttributeDef(id=0x0010, type=DateTime) elapsed_active_time: Final = ZCLAttributeDef(id=0x0021, type=t.uint32_t) object_id: Final = ZCLAttributeDef(id=0x004B, type=t.FixedList[4, t.uint8_t]) object_name: Final = ZCLAttributeDef(id=0x004D, type=t.CharacterString) object_type: Final = ZCLAttributeDef(id=0x004F, type=t.enum16) time_of_at_reset: Final = ZCLAttributeDef(id=0x0072, type=DateTime) time_of_sc_reset: Final = ZCLAttributeDef(id=0x0073, type=DateTime) profile_name: Final = ZCLAttributeDef(id=0x00A8, type=t.CharacterString) class BinaryValueExtended(Cluster): cluster_id: Final = 0x060D ep_attribute: Final = "bacnet_extended_binary_value" class AttributeDefs(BaseAttributeDefs): acked_transitions: Final = ZCLAttributeDef(id=0x0000, type=t.bitmap8) alarm_value: Final = ZCLAttributeDef(id=0x0006, type=t.Bool) notification_class: Final = ZCLAttributeDef(id=0x0011, type=t.uint16_t) event_enable: Final = ZCLAttributeDef(id=0x0023, type=t.bitmap8) event_state: Final = ZCLAttributeDef(id=0x0024, type=t.enum8) notify_type: Final = ZCLAttributeDef(id=0x0048, type=t.enum8) time_delay: Final = ZCLAttributeDef(id=0x0071, type=t.uint8_t) # 0x0082: ZCLAttributeDef('event_time_stamps', type=TODO.array), # Array[3] of (16-bit unsigned # integer, time of day, or structure of (date, time of day)) class MultistateInputRegular(Cluster): cluster_id: Final = 0x060E ep_attribute: Final = "bacnet_regular_multistate_input" class AttributeDefs(BaseAttributeDefs): device_type: Final = ZCLAttributeDef(id=0x001F, type=t.CharacterString) object_id: Final = ZCLAttributeDef(id=0x004B, type=t.FixedList[4, t.uint8_t]) object_name: Final = ZCLAttributeDef(id=0x004D, type=t.CharacterString) object_type: Final = ZCLAttributeDef(id=0x004F, type=t.enum16) profile_name: Final = ZCLAttributeDef(id=0x00A8, type=t.CharacterString) class 
MultistateInputExtended(Cluster): cluster_id: Final = 0x060F ep_attribute: Final = "bacnet_extended_multistate_input" class AttributeDefs(BaseAttributeDefs): acked_transitions: Final = ZCLAttributeDef(id=0x0000, type=t.bitmap8) alarm_value: Final = ZCLAttributeDef(id=0x0006, type=t.uint16_t) notification_class: Final = ZCLAttributeDef(id=0x0011, type=t.uint16_t) event_enable: Final = ZCLAttributeDef(id=0x0023, type=t.bitmap8) event_state: Final = ZCLAttributeDef(id=0x0024, type=t.enum8) fault_values: Final = ZCLAttributeDef(id=0x0025, type=t.uint16_t) notify_type: Final = ZCLAttributeDef(id=0x0048, type=t.enum8) time_delay: Final = ZCLAttributeDef(id=0x0071, type=t.uint8_t) # 0x0082: ZCLAttributeDef('event_time_stamps', type=TODO.array), # Array[3] of (16-bit unsigned # integer, time of day, or structure of (date, time of day)) class MultistateOutputRegular(Cluster): cluster_id: Final = 0x0610 ep_attribute: Final = "bacnet_regular_multistate_output" class AttributeDefs(BaseAttributeDefs): device_type: Final = ZCLAttributeDef(id=0x001F, type=t.CharacterString) feed_back_value: Final = ZCLAttributeDef(id=0x0028, type=t.enum8) object_id: Final = ZCLAttributeDef(id=0x004B, type=t.FixedList[4, t.uint8_t]) object_name: Final = ZCLAttributeDef(id=0x004D, type=t.CharacterString) object_type: Final = ZCLAttributeDef(id=0x004F, type=t.enum16) profile_name: Final = ZCLAttributeDef(id=0x00A8, type=t.CharacterString) class MultistateOutputExtended(Cluster): cluster_id: Final = 0x0611 ep_attribute: Final = "bacnet_extended_multistate_output" class AttributeDefs(BaseAttributeDefs): acked_transitions: Final = ZCLAttributeDef(id=0x0000, type=t.bitmap8) notification_class: Final = ZCLAttributeDef(id=0x0011, type=t.uint16_t) event_enable: Final = ZCLAttributeDef(id=0x0023, type=t.bitmap8) event_state: Final = ZCLAttributeDef(id=0x0024, type=t.enum8) notify_type: Final = ZCLAttributeDef(id=0x0048, type=t.enum8) time_delay: Final = ZCLAttributeDef(id=0x0071, type=t.uint8_t) # 0x0082: 
ZCLAttributeDef('event_time_stamps', type=TODO.array), # Array[3] of (16-bit unsigned # integer, time of day, or structure of (date, time of day)) class MultistateValueRegular(Cluster): cluster_id: Final = 0x0612 ep_attribute: Final = "bacnet_regular_multistate_value" class AttributeDefs(BaseAttributeDefs): object_id: Final = ZCLAttributeDef(id=0x004B, type=t.FixedList[4, t.uint8_t]) object_name: Final = ZCLAttributeDef(id=0x004D, type=t.CharacterString) object_type: Final = ZCLAttributeDef(id=0x004F, type=t.enum16) profile_name: Final = ZCLAttributeDef(id=0x00A8, type=t.CharacterString) class MultistateValueExtended(Cluster): cluster_id: Final = 0x0613 ep_attribute: Final = "bacnet_extended_multistate_value" class AttributeDefs(BaseAttributeDefs): acked_transitions: Final = ZCLAttributeDef(id=0x0000, type=t.bitmap8) alarm_value: Final = ZCLAttributeDef(id=0x0006, type=t.uint16_t) notification_class: Final = ZCLAttributeDef(id=0x0011, type=t.uint16_t) event_enable: Final = ZCLAttributeDef(id=0x0023, type=t.bitmap8) event_state: Final = ZCLAttributeDef(id=0x0024, type=t.enum8) fault_values: Final = ZCLAttributeDef(id=0x0025, type=t.uint16_t) notify_type: Final = ZCLAttributeDef(id=0x0048, type=t.enum8) time_delay: Final = ZCLAttributeDef(id=0x0071, type=t.uint8_t) # 0x0082: ZCLAttributeDef('event_time_stamps', type=TODO.array), # Array[3] of (16-bit unsigned # integer, time of day, or structure of (date, time of day)) zigpy-0.62.3/zigpy/zcl/clusters/security.py000066400000000000000000000353061456054056700210060ustar00rootroot00000000000000"""Security and Safety Functional Domain""" from __future__ import annotations from typing import Any, Final import zigpy.types as t from zigpy.typing import AddressingMode from zigpy.zcl import Cluster, foundation from zigpy.zcl.foundation import ( BaseAttributeDefs, BaseCommandDefs, ZCLAttributeDef, ZCLCommandDef, ) class ZoneState(t.enum8): Not_enrolled = 0x00 Enrolled = 0x01 class ZoneType(t.enum_factory(t.uint16_t, 
"manufacturer_specific")): """Zone type enum.""" Standard_CIE = 0x0000 Motion_Sensor = 0x000D Contact_Switch = 0x0015 Fire_Sensor = 0x0028 Water_Sensor = 0x002A Carbon_Monoxide_Sensor = 0x002B Personal_Emergency_Device = 0x002C Vibration_Movement_Sensor = 0x002D Remote_Control = 0x010F Key_Fob = 0x0115 Key_Pad = 0x021D Standard_Warning_Device = 0x0225 Glass_Break_Sensor = 0x0226 Security_Repeater = 0x0229 Invalid_Zone_Type = 0xFFFF class ZoneStatus(t.bitmap16): """ZoneStatus attribute.""" Alarm_1 = 0x0001 Alarm_2 = 0x0002 Tamper = 0x0004 Battery = 0x0008 Supervision_reports = 0x0010 Restore_reports = 0x0020 Trouble = 0x0040 AC_mains = 0x0080 Test = 0x0100 Battery_Defect = 0x0200 class EnrollResponse(t.enum8): """Enroll response code.""" Success = 0x00 Not_supported = 0x01 No_enroll_permit = 0x02 Too_many_zones = 0x03 class IasZone(Cluster): """The IAS Zone cluster defines an interface to the functionality of an IAS security zone device. IAS Zone supports up to two alarm types per zone, low battery reports and supervision of the IAS network. 
""" ZoneState: Final = ZoneState ZoneType: Final = ZoneType ZoneStatus: Final = ZoneStatus EnrollResponse: Final = EnrollResponse cluster_id: Final = 0x0500 name: Final = "IAS Zone" ep_attribute: Final = "ias_zone" class AttributeDefs(BaseAttributeDefs): # Zone Information zone_state: Final = ZCLAttributeDef( id=0x0000, type=ZoneState, access="r", mandatory=True ) zone_type: Final = ZCLAttributeDef( id=0x0001, type=ZoneType, access="r", mandatory=True ) zone_status: Final = ZCLAttributeDef( id=0x0002, type=ZoneStatus, access="r", mandatory=True ) # Zone Settings cie_addr: Final = ZCLAttributeDef( id=0x0010, type=t.EUI64, access="rw", mandatory=True ) zone_id: Final = ZCLAttributeDef( id=0x0011, type=t.uint8_t, access="r", mandatory=True ) # Both attributes will be supported/unsupported num_zone_sensitivity_levels_supported: Final = ZCLAttributeDef( id=0x0012, type=t.uint8_t, access="r" ) current_zone_sensitivity_level: Final = ZCLAttributeDef( id=0x0013, type=t.uint8_t, access="rw" ) class ServerCommandDefs(BaseCommandDefs): enroll_response: Final = ZCLCommandDef( id=0x00, schema={"enroll_response_code": EnrollResponse, "zone_id": t.uint8_t}, direction=True, ) init_normal_op_mode: Final = ZCLCommandDef(id=0x01, schema={}, direction=False) init_test_mode: Final = ZCLCommandDef( id=0x02, schema={ "test_mode_duration": t.uint8_t, "current_zone_sensitivity_level": t.uint8_t, }, direction=False, ) class ClientCommandDefs(BaseCommandDefs): status_change_notification: Final = ZCLCommandDef( id=0x00, schema={ "zone_status": ZoneStatus, "extended_status": t.bitmap8, "zone_id": t.uint8_t, "delay": t.uint16_t, }, direction=False, ) enroll: Final = ZCLCommandDef( id=0x01, schema={"zone_type": ZoneType, "manufacturer_code": t.uint16_t}, direction=False, ) def handle_cluster_request( self, hdr: foundation.ZCLHeader, args: list[Any], *, dst_addressing: AddressingMode | None = None, ): if ( hdr.command_id == self.commands_by_name["enroll_response"].id and self.is_server and not 
hdr.frame_control.disable_default_response ): hdr.frame_control.is_reply = False # this is a client -> server cmd self.send_default_rsp(hdr, foundation.Status.SUCCESS) class AlarmStatus(t.enum8): """IAS ACE alarm status enum.""" No_Alarm = 0x00 Burglar = 0x01 Fire = 0x02 Emergency = 0x03 Police_Panic = 0x04 Fire_Panic = 0x05 Emergency_Panic = 0x06 class ArmMode(t.enum8): """IAS ACE arm mode enum.""" Disarm = 0x00 Arm_Day_Home_Only = 0x01 Arm_Night_Sleep_Only = 0x02 Arm_All_Zones = 0x03 class ArmNotification(t.enum8): """IAS ACE arm notification enum.""" All_Zones_Disarmed = 0x00 Only_Day_Home_Zones_Armed = 0x01 Only_Night_Sleep_Zones_Armed = 0x02 All_Zones_Armed = 0x03 Invalid_Arm_Disarm_Code = 0x04 Not_Ready_To_Arm = 0x05 Already_Disarmed = 0x06 class AudibleNotification(t.enum_factory(t.uint8_t, "manufacturer_specific")): """IAS ACE audible notification enum.""" Mute = 0x00 Default_Sound = 0x01 class BypassResponse(t.enum8): """Bypass result.""" Zone_bypassed = 0x00 Zone_not_bypassed = 0x01 Not_allowed = 0x02 Invalid_Zone_ID = 0x03 Unknown_Zone_ID = 0x04 Invalid_Code = 0x05 class PanelStatus(t.enum8): """IAS ACE panel status enum.""" Panel_Disarmed = 0x00 Armed_Stay = 0x01 Armed_Night = 0x02 Armed_Away = 0x03 Exit_Delay = 0x04 Entry_Delay = 0x05 Not_Ready_To_Arm = 0x06 In_Alarm = 0x07 Arming_Stay = 0x08 Arming_Night = 0x09 Arming_Away = 0x0A class ZoneStatusRsp(t.Struct): """Zone status response.""" zone_id: t.uint8_t zone_status: IasZone.ZoneStatus class IasAce(Cluster): """IAS Ancillary Control Equipment cluster.""" AlarmStatus: Final = AlarmStatus ArmMode: Final = ArmMode ArmNotification: Final = ArmNotification AudibleNotification: Final = AudibleNotification BypassResponse: Final = BypassResponse PanelStatus: Final = PanelStatus ZoneType: Final = IasZone.ZoneType ZoneStatus: Final = IasZone.ZoneStatus ZoneStatusRsp: Final = ZoneStatusRsp cluster_id: Final = 0x0501 name: Final = "IAS Ancillary Control Equipment" ep_attribute: Final = "ias_ace" class 
AttributeDefs(BaseAttributeDefs): cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class ServerCommandDefs(BaseCommandDefs): arm: Final = ZCLCommandDef( id=0x00, schema={ "arm_mode": ArmMode, "arm_disarm_code": t.CharacterString, "zone_id": t.uint8_t, }, direction=False, ) bypass: Final = ZCLCommandDef( id=0x01, schema={ "zones_ids": t.LVList[t.uint8_t], "arm_disarm_code": t.CharacterString, }, direction=False, ) emergency: Final = ZCLCommandDef(id=0x02, schema={}, direction=False) fire: Final = ZCLCommandDef(id=0x03, schema={}, direction=False) panic: Final = ZCLCommandDef(id=0x04, schema={}, direction=False) get_zone_id_map: Final = ZCLCommandDef(id=0x05, schema={}, direction=False) get_zone_info: Final = ZCLCommandDef( id=0x06, schema={"zone_id": t.uint8_t}, direction=False ) get_panel_status: Final = ZCLCommandDef(id=0x07, schema={}, direction=False) get_bypassed_zone_list: Final = ZCLCommandDef( id=0x08, schema={}, direction=False ) get_zone_status: Final = ZCLCommandDef( id=0x09, schema={ "starting_zone_id": t.uint8_t, "max_num_zone_ids": t.uint8_t, "zone_status_mask_flag": t.Bool, "zone_status_mask": ZoneStatus, }, direction=False, ) class ClientCommandDefs(BaseCommandDefs): arm_response: Final = ZCLCommandDef( id=0x00, schema={"arm_notification": ArmNotification}, direction=True ) get_zone_id_map_response: Final = ZCLCommandDef( id=0x01, schema={"zone_id_map_sections": t.List[t.bitmap16]}, direction=True, ) get_zone_info_response: Final = ZCLCommandDef( id=0x02, schema={ "zone_id": t.uint8_t, "zone_type": ZoneType, "ieee": t.EUI64, "zone_label": t.CharacterString, }, direction=True, ) zone_status_changed: Final = ZCLCommandDef( id=0x03, schema={ "zone_id": t.uint8_t, "zone_status": ZoneStatus, "audible_notification": AudibleNotification, "zone_label": t.CharacterString, }, direction=False, ) panel_status_changed: Final = ZCLCommandDef( id=0x04, schema={ "panel_status": PanelStatus, 
"seconds_remaining": t.uint8_t, "audible_notification": AudibleNotification, "alarm_status": AlarmStatus, }, direction=False, ) panel_status_response: Final = ZCLCommandDef( id=0x05, schema={ "panel_status": PanelStatus, "seconds_remaining": t.uint8_t, "audible_notification": AudibleNotification, "alarm_status": AlarmStatus, }, direction=True, ) set_bypassed_zone_list: Final = ZCLCommandDef( id=0x06, schema={"zone_ids": t.LVList[t.uint8_t]}, direction=False ) bypass_response: Final = ZCLCommandDef( id=0x07, schema={"bypass_results": t.LVList[BypassResponse]}, direction=True ) get_zone_status_response: Final = ZCLCommandDef( id=0x08, schema={ "zone_status_complete": t.Bool, "zone_statuses": t.LVList[ZoneStatusRsp], }, direction=True, ) class Strobe(t.enum8): No_strobe = 0x00 Strobe = 0x01 class _SquawkOrWarningCommand: def __init__(self, value: int = 0) -> None: self.value = t.uint8_t(value) @classmethod def deserialize(cls, data: bytes) -> tuple[_SquawkOrWarningCommand, bytes]: val, data = t.uint8_t.deserialize(data) return cls(val), data def serialize(self) -> bytes: return t.uint8_t(self.value).serialize() def __repr__(self) -> str: return ( f"<{self.__class__.__name__}.mode={self.mode.name} " f"strobe={self.strobe.name} level={self.level.name}: " f"{self.value}>" ) def __eq__(self, other): """Compare to int.""" return self.value == other class StrobeLevel(t.enum8): Low_level_strobe = 0x00 Medium_level_strobe = 0x01 High_level_strobe = 0x02 Very_high_level_strobe = 0x03 class WarningType(_SquawkOrWarningCommand): Strobe = Strobe class SirenLevel(t.enum8): Low_level_sound = 0x00 Medium_level_sound = 0x01 High_level_sound = 0x02 Very_high_level_sound = 0x03 class WarningMode(t.enum8): Stop = 0x00 Burglar = 0x01 Fire = 0x02 Emergency = 0x03 Police_Panic = 0x04 Fire_Panic = 0x05 Emergency_Panic = 0x06 @property def mode(self) -> WarningMode: return self.WarningMode((self.value >> 4) & 0x0F) @mode.setter def mode(self, mode: WarningMode) -> None: self.value = 
(self.value & 0xF) | (mode << 4) @property def strobe(self) -> Strobe: return self.Strobe((self.value >> 2) & 0x01) @strobe.setter def strobe(self, strobe: Strobe) -> None: self.value = (self.value & 0xF7) | ( (strobe & 0x01) << 2 # type:ignore[operator] ) @property def level(self) -> SirenLevel: return self.SirenLevel(self.value & 0x03) @level.setter def level(self, level: SirenLevel) -> None: self.value = (self.value & 0xFC) | (level & 0x03) class Squawk(_SquawkOrWarningCommand): Strobe = Strobe class SquawkLevel(t.enum8): Low_level_sound = 0x00 Medium_level_sound = 0x01 High_level_sound = 0x02 Very_high_level_sound = 0x03 class SquawkMode(t.enum8): Armed = 0x00 Disarmed = 0x01 @property def mode(self) -> SquawkMode: return self.SquawkMode((self.value >> 4) & 0x0F) @mode.setter def mode(self, mode: SquawkMode) -> None: self.value = (self.value & 0xF) | ((mode & 0x0F) << 4) @property def strobe(self) -> Strobe: return self.Strobe((self.value >> 3) & 0x01) @strobe.setter def strobe(self, strobe: Strobe) -> None: self.value = (self.value & 0xF7) | (strobe << 3) # type:ignore[operator] @property def level(self) -> SquawkLevel: return self.SquawkLevel(self.value & 0x03) @level.setter def level(self, level: SquawkLevel) -> None: self.value = (self.value & 0xFC) | (level & 0x03) class IasWd(Cluster): """The IAS WD cluster provides an interface to the functionality of any Warning Device equipment of the IAS system. Using this cluster, a ZigBee enabled CIE device can access a ZigBee enabled IAS WD device and issue alarm warning indications (siren, strobe lighting, etc.) 
when a system alarm condition is detected """ StrobeLevel: Final = StrobeLevel Warning: Final = WarningType Squawk: Final = Squawk cluster_id: Final = 0x0502 name: Final = "IAS Warning Device" ep_attribute: Final = "ias_wd" class AttributeDefs(BaseAttributeDefs): max_duration: Final = ZCLAttributeDef( id=0x0000, type=t.uint16_t, access="rw", mandatory=True ) cluster_revision: Final = foundation.ZCL_CLUSTER_REVISION_ATTR reporting_status: Final = foundation.ZCL_REPORTING_STATUS_ATTR class ServerCommandDefs(BaseCommandDefs): start_warning: Final = ZCLCommandDef( id=0x00, schema={ "warning": WarningType, "warning_duration": t.uint16_t, "strobe_duty_cycle": t.uint8_t, "stobe_level": StrobeLevel, }, direction=False, ) squawk: Final = ZCLCommandDef( id=0x01, schema={"squawk": Squawk}, direction=False ) zigpy-0.62.3/zigpy/zcl/clusters/smartenergy.py000066400000000000000000000450661456054056700215030ustar00rootroot00000000000000from __future__ import annotations from typing import Final import zigpy.types as t from zigpy.zcl import Cluster from zigpy.zcl.foundation import ( BaseAttributeDefs, BaseCommandDefs, ZCLAttributeDef, ZCLCommandDef, ) class Price(Cluster): cluster_id: Final = 0x0700 ep_attribute: Final = "smartenergy_price" class Drlc(Cluster): cluster_id: Final = 0x0701 ep_attribute: Final = "smartenergy_drlc" class RegisteredTier(t.enum8): No_Tier = 0x00 Tier_1 = 0x01 Tier_2 = 0x02 Tier_3 = 0x03 Tier_4 = 0x04 Tier_5 = 0x05 Tier_6 = 0x06 Tier_7 = 0x07 Tier_8 = 0x08 Tier_9 = 0x09 Tier_10 = 0x0A Tier_11 = 0x0B Tier_12 = 0x0C Tier_13 = 0x0D Tier_14 = 0x0E Extended_Tier = 0x0F class Metering(Cluster): RegisteredTier: Final = RegisteredTier cluster_id: Final = 0x0702 ep_attribute: Final = "smartenergy_metering" class AttributeDefs(BaseAttributeDefs): current_summ_delivered: Final = ZCLAttributeDef( id=0x0000, type=t.uint48_t, access="r" ) current_summ_received: Final = ZCLAttributeDef( id=0x0001, type=t.uint48_t, access="r" ) current_max_demand_delivered: Final = 
ZCLAttributeDef( id=0x0002, type=t.uint48_t, access="r" ) current_max_demand_received: Final = ZCLAttributeDef( id=0x0003, type=t.uint48_t, access="r" ) dft_summ: Final = ZCLAttributeDef(id=0x0004, type=t.uint48_t, access="r") daily_freeze_time: Final = ZCLAttributeDef( id=0x0005, type=t.uint16_t, access="r" ) power_factor: Final = ZCLAttributeDef(id=0x0006, type=t.int8s, access="r") reading_snapshot_time: Final = ZCLAttributeDef( id=0x0007, type=t.UTCTime, access="r" ) current_max_demand_delivered_time: Final = ZCLAttributeDef( id=0x0008, type=t.UTCTime, access="r" ) current_max_demand_received_time: Final = ZCLAttributeDef( id=0x0009, type=t.UTCTime, access="r" ) default_update_period: Final = ZCLAttributeDef( id=0x000A, type=t.uint8_t, access="r" ) fast_poll_update_period: Final = ZCLAttributeDef( id=0x000B, type=t.uint8_t, access="r" ) current_block_period_consump_delivered: Final = ZCLAttributeDef( id=0x000C, type=t.uint48_t, access="r" ) daily_consump_target: Final = ZCLAttributeDef( id=0x000D, type=t.uint24_t, access="r" ) current_block: Final = ZCLAttributeDef(id=0x000E, type=t.enum8, access="r") profile_interval_period: Final = ZCLAttributeDef( id=0x000F, type=t.enum8, access="r" ) # 0x0010: ('interval_read_reporting_period', UNKNOWN), # Deprecated preset_reading_time: Final = ZCLAttributeDef( id=0x0011, type=t.uint16_t, access="r" ) volume_per_report: Final = ZCLAttributeDef( id=0x0012, type=t.uint16_t, access="r" ) flow_restriction: Final = ZCLAttributeDef(id=0x0013, type=t.uint8_t, access="r") supply_status: Final = ZCLAttributeDef(id=0x0014, type=t.enum8, access="r") current_in_energy_carrier_summ: Final = ZCLAttributeDef( id=0x0015, type=t.uint48_t, access="r" ) current_out_energy_carrier_summ: Final = ZCLAttributeDef( id=0x0016, type=t.uint48_t, access="r" ) inlet_temperature: Final = ZCLAttributeDef(id=0x0017, type=t.int24s, access="r") outlet_temperature: Final = ZCLAttributeDef( id=0x0018, type=t.int24s, access="r" ) control_temperature: Final = 
ZCLAttributeDef( id=0x0019, type=t.int24s, access="r" ) current_in_energy_carrier_demand: Final = ZCLAttributeDef( id=0x001A, type=t.int24s, access="r" ) current_out_energy_carrier_demand: Final = ZCLAttributeDef( id=0x001B, type=t.int24s, access="r" ) current_block_period_consump_received: Final = ZCLAttributeDef( id=0x001D, type=t.uint48_t, access="r" ) current_block_received: Final = ZCLAttributeDef( id=0x001E, type=t.uint48_t, access="r" ) dft_summation_received: Final = ZCLAttributeDef( id=0x001F, type=t.uint48_t, access="r" ) active_register_tier_delivered: Final = ZCLAttributeDef( id=0x0020, type=RegisteredTier, access="r" ) active_register_tier_received: Final = ZCLAttributeDef( id=0x0021, type=RegisteredTier, access="r" ) last_block_switch_time: Final = ZCLAttributeDef( id=0x0022, type=t.UTCTime, access="r" ) # 0x0100: ('change_reporting_profile', UNKNOWN), current_tier1_summ_delivered: Final = ZCLAttributeDef( id=0x0100, type=t.uint48_t, access="r" ) current_tier1_summ_received: Final = ZCLAttributeDef( id=0x0101, type=t.uint48_t, access="r" ) current_tier2_summ_delivered: Final = ZCLAttributeDef( id=0x0102, type=t.uint48_t, access="r" ) current_tier2_summ_received: Final = ZCLAttributeDef( id=0x0103, type=t.uint48_t, access="r" ) current_tier3_summ_delivered: Final = ZCLAttributeDef( id=0x0104, type=t.uint48_t, access="r" ) current_tier3_summ_received: Final = ZCLAttributeDef( id=0x0105, type=t.uint48_t, access="r" ) current_tier4_summ_delivered: Final = ZCLAttributeDef( id=0x0106, type=t.uint48_t, access="r" ) current_tier4_summ_received: Final = ZCLAttributeDef( id=0x0107, type=t.uint48_t, access="r" ) current_tier5_summ_delivered: Final = ZCLAttributeDef( id=0x0108, type=t.uint48_t, access="r" ) current_tier5_summ_received: Final = ZCLAttributeDef( id=0x0109, type=t.uint48_t, access="r" ) current_tier6_summ_delivered: Final = ZCLAttributeDef( id=0x010A, type=t.uint48_t, access="r" ) current_tier6_summ_received: Final = ZCLAttributeDef( id=0x010B, 
type=t.uint48_t, access="r" ) current_tier7_summ_delivered: Final = ZCLAttributeDef( id=0x010C, type=t.uint48_t, access="r" ) current_tier7_summ_received: Final = ZCLAttributeDef( id=0x010D, type=t.uint48_t, access="r" ) current_tier8_summ_delivered: Final = ZCLAttributeDef( id=0x010E, type=t.uint48_t, access="r" ) current_tier8_summ_received: Final = ZCLAttributeDef( id=0x010F, type=t.uint48_t, access="r" ) current_tier9_summ_delivered: Final = ZCLAttributeDef( id=0x0110, type=t.uint48_t, access="r" ) current_tier9_summ_received: Final = ZCLAttributeDef( id=0x0111, type=t.uint48_t, access="r" ) current_tier10_summ_delivered: Final = ZCLAttributeDef( id=0x0112, type=t.uint48_t, access="r" ) current_tier10_summ_received: Final = ZCLAttributeDef( id=0x0113, type=t.uint48_t, access="r" ) current_tier11_summ_delivered: Final = ZCLAttributeDef( id=0x0114, type=t.uint48_t, access="r" ) current_tier11_summ_received: Final = ZCLAttributeDef( id=0x0115, type=t.uint48_t, access="r" ) current_tier12_summ_delivered: Final = ZCLAttributeDef( id=0x0116, type=t.uint48_t, access="r" ) current_tier12_summ_received: Final = ZCLAttributeDef( id=0x0117, type=t.uint48_t, access="r" ) current_tier13_summ_delivered: Final = ZCLAttributeDef( id=0x0118, type=t.uint48_t, access="r" ) current_tier13_summ_received: Final = ZCLAttributeDef( id=0x0119, type=t.uint48_t, access="r" ) current_tier14_summ_delivered: Final = ZCLAttributeDef( id=0x011A, type=t.uint48_t, access="r" ) current_tier14_summ_received: Final = ZCLAttributeDef( id=0x011B, type=t.uint48_t, access="r" ) current_tier15_summ_delivered: Final = ZCLAttributeDef( id=0x011C, type=t.uint48_t, access="r" ) current_tier15_summ_received: Final = ZCLAttributeDef( id=0x011D, type=t.uint48_t, access="r" ) status: Final = ZCLAttributeDef(id=0x0200, type=t.bitmap8, access="r") remaining_battery_life: Final = ZCLAttributeDef( id=0x0201, type=t.uint8_t, access="r" ) hours_in_operation: Final = ZCLAttributeDef( id=0x0202, type=t.uint24_t, 
access="r" ) hours_in_fault: Final = ZCLAttributeDef(id=0x0203, type=t.uint24_t, access="r") extended_status: Final = ZCLAttributeDef(id=0x0204, type=t.bitmap64, access="r") remaining_battery_life_days: Final = ZCLAttributeDef( id=0x0205, type=t.uint16_t, access="r" ) current_meter_id: Final = ZCLAttributeDef(id=0x0206, type=t.LVBytes, access="r") iambient_consumption_indicator: Final = ZCLAttributeDef( id=0x0207, type=t.enum8, access="r" ) unit_of_measure: Final = ZCLAttributeDef(id=0x0300, type=t.enum8, access="r") multiplier: Final = ZCLAttributeDef(id=0x0301, type=t.uint24_t, access="r") divisor: Final = ZCLAttributeDef(id=0x0302, type=t.uint24_t, access="r") summation_formatting: Final = ZCLAttributeDef( id=0x0303, type=t.bitmap8, access="r" ) demand_formatting: Final = ZCLAttributeDef( id=0x0304, type=t.bitmap8, access="r" ) historical_consump_formatting: Final = ZCLAttributeDef( id=0x0305, type=t.bitmap8, access="r" ) metering_device_type: Final = ZCLAttributeDef( id=0x0306, type=t.bitmap8, access="r" ) site_id: Final = ZCLAttributeDef( id=0x0307, type=t.LimitedLVBytes(32), access="r" ) meter_serial_number: Final = ZCLAttributeDef( id=0x0308, type=t.LimitedLVBytes(24), access="r" ) energy_carrier_unit_of_meas: Final = ZCLAttributeDef( id=0x0309, type=t.enum8, access="r" ) energy_carrier_summ_formatting: Final = ZCLAttributeDef( id=0x030A, type=t.bitmap8, access="r" ) energy_carrier_demand_formatting: Final = ZCLAttributeDef( id=0x030B, type=t.bitmap8, access="r" ) temperature_unit_of_measure: Final = ZCLAttributeDef( id=0x030C, type=t.enum8, access="r" ) temperature_formatting: Final = ZCLAttributeDef( id=0x030D, type=t.bitmap8, access="r" ) module_serial_number: Final = ZCLAttributeDef( id=0x030E, type=t.LimitedLVBytes(24), access="r" ) operating_tariff_label_delivered: Final = ZCLAttributeDef( id=0x030F, type=t.LimitedLVBytes(24), access="r" ) operating_tariff_label_received: Final = ZCLAttributeDef( id=0x0310, type=t.LimitedLVBytes(24), access="r" ) 
customer_id_number: Final = ZCLAttributeDef( id=0x0311, type=t.LimitedLVBytes(24), access="r" ) alternative_unit_of_measure: Final = ZCLAttributeDef( id=0x0312, type=t.enum8, access="r" ) alternative_demand_formatting: Final = ZCLAttributeDef( id=0x0313, type=t.bitmap8, access="r" ) alternative_consumption_formatting: Final = ZCLAttributeDef( id=0x0314, type=t.bitmap8, access="r" ) instantaneous_demand: Final = ZCLAttributeDef( id=0x0400, type=t.int24s, access="r" ) currentday_consump_delivered: Final = ZCLAttributeDef( id=0x0401, type=t.uint24_t, access="r" ) currentday_consump_received: Final = ZCLAttributeDef( id=0x0402, type=t.uint24_t, access="r" ) previousday_consump_delivered: Final = ZCLAttributeDef( id=0x0403, type=t.uint24_t, access="r" ) previousday_consump_received: Final = ZCLAttributeDef( id=0x0404, type=t.uint24_t, access="r" ) cur_part_profile_int_start_time_delivered: Final = ZCLAttributeDef( id=0x0405, type=t.uint32_t, access="r" ) cur_part_profile_int_start_time_received: Final = ZCLAttributeDef( id=0x0406, type=t.uint32_t, access="r" ) cur_part_profile_int_value_delivered: Final = ZCLAttributeDef( id=0x0407, type=t.uint24_t, access="r" ) cur_part_profile_int_value_received: Final = ZCLAttributeDef( id=0x0408, type=t.uint24_t, access="r" ) current_day_max_pressure: Final = ZCLAttributeDef( id=0x0409, type=t.uint48_t, access="r" ) current_day_min_pressure: Final = ZCLAttributeDef( id=0x040A, type=t.uint48_t, access="r" ) previous_day_max_pressure: Final = ZCLAttributeDef( id=0x040B, type=t.uint48_t, access="r" ) previous_day_min_pressure: Final = ZCLAttributeDef( id=0x040C, type=t.uint48_t, access="r" ) current_day_max_demand: Final = ZCLAttributeDef( id=0x040D, type=t.int24s, access="r" ) previous_day_max_demand: Final = ZCLAttributeDef( id=0x040E, type=t.int24s, access="r" ) current_month_max_demand: Final = ZCLAttributeDef( id=0x040F, type=t.int24s, access="r" ) current_year_max_demand: Final = ZCLAttributeDef( id=0x0410, type=t.int24s, 
access="r" ) currentday_max_energy_carr_demand: Final = ZCLAttributeDef( id=0x0411, type=t.int24s, access="r" ) previousday_max_energy_carr_demand: Final = ZCLAttributeDef( id=0x0412, type=t.int24s, access="r" ) cur_month_max_energy_carr_demand: Final = ZCLAttributeDef( id=0x0413, type=t.int24s, access="r" ) cur_month_min_energy_carr_demand: Final = ZCLAttributeDef( id=0x0414, type=t.int24s, access="r" ) cur_year_max_energy_carr_demand: Final = ZCLAttributeDef( id=0x0415, type=t.int24s, access="r" ) cur_year_min_energy_carr_demand: Final = ZCLAttributeDef( id=0x0416, type=t.int24s, access="r" ) max_number_of_periods_delivered: Final = ZCLAttributeDef( id=0x0500, type=t.uint8_t, access="r" ) current_demand_delivered: Final = ZCLAttributeDef( id=0x0600, type=t.uint24_t, access="r" ) demand_limit: Final = ZCLAttributeDef(id=0x0601, type=t.uint24_t, access="r") demand_integration_period: Final = ZCLAttributeDef( id=0x0602, type=t.uint8_t, access="r" ) number_of_demand_subintervals: Final = ZCLAttributeDef( id=0x0603, type=t.uint8_t, access="r" ) demand_limit_arm_duration: Final = ZCLAttributeDef( id=0x0604, type=t.uint16_t, access="r" ) generic_alarm_mask: Final = ZCLAttributeDef( id=0x0800, type=t.bitmap16, access="r" ) electricity_alarm_mask: Final = ZCLAttributeDef( id=0x0801, type=t.bitmap32, access="r" ) gen_flow_pressure_alarm_mask: Final = ZCLAttributeDef( id=0x0802, type=t.bitmap16, access="r" ) water_specific_alarm_mask: Final = ZCLAttributeDef( id=0x0803, type=t.bitmap16, access="r" ) heat_cool_specific_alarm_mask: Final = ZCLAttributeDef( id=0x0804, type=t.bitmap16, access="r" ) gas_specific_alarm_mask: Final = ZCLAttributeDef( id=0x0805, type=t.bitmap16, access="r" ) extended_generic_alarm_mask: Final = ZCLAttributeDef( id=0x0806, type=t.bitmap48, access="r" ) manufacture_alarm_mask: Final = ZCLAttributeDef( id=0x0807, type=t.bitmap16, access="r" ) bill_to_date: Final = ZCLAttributeDef(id=0x0A00, type=t.uint32_t, access="r") bill_to_date_time_stamp: Final = 
ZCLAttributeDef( id=0x0A01, type=t.uint32_t, access="r" ) projected_bill: Final = ZCLAttributeDef(id=0x0A02, type=t.uint32_t, access="r") projected_bill_time_stamp: Final = ZCLAttributeDef( id=0x0A03, type=t.uint32_t, access="r" ) class ServerCommandDefs(BaseCommandDefs): get_profile: Final = ZCLCommandDef(id=0x00, schema={}, direction=False) req_mirror: Final = ZCLCommandDef(id=0x01, schema={}, direction=False) mirror_rem: Final = ZCLCommandDef(id=0x02, schema={}, direction=False) req_fast_poll_mode: Final = ZCLCommandDef(id=0x03, schema={}, direction=False) get_snapshot: Final = ZCLCommandDef(id=0x04, schema={}, direction=False) take_snapshot: Final = ZCLCommandDef(id=0x05, schema={}, direction=False) mirror_report_attr_response: Final = ZCLCommandDef( id=0x06, schema={}, direction=True ) class ClientCommandDefs(BaseCommandDefs): get_profile_response: Final = ZCLCommandDef(id=0x00, schema={}, direction=True) req_mirror_response: Final = ZCLCommandDef(id=0x01, schema={}, direction=True) mirror_rem_response: Final = ZCLCommandDef(id=0x02, schema={}, direction=True) req_fast_poll_mode_response: Final = ZCLCommandDef( id=0x03, schema={}, direction=True ) get_snapshot_response: Final = ZCLCommandDef(id=0x04, schema={}, direction=True) class Messaging(Cluster): cluster_id: Final = 0x0703 ep_attribute: Final = "smartenergy_messaging" class Tunneling(Cluster): cluster_id: Final = 0x0704 ep_attribute: Final = "smartenergy_tunneling" class Prepayment(Cluster): cluster_id: Final = 0x0705 ep_attribute: Final = "smartenergy_prepayment" class EnergyManagement(Cluster): cluster_id: Final = 0x0706 ep_attribute: Final = "smartenergy_energy_management" class Calendar(Cluster): cluster_id: Final = 0x0707 ep_attribute: Final = "smartenergy_calendar" class DeviceManagement(Cluster): cluster_id: Final = 0x0708 ep_attribute: Final = "smartenergy_device_management" class Events(Cluster): cluster_id: Final = 0x0709 ep_attribute: Final = "smartenergy_events" class MduPairing(Cluster): 
cluster_id: Final = 0x070A ep_attribute: Final = "smartenergy_mdu_pairing" class KeyEstablishment(Cluster): cluster_id: Final = 0x0800 ep_attribute: Final = "smartenergy_key_establishment" zigpy-0.62.3/zigpy/zcl/foundation.py000066400000000000000000001003541456054056700174350ustar00rootroot00000000000000from __future__ import annotations import dataclasses import enum import functools import keyword import typing import zigpy.types as t def _hex_uint16_repr(v: int) -> str: return t.uint16_t(v)._hex_repr() def ensure_valid_name(name: str | None) -> None: """Ensures that the name of an attribute or command is valid.""" if name is not None and not name.isidentifier(): raise ValueError(f"{name!r} is not a valid identifier name.") class Status(t.enum8): SUCCESS = 0x00 # Operation was successful. FAILURE = 0x01 # Operation was not successful NOT_AUTHORIZED = 0x7E # The sender of the command does not have RESERVED_FIELD_NOT_ZERO = 0x7F # A reserved field/subfield/bit contains a MALFORMED_COMMAND = 0x80 # The command appears to contain the wrong UNSUP_CLUSTER_COMMAND = 0x81 # The specified cluster command is not UNSUP_GENERAL_COMMAND = 0x82 # The specified general ZCL command is not UNSUP_MANUF_CLUSTER_COMMAND = 0x83 # A manufacturer specific unicast, UNSUP_MANUF_GENERAL_COMMAND = 0x84 # A manufacturer specific unicast, ZCL INVALID_FIELD = 0x85 # At least one field of the command contains an UNSUPPORTED_ATTRIBUTE = 0x86 # The specified attribute does not exist on INVALID_VALUE = 0x87 # Out of range error, or set to a reserved value. READ_ONLY = 0x88 # Attempt to write a read only attribute. INSUFFICIENT_SPACE = 0x89 # An operation (e.g. an attempt to create an DUPLICATE_EXISTS = 0x8A # An attempt to create an entry in a table failed NOT_FOUND = 0x8B # The requested information (e.g. 
table entry) UNREPORTABLE_ATTRIBUTE = 0x8C # Periodic reports cannot be issued for this INVALID_DATA_TYPE = 0x8D # The data type given for an attribute is INVALID_SELECTOR = 0x8E # The selector for an attribute is incorrect. WRITE_ONLY = 0x8F # A request has been made to read an attribute INCONSISTENT_STARTUP_STATE = 0x90 # Setting the requested values would put DEFINED_OUT_OF_BAND = 0x91 # An attempt has been made to write an INCONSISTENT = ( 0x92 # The supplied values (e.g., contents of table cells) are inconsistent ) ACTION_DENIED = 0x93 # The credentials presented by the device sending the TIMEOUT = 0x94 # The exchange was aborted due to excessive response time ABORT = 0x95 # Failed case when a client or a server decides to abort the upgrade process INVALID_IMAGE = 0x96 # Invalid OTA upgrade image (ex. failed signature WAIT_FOR_DATA = 0x97 # Server does not have data block available yet NO_IMAGE_AVAILABLE = 0x98 # No OTA upgrade image available for a particular client REQUIRE_MORE_IMAGE = 0x99 # The client still requires more OTA upgrade image NOTIFICATION_PENDING = 0x9A # The command has been received and is being processed HARDWARE_FAILURE = 0xC0 # An operation was unsuccessful due to a SOFTWARE_FAILURE = 0xC1 # An operation was unsuccessful due to a CALIBRATION_ERROR = 0xC2 # An error occurred during calibration UNSUPPORTED_CLUSTER = 0xC3 # The cluster is not supported @classmethod def _missing_(cls, value): chained = t.APSStatus(value) status = cls._member_type_.__new__(cls, chained.value) status._name_ = chained.name status._value_ = value return status class Analog: pass class Discrete: pass class Null: pass class Unknown(t.NoData): pass @dataclasses.dataclass() class TypeValue: type: t.uint8_t = dataclasses.field(default=None) value: typing.Any = dataclasses.field(default=None) def __init__(self, type: t.uint8_t | None = None, value: typing.Any = None) -> None: # "Copy constructor" if type is not None and value is None and isinstance(type, 
self.__class__): other = type type = other.type value = other.value self.type = type self.value = value def serialize(self) -> bytes: return self.type.to_bytes(1, "little") + self.value.serialize() @classmethod def deserialize(cls, data: bytes) -> tuple[TypeValue, bytes]: type, data = t.uint8_t.deserialize(data) python_type = DATA_TYPES[type][1] value, data = python_type.deserialize(data) return cls(type=type, value=value), data def __repr__(self) -> str: return ( f"{type(self).__name__}(" f"type={type(self.value).__name__}, value={self.value!r}" f")" ) class TypedCollection(TypeValue): @classmethod def deserialize(cls, data): type, data = t.uint8_t.deserialize(data) python_type = DATA_TYPES[type][1] values, data = t.LVList[python_type, t.uint16_t].deserialize(data) return cls(type=type, value=values), data class Array(TypedCollection): pass class Bag(TypedCollection): pass class Set(TypedCollection): pass # ToDo: Make this a real set? class DataTypes(dict): """DataTypes container.""" def __init__( self, data_types: dict[ int, tuple[ str, typing.Any, typing.Literal[Null] | typing.Literal[Discrete] | typing.Literal[Analog] | typing.Literal[None], ], ], ) -> None: super().__init__(data_types) self._idx_by_class = { _type: type_id for type_id, (name, _type, ad) in self.items() } def pytype_to_datatype_id(self, python_type: typing.Any) -> int: """Return Zigbee Datatype ID for a give python type.""" # We return the most specific parent class for cls in python_type.__mro__: if cls in self._idx_by_class: return self._idx_by_class[cls] return 0xFF class ZCLStructure(t.LVList, item_type=TypeValue, length_type=t.uint16_t): """ZCL Structure data type.""" DATA_TYPES = DataTypes( { 0x00: ("No data", t.NoData, Null), 0x08: ("General", t.data8, Discrete), 0x09: ("General", t.data16, Discrete), 0x0A: ("General", t.data24, Discrete), 0x0B: ("General", t.data32, Discrete), 0x0C: ("General", t.data40, Discrete), 0x0D: ("General", t.data48, Discrete), 0x0E: ("General", t.data56, 
Discrete), 0x0F: ("General", t.data64, Discrete), 0x10: ("Boolean", t.Bool, Discrete), 0x18: ("Bitmap", t.bitmap8, Discrete), 0x19: ("Bitmap", t.bitmap16, Discrete), 0x1A: ("Bitmap", t.bitmap24, Discrete), 0x1B: ("Bitmap", t.bitmap32, Discrete), 0x1C: ("Bitmap", t.bitmap40, Discrete), 0x1D: ("Bitmap", t.bitmap48, Discrete), 0x1E: ("Bitmap", t.bitmap56, Discrete), 0x1F: ("Bitmap", t.bitmap64, Discrete), 0x20: ("Unsigned Integer", t.uint8_t, Analog), 0x21: ("Unsigned Integer", t.uint16_t, Analog), 0x22: ("Unsigned Integer", t.uint24_t, Analog), 0x23: ("Unsigned Integer", t.uint32_t, Analog), 0x24: ("Unsigned Integer", t.uint40_t, Analog), 0x25: ("Unsigned Integer", t.uint48_t, Analog), 0x26: ("Unsigned Integer", t.uint56_t, Analog), 0x27: ("Unsigned Integer", t.uint64_t, Analog), 0x28: ("Signed Integer", t.int8s, Analog), 0x29: ("Signed Integer", t.int16s, Analog), 0x2A: ("Signed Integer", t.int24s, Analog), 0x2B: ("Signed Integer", t.int32s, Analog), 0x2C: ("Signed Integer", t.int40s, Analog), 0x2D: ("Signed Integer", t.int48s, Analog), 0x2E: ("Signed Integer", t.int56s, Analog), 0x2F: ("Signed Integer", t.int64s, Analog), 0x30: ("Enumeration", t.enum8, Discrete), 0x31: ("Enumeration", t.enum16, Discrete), 0x38: ("Floating point", t.Half, Analog), 0x39: ("Floating point", t.Single, Analog), 0x3A: ("Floating point", t.Double, Analog), 0x41: ("Octet string", t.LVBytes, Discrete), 0x42: ("Character string", t.CharacterString, Discrete), 0x43: ("Long octet string", t.LongOctetString, Discrete), 0x44: ("Long character string", t.LongCharacterString, Discrete), 0x48: ("Array", Array, Discrete), 0x4C: ("Structure", ZCLStructure, Discrete), 0x50: ("Set", Set, Discrete), 0x51: ("Bag", Bag, Discrete), 0xE0: ("Time of day", t.TimeOfDay, Analog), 0xE1: ("Date", t.Date, Analog), 0xE2: ("UTCTime", t.UTCTime, Analog), 0xE8: ("Cluster ID", t.ClusterId, Discrete), 0xE9: ("Attribute ID", t.AttributeId, Discrete), 0xEA: ("BACNet OID", t.BACNetOid, Discrete), 0xF0: ("IEEE address", 
t.EUI64, Discrete), 0xF1: ("128-bit security key", t.KeyData, Discrete), 0xFF: ("Unknown", Unknown, None), } ) class ReadAttributeRecord(t.Struct): """Read Attribute Record.""" attrid: t.uint16_t = t.StructField(repr=_hex_uint16_repr) status: Status value: TypeValue = t.StructField(requires=lambda s: s.status == Status.SUCCESS) class Attribute(t.Struct): attrid: t.uint16_t = t.StructField(repr=_hex_uint16_repr) value: TypeValue class WriteAttributesStatusRecord(t.Struct): status: Status attrid: t.uint16_t = t.StructField( requires=lambda s: s.status != Status.SUCCESS, repr=_hex_uint16_repr ) class WriteAttributesResponse(list): """Write Attributes response list. Response to Write Attributes request should contain only success status, in case when all attributes were successfully written or list of status + attr_id records for all failed writes. """ @classmethod def deserialize(cls, data: bytes) -> tuple[WriteAttributesResponse, bytes]: record, data = WriteAttributesStatusRecord.deserialize(data) r = cls([record]) if record.status == Status.SUCCESS: return r, data while len(data) >= 3: record, data = WriteAttributesStatusRecord.deserialize(data) r.append(record) return r, data def serialize(self): failed = [record for record in self if record.status != Status.SUCCESS] if failed: return b"".join( [WriteAttributesStatusRecord(i).serialize() for i in failed] ) return Status.SUCCESS.serialize() class ReportingDirection(t.enum8): SendReports = 0x00 ReceiveReports = 0x01 class AttributeReportingStatus(t.enum8): Pending = 0x00 Attribute_Reporting_Complete = 0x01 class AttributeReportingConfig: def __init__(self, other: AttributeReportingConfig | None = None) -> None: if isinstance(other, self.__class__): self.direction: ReportingDirection = other.direction self.attrid: t.uint16_t = other.attrid if self.direction == ReportingDirection.ReceiveReports: self.timeout: int = other.timeout return self.datatype: int = other.datatype self.min_interval: int = other.min_interval 
self.max_interval: int = other.max_interval self.reportable_change: int = other.reportable_change def serialize(self, *, _only_dir_and_attrid: bool = False) -> bytes: r = ReportingDirection(self.direction).serialize() r += t.uint16_t(self.attrid).serialize() if _only_dir_and_attrid: return r if self.direction == ReportingDirection.ReceiveReports: r += t.uint16_t(self.timeout).serialize() else: r += t.uint8_t(self.datatype).serialize() r += t.uint16_t(self.min_interval).serialize() r += t.uint16_t(self.max_interval).serialize() datatype = DATA_TYPES.get(self.datatype, None) if datatype and datatype[2] is Analog: datatype = datatype[1] r += datatype(self.reportable_change).serialize() return r @classmethod def deserialize( cls, data, *, _only_dir_and_attrid: bool = False ) -> tuple[AttributeReportingConfig, bytes]: self = cls() self.direction, data = ReportingDirection.deserialize(data) self.attrid, data = t.uint16_t.deserialize(data) # The report is only a direction and attribute if _only_dir_and_attrid: return self, data if self.direction == ReportingDirection.ReceiveReports: # Requesting things to be received by me self.timeout, data = t.uint16_t.deserialize(data) else: # Notifying that I will report things to you self.datatype, data = t.uint8_t.deserialize(data) datatype = DATA_TYPES[self.datatype] self.min_interval, data = t.uint16_t.deserialize(data) self.max_interval, data = t.uint16_t.deserialize(data) if datatype[2] is Analog: self.reportable_change, data = datatype[1].deserialize(data) return self, data def __repr__(self) -> str: r = f"{self.__class__.__name__}(" r += f"direction={self.direction}" r += f", attrid=0x{self.attrid:04X}" if self.direction == ReportingDirection.ReceiveReports: r += f", timeout={self.timeout}" elif hasattr(self, "datatype"): r += f", datatype={self.datatype}" r += f", min_interval={self.min_interval}" r += f", max_interval={self.max_interval}" if self.reportable_change is not None: r += f", 
reportable_change={self.reportable_change}" r += ")" return r class AttributeReportingConfigWithStatus(t.Struct): status: Status config: AttributeReportingConfig @classmethod def deserialize( cls, data: bytes ) -> tuple[AttributeReportingConfigWithStatus, bytes]: status, data = Status.deserialize(data) # FIXME: The reporting configuration will not include anything other than the # direction and the attribute ID when the status is not successful. This # information isn't a part of the attribute reporting config structure so we # have to pass it in externally. config, data = AttributeReportingConfig.deserialize( data, _only_dir_and_attrid=(status != Status.SUCCESS) ) return cls(status=status, config=config), data def serialize(self) -> bytes: return self.status.serialize() + self.config.serialize( _only_dir_and_attrid=(self.status != Status.SUCCESS) ) class ConfigureReportingResponseRecord(t.Struct): status: Status direction: ReportingDirection attrid: t.uint16_t = t.StructField(repr=_hex_uint16_repr) @classmethod def deserialize(cls, data: bytes) -> tuple[ConfigureReportingResponseRecord, bytes]: r = cls() r.status, data = Status.deserialize(data) if r.status == Status.SUCCESS: r.direction, data = t.Optional(t.uint8_t).deserialize(data) if r.direction is not None: r.direction = ReportingDirection(r.direction) r.attrid, data = t.Optional(t.uint16_t).deserialize(data) return r, data r.direction, data = ReportingDirection.deserialize(data) r.attrid, data = t.uint16_t.deserialize(data) return r, data def serialize(self): r = Status(self.status).serialize() if self.status != Status.SUCCESS: r += ReportingDirection(self.direction).serialize() r += t.uint16_t(self.attrid).serialize() return r def __repr__(self) -> str: r = f"{self.__class__.__name__}(status={self.status}" if self.status != Status.SUCCESS: r += f", direction={self.direction}, attrid={self.attrid}" r += ")" return r class ConfigureReportingResponse(t.List[ConfigureReportingResponseRecord]): # In the case of 
# successful configuration of all attributes, only a single
    # attribute status record SHALL be included in the command, with the status
    # field set to SUCCESS and the direction and attribute identifier fields omitted

    def serialize(self):
        """Serialize per ZCL: a lone SUCCESS record, or failed records only."""
        if not self:
            raise ValueError("Cannot serialize empty list")

        failed = [record for record in self if record.status != Status.SUCCESS]
        if not failed:
            return ConfigureReportingResponseRecord(status=Status.SUCCESS).serialize()

        # Note that attribute status records are not included for successfully
        # configured attributes, in order to save bandwidth.
        return b"".join(
            [ConfigureReportingResponseRecord(r).serialize() for r in failed]
        )


class ReadReportingConfigRecord(t.Struct):
    """Single record of a Read Reporting Configuration request."""

    direction: t.uint8_t
    attrid: t.uint16_t


class DiscoverAttributesResponseRecord(t.Struct):
    """Single record of a Discover Attributes response."""

    attrid: t.uint16_t
    datatype: t.uint8_t


class AttributeAccessControl(t.bitmap8):
    """Access control bits reported by Discover Attributes Extended."""

    READ = 0x01
    WRITE = 0x02
    REPORT = 0x04


class DiscoverAttributesExtendedResponseRecord(t.Struct):
    attrid: t.uint16_t
    datatype: t.uint8_t
    acl: AttributeAccessControl


class FrameType(t.enum2):
    """ZCL Frame Type."""

    GLOBAL_COMMAND = 0b00
    CLUSTER_COMMAND = 0b01
    RESERVED_2 = 0b10
    RESERVED_3 = 0b11


class Direction(t.enum1):
    """ZCL frame control direction."""

    Client_to_Server = 0
    Server_to_Client = 1

    @classmethod
    def _from_is_reply(cls, is_reply: bool) -> Direction:
        # Backwards-compat helper: map the old boolean "is_reply" flag onto
        # the direction enum.
        return cls.Server_to_Client if is_reply else cls.Client_to_Server


class FrameControl(t.Struct, t.uint8_t):
    """The frame control field contains information defining the command type
    and other control flags.
""" frame_type: FrameType is_manufacturer_specific: t.uint1_t direction: Direction disable_default_response: t.uint1_t reserved: t.uint3_t @classmethod def cluster( cls, direction: Direction = Direction.Client_to_Server, is_manufacturer_specific: bool = False, ): return cls( frame_type=FrameType.CLUSTER_COMMAND, is_manufacturer_specific=is_manufacturer_specific, direction=direction, disable_default_response=(direction == Direction.Server_to_Client), reserved=0b000, ) @classmethod def general( cls, direction: Direction = Direction.Client_to_Server, is_manufacturer_specific: bool = False, ): return cls( frame_type=FrameType.GLOBAL_COMMAND, is_manufacturer_specific=is_manufacturer_specific, direction=direction, disable_default_response=(direction == Direction.Server_to_Client), reserved=0b000, ) @property def is_cluster(self) -> bool: """Return True if command is a local cluster specific command.""" return bool(self.frame_type == FrameType.CLUSTER_COMMAND) @property def is_general(self) -> bool: """Return True if command is a global ZCL command.""" return bool(self.frame_type == FrameType.GLOBAL_COMMAND) class ZCLHeader(t.Struct): NO_MANUFACTURER_ID = -1 # type: typing.Literal frame_control: FrameControl manufacturer: t.uint16_t = t.StructField( requires=lambda hdr: hdr.frame_control.is_manufacturer_specific ) tsn: t.uint8_t command_id: t.uint8_t def __new__( cls: type[ZCLHeader], frame_control: FrameControl | None = None, manufacturer: t.uint16_t | None = None, tsn: int | t.uint8_t | None = None, command_id: int | GeneralCommand | None = None, ) -> ZCLHeader: # Allow "auto manufacturer ID" to be disabled in higher layers if manufacturer is cls.NO_MANUFACTURER_ID: manufacturer = None if frame_control is not None and manufacturer is not None: frame_control.is_manufacturer_specific = True return super().__new__(cls, frame_control, manufacturer, tsn, command_id) @property def direction(self) -> bool: """Return direction of Frame Control.""" return 
self.frame_control.direction def __setattr__( self, name: str, value: t.uint16_t | FrameControl | t.uint8_t | GeneralCommand | None, ) -> None: if name == "manufacturer" and value is self.NO_MANUFACTURER_ID: value = None super().__setattr__(name, value) if name == "manufacturer" and self.frame_control is not None: self.frame_control.is_manufacturer_specific = value is not None @classmethod def general( cls, tsn: int | t.uint8_t, command_id: int | t.uint8_t, manufacturer: int | t.uint16_t | None = None, direction: Direction = Direction.Client_to_Server, ) -> ZCLHeader: return cls( frame_control=FrameControl.general( direction=direction, is_manufacturer_specific=(manufacturer is not None), ), manufacturer=manufacturer, tsn=tsn, command_id=command_id, ) @classmethod def cluster( cls, tsn: int | t.uint8_t, command_id: int | t.uint8_t, manufacturer: int | t.uint16_t | None = None, direction: Direction = Direction.Client_to_Server, ) -> ZCLHeader: return cls( frame_control=FrameControl.cluster( direction=direction, is_manufacturer_specific=(manufacturer is not None), ), manufacturer=manufacturer, tsn=tsn, command_id=command_id, ) @dataclasses.dataclass(frozen=True) class ZCLCommandDef(t.BaseDataclassMixin): id: t.uint8_t = None schema: CommandSchema = None direction: Direction = None is_manufacturer_specific: bool = None # set later name: str = None def __post_init__(self) -> None: # Backwards compatibility with positional syntax where the name was first if isinstance(self.id, str): object.__setattr__(self, "name", self.id) object.__setattr__(self, "id", None) ensure_valid_name(self.name) if isinstance(self.direction, bool): object.__setattr__( self, "direction", Direction._from_is_reply(self.direction) ) def with_compiled_schema(self) -> ZCLCommandDef: """Return a copy of the ZCL command definition object with its dictionary command schema converted into a `CommandSchema` subclass. 
""" if isinstance(self.schema, tuple): raise ValueError( f"Tuple schemas are deprecated: {self.schema!r}. Use a dictionary or a" f" Struct subclass." ) elif not isinstance(self.schema, dict): # If the schema is already a struct, do nothing self.schema.command = self return self assert self.id is not None assert self.name is not None cls_attrs = { "__annotations__": {}, "command": self, } for name, param_type in self.schema.items(): plain_name = name.rstrip("?") # Make sure parameters with names like "foo bar" and "class" can't exist if not plain_name.isidentifier() or keyword.iskeyword(plain_name): raise ValueError( f"Schema parameter {name} must be a valid Python identifier" ) cls_attrs["__annotations__"][plain_name] = "None" cls_attrs[plain_name] = t.StructField( type=param_type, optional=name.endswith("?"), ) schema = type(self.name, (CommandSchema,), cls_attrs) return self.replace(schema=schema) def __repr__(self) -> str: return ( f"{self.__class__.__name__}(" f"id=0x{self.id:02X}, " f"name={self.name!r}, " f"direction={self.direction}, " f"schema={self.schema}, " f"is_manufacturer_specific={self.is_manufacturer_specific}" f")" ) class CommandSchema(t.Struct, tuple): """Struct subclass that behaves more like a tuple.""" command: ZCLCommandDef = None def __iter__(self): return iter(self.as_tuple()) def __getitem__( self, item: slice | typing.SupportsIndex ) -> typing.Any | tuple[typing.Any, ...]: return self.as_tuple()[item] def __len__(self) -> int: return len(self.as_tuple()) def __eq__(self, other) -> bool: if isinstance(other, tuple) and not isinstance(other, type(self)): return self.as_tuple() == other return super().__eq__(other) class ZCLAttributeAccess(enum.Flag): NONE = 0 Read = 1 Write = 2 Write_Optional = 4 Report = 8 Scene = 16 _names: dict[ZCLAttributeAccess, str] @classmethod @functools.lru_cache(None) def from_str(cls: ZCLAttributeAccess, value: str) -> ZCLAttributeAccess: orig_value = value access = cls.NONE while value: for mode, prefix in 
cls._names.items(): if value.startswith(prefix): value = value[len(prefix) :] access |= mode break else: raise ValueError(f"Invalid access mode: {orig_value!r}") return cls(access) ZCLAttributeAccess._names = { ZCLAttributeAccess.Write_Optional: "*w", ZCLAttributeAccess.Write: "w", ZCLAttributeAccess.Read: "r", ZCLAttributeAccess.Report: "p", ZCLAttributeAccess.Scene: "s", } @dataclasses.dataclass(frozen=True) class ZCLAttributeDef(t.BaseDataclassMixin): id: t.uint16_t = None type: type = None access: ZCLAttributeAccess = dataclasses.field( default=( ZCLAttributeAccess.Read | ZCLAttributeAccess.Write | ZCLAttributeAccess.Report ), ) mandatory: bool = False is_manufacturer_specific: bool = False # The name will be specified later name: str = None def __post_init__(self) -> None: # Backwards compatibility with positional syntax where the name was first if isinstance(self.id, str): object.__setattr__(self, "name", self.id) object.__setattr__(self, "id", None) if self.id is not None and not isinstance(self.id, t.uint16_t): object.__setattr__(self, "id", t.uint16_t(self.id)) if isinstance(self.access, str): object.__setattr__(self, "access", ZCLAttributeAccess.from_str(self.access)) ensure_valid_name(self.name) def __repr__(self) -> str: return ( f"{self.__class__.__name__}(" f"id=0x{self.id:04X}, " f"name={self.name!r}, " f"type={self.type}, " f"access={self.access!r}, " f"mandatory={self.mandatory!r}, " f"is_manufacturer_specific={self.is_manufacturer_specific}" f")" ) class IterableMemberMeta(type): def __iter__(cls) -> typing.Iterable[typing.Any]: for name in dir(cls): if not name.startswith("_"): yield getattr(cls, name) class BaseCommandDefs(metaclass=IterableMemberMeta): pass class BaseAttributeDefs(metaclass=IterableMemberMeta): pass class GeneralCommand(t.enum8): """ZCL Foundation General Command IDs.""" Read_Attributes = 0x00 Read_Attributes_rsp = 0x01 Write_Attributes = 0x02 Write_Attributes_Undivided = 0x03 Write_Attributes_rsp = 0x04 
Write_Attributes_No_Response = 0x05 Configure_Reporting = 0x06 Configure_Reporting_rsp = 0x07 Read_Reporting_Configuration = 0x08 Read_Reporting_Configuration_rsp = 0x09 Report_Attributes = 0x0A Default_Response = 0x0B Discover_Attributes = 0x0C Discover_Attributes_rsp = 0x0D # Read_Attributes_Structured = 0x0e # Write_Attributes_Structured = 0x0f # Write_Attributes_Structured_rsp = 0x10 Discover_Commands_Received = 0x11 Discover_Commands_Received_rsp = 0x12 Discover_Commands_Generated = 0x13 Discover_Commands_Generated_rsp = 0x14 Discover_Attribute_Extended = 0x15 Discover_Attribute_Extended_rsp = 0x16 GENERAL_COMMANDS = COMMANDS = { GeneralCommand.Read_Attributes: ZCLCommandDef( schema={"attribute_ids": t.List[t.uint16_t]}, direction=Direction.Client_to_Server, ), GeneralCommand.Read_Attributes_rsp: ZCLCommandDef( schema={"status_records": t.List[ReadAttributeRecord]}, direction=Direction.Server_to_Client, ), GeneralCommand.Write_Attributes: ZCLCommandDef( schema={"attributes": t.List[Attribute]}, direction=Direction.Client_to_Server ), GeneralCommand.Write_Attributes_Undivided: ZCLCommandDef( schema={"attributes": t.List[Attribute]}, direction=Direction.Client_to_Server ), GeneralCommand.Write_Attributes_rsp: ZCLCommandDef( schema={"status_records": WriteAttributesResponse}, direction=Direction.Server_to_Client, ), GeneralCommand.Write_Attributes_No_Response: ZCLCommandDef( schema={"attributes": t.List[Attribute]}, direction=Direction.Client_to_Server ), GeneralCommand.Configure_Reporting: ZCLCommandDef( schema={"config_records": t.List[AttributeReportingConfig]}, direction=Direction.Client_to_Server, ), GeneralCommand.Configure_Reporting_rsp: ZCLCommandDef( schema={"status_records": ConfigureReportingResponse}, direction=Direction.Server_to_Client, ), GeneralCommand.Read_Reporting_Configuration: ZCLCommandDef( schema={"attribute_records": t.List[ReadReportingConfigRecord]}, direction=Direction.Client_to_Server, ), 
GeneralCommand.Read_Reporting_Configuration_rsp: ZCLCommandDef( schema={"attribute_configs": t.List[AttributeReportingConfigWithStatus]}, direction=Direction.Server_to_Client, ), GeneralCommand.Report_Attributes: ZCLCommandDef( schema={"attribute_reports": t.List[Attribute]}, direction=Direction.Client_to_Server, ), GeneralCommand.Default_Response: ZCLCommandDef( schema={"command_id": t.uint8_t, "status": Status}, direction=Direction.Server_to_Client, ), GeneralCommand.Discover_Attributes: ZCLCommandDef( schema={"start_attribute_id": t.uint16_t, "max_attribute_ids": t.uint8_t}, direction=Direction.Client_to_Server, ), GeneralCommand.Discover_Attributes_rsp: ZCLCommandDef( schema={ "discovery_complete": t.Bool, "attribute_info": t.List[DiscoverAttributesResponseRecord], }, direction=Direction.Server_to_Client, ), # Command.Read_Attributes_Structured: ZCLCommandDef(schema=(, ), direction=Direction.Client_to_Server), # Command.Write_Attributes_Structured: ZCLCommandDef(schema=(, ), direction=Direction.Client_to_Server), # Command.Write_Attributes_Structured_rsp: ZCLCommandDef(schema=(, ), direction=Direction.Server_to_Client), GeneralCommand.Discover_Commands_Received: ZCLCommandDef( schema={"start_command_id": t.uint8_t, "max_command_ids": t.uint8_t}, direction=Direction.Client_to_Server, ), GeneralCommand.Discover_Commands_Received_rsp: ZCLCommandDef( schema={"discovery_complete": t.Bool, "command_ids": t.List[t.uint8_t]}, direction=Direction.Server_to_Client, ), GeneralCommand.Discover_Commands_Generated: ZCLCommandDef( schema={"start_command_id": t.uint8_t, "max_command_ids": t.uint8_t}, direction=Direction.Client_to_Server, ), GeneralCommand.Discover_Commands_Generated_rsp: ZCLCommandDef( schema={"discovery_complete": t.Bool, "command_ids": t.List[t.uint8_t]}, direction=Direction.Server_to_Client, ), GeneralCommand.Discover_Attribute_Extended: ZCLCommandDef( schema={"start_attribute_id": t.uint16_t, "max_attribute_ids": t.uint8_t}, 
direction=Direction.Client_to_Server, ), GeneralCommand.Discover_Attribute_Extended_rsp: ZCLCommandDef( schema={ "discovery_complete": t.Bool, "extended_attr_info": t.List[DiscoverAttributesExtendedResponseRecord], }, direction=Direction.Server_to_Client, ), } for command_id, command_def in list(GENERAL_COMMANDS.items()): GENERAL_COMMANDS[command_id] = command_def.replace( id=command_id, name=command_id.name ).with_compiled_schema() ZCL_CLUSTER_REVISION_ATTR = ZCLAttributeDef( id=0xFFFD, type=t.uint16_t, access="r", mandatory=True ) ZCL_REPORTING_STATUS_ATTR = ZCLAttributeDef( id=0xFFFE, type=AttributeReportingStatus, access="r" ) zigpy-0.62.3/zigpy/zdo/000077500000000000000000000000001456054056700147165ustar00rootroot00000000000000zigpy-0.62.3/zigpy/zdo/__init__.py000066400000000000000000000166641456054056700170440ustar00rootroot00000000000000from __future__ import annotations import functools import logging from typing import Coroutine import zigpy.profiles import zigpy.types as t from zigpy.typing import AddressingMode import zigpy.util from . import types LOGGER = logging.getLogger(__name__) ZDO_ENDPOINT = 0 class ZDO(zigpy.util.CatchingTaskMixin, zigpy.util.ListenableMixin): """The ZDO endpoint of a device""" class LeaveOptions(t.bitmap8): """ZDO Mgmt_Leave_req Options.""" NONE = 0 RemoveChildren = 1 << 6 Rejoin = 1 << 7 def __init__(self, device): self._device = device self._listeners = {} def _serialize(self, command, *args): schema = types.CLUSTERS[command][1] data = t.serialize(args, schema) return data def deserialize(self, cluster_id, data): if cluster_id not in types.CLUSTERS: raise ValueError(f"Invalid ZDO cluster ID: 0x{cluster_id:04X}") _, param_types = types.CLUSTERS[cluster_id] hdr, data = types.ZDOHeader.deserialize(cluster_id, data) args, data = t.deserialize(data, param_types) if data: # TODO: Seems sane to check, but what should we do? 
self.warning("Data remains after deserializing ZDO frame: %r", data) return hdr, args def request(self, command, *args, use_ieee=False): data = self._serialize(command, *args) tsn = self.device.get_sequence() data = t.uint8_t(tsn).serialize() + data return self._device.request(0, command, 0, 0, tsn, data, use_ieee=use_ieee) def reply(self, command, *args, tsn=None, use_ieee=False): data = self._serialize(command, *args) if tsn is None: tsn = self.device.get_sequence() data = t.uint8_t(tsn).serialize() + data return self._device.reply(0, command, 0, 0, tsn, data, use_ieee=use_ieee) def handle_message( self, profile: int, cluster: int, hdr: types.ZDOHeader, args: list, *, dst_addressing: AddressingMode | None = None, ) -> None: self.debug("ZDO request %s: %s", hdr.command_id, args) handler = getattr(self, f"handle_{hdr.command_id.name.lower()}", None) if handler is not None: handler(hdr, *args, dst_addressing=dst_addressing) else: self.debug("No handler for ZDO request:%s(%s)", hdr.command_id, args) self.listener_event( f"zdo_{hdr.command_id.name.lower()}", self._device, dst_addressing, hdr, args, ) def handle_nwk_addr_req( self, hdr: types.ZDOHeader, ieee: t.EUI64, request_type: int, start_index: int | None = None, dst_addressing: AddressingMode | None = None, ): """Handle ZDO NWK Address request.""" app = self._device.application if ieee == app.state.node_info.ieee: self.create_catching_task( self.NWK_addr_rsp( 0, app.state.node_info.ieee, app.state.node_info.nwk, 0, 0, [], tsn=hdr.tsn, ) ) def handle_ieee_addr_req( self, hdr: types.ZDOHeader, nwk: t.NWK, request_type: int, start_index: int | None = None, dst_addressing: AddressingMode | None = None, ): """Handle ZDO IEEE Address request.""" app = self._device.application if nwk in ( t.BroadcastAddress.ALL_DEVICES, t.BroadcastAddress.RX_ON_WHEN_IDLE, t.BroadcastAddress.ALL_ROUTERS_AND_COORDINATOR, app.state.node_info.nwk, ): self.create_catching_task( self.IEEE_addr_rsp( 0, app.state.node_info.ieee, 
                    app.state.node_info.nwk,
                    0,
                    0,
                    [],
                    tsn=hdr.tsn,
                )
            )

    def handle_device_annce(
        self,
        hdr: types.ZDOHeader,
        nwk: t.NWK,
        ieee: t.EUI64,
        capability: int,
        dst_addressing: AddressingMode | None = None,
    ):
        """Handle ZDO device announcement request."""
        # Forward the announcement to listeners (e.g. the application layer).
        self.listener_event("device_announce", self._device)

    def handle_mgmt_permit_joining_req(
        self,
        hdr: types.ZDOHeader,
        permit_duration: int,
        tc_significance: int,
        dst_addressing: AddressingMode | None = None,
    ):
        """Handle ZDO permit joining request."""
        self.listener_event("permit_duration", permit_duration)

    def handle_match_desc_req(
        self,
        hdr: types.ZDOHeader,
        addr: t.NWK,
        profile: int,
        in_clusters: list,
        out_cluster: list,
        dst_addressing: AddressingMode | None = None,
    ):
        """Handle ZDO Match_desc_req request."""
        local_addr = self._device.application.state.node_info.nwk
        if profile != zigpy.profiles.zha.PROFILE_ID:
            # Not the ZHA profile: reply with an empty endpoint list.
            self.create_catching_task(
                self.Match_Desc_rsp(0, local_addr, [], tsn=hdr.tsn)
            )
            return

        # ZHA profile: report endpoint 1 as the matching endpoint.
        self.create_catching_task(
            self.Match_Desc_rsp(0, local_addr, [t.uint8_t(1)], tsn=hdr.tsn)
        )

    def bind(self, cluster):
        """Send a Bind_req binding ``cluster`` to the application's address."""
        return self.Bind_req(
            self._device.ieee,
            cluster.endpoint.endpoint_id,
            cluster.cluster_id,
            self.device.application.get_dst_address(cluster),
        )

    def unbind(self, cluster):
        """Send an Unbind_req for ``cluster`` at the application's address."""
        return self.Unbind_req(
            self._device.ieee,
            cluster.endpoint.endpoint_id,
            cluster.cluster_id,
            self.device.application.get_dst_address(cluster),
        )

    def leave(self, remove_children: bool = True, rejoin: bool = False) -> Coroutine:
        """Ask the device to leave the network, optionally rejoining."""
        opts = self.LeaveOptions.NONE
        if remove_children:
            opts |= self.LeaveOptions.RemoveChildren
        if rejoin:
            opts |= self.LeaveOptions.Rejoin
        return self.Mgmt_Leave_req(self._device.ieee, opts)

    def permit(self, duration=60, tc_significance=0):
        """Permit joining for ``duration`` seconds via Mgmt_Permit_Joining_req."""
        return self.Mgmt_Permit_Joining_req(duration, tc_significance)

    def log(self, lvl, msg, *args, **kwargs):
        # Prefix every log line with the device's NWK address (lazy %-args).
        msg = "[0x%04x:zdo] " + msg
        args = (self._device.nwk,) + args
        return LOGGER.log(lvl, msg, *args, **kwargs)

    @property
    def device(self):
        return self._device

    def __getattr__(self, name):
        try:
command = types.ZDOCmd[name] except KeyError: raise AttributeError(f"No such '{name}' ZDO command") if command & 0x8000: return functools.partial(self.reply, command) return functools.partial(self.request, command) def broadcast( app, command, grpid, radius, *args, broadcast_address=t.BroadcastAddress.RX_ON_WHEN_IDLE, **kwargs, ): params, param_types = types.CLUSTERS[command] named_args = dict(zip(params, args)) named_args.update(kwargs) assert set(named_args.keys()) & set(params) sequence = app.get_sequence() data = bytes([sequence]) + t.serialize(named_args.values(), param_types) return zigpy.device.broadcast( app, 0, command, 0, 0, grpid, radius, sequence, data, broadcast_address=broadcast_address, ) zigpy-0.62.3/zigpy/zdo/types.py000066400000000000000000000570271456054056700164470ustar00rootroot00000000000000from __future__ import annotations import typing import zigpy.types as t class _PowerDescriptorEnums: class CurrentPowerMode(t.enum4): RxOnSyncedWithNodeDesc = 0b0000 RxOnPeriodically = 0b0001 RxOnWhenStimulated = 0b0010 class PowerSources(t.bitmap4): MainsPower = 0b0001 RechargeableBattery = 0b0010 DisposableBattery = 0b0100 Reserved = 0b1000 class PowerSourceLevel(t.enum4): Critical = 0b0000 Percent33 = 0b0100 Percent66 = 0b1000 Percent100 = 0b1100 class PowerDescriptor(t.Struct): CurrentPowerMode = _PowerDescriptorEnums.CurrentPowerMode PowerSources = _PowerDescriptorEnums.PowerSources PowerSourceLevel = _PowerDescriptorEnums.PowerSourceLevel current_power_mode: _PowerDescriptorEnums.CurrentPowerMode available_power_sources: _PowerDescriptorEnums.PowerSources current_power_source: _PowerDescriptorEnums.PowerSources current_power_source_level: _PowerDescriptorEnums.PowerSourceLevel class SimpleDescriptor(t.Struct): endpoint: t.uint8_t profile: t.uint16_t device_type: t.uint16_t device_version: t.uint8_t input_clusters: t.LVList[t.uint16_t] output_clusters: t.LVList[t.uint16_t] class SizePrefixedSimpleDescriptor(SimpleDescriptor): def serialize(self): data 
= super().serialize() return len(data).to_bytes(1, "little") + data @classmethod def deserialize(cls, data): if not data or data[0] == 0: return None, data[1:] return super().deserialize(data[1:]) class LogicalType(t.enum3): Coordinator = 0b000 Router = 0b001 EndDevice = 0b010 class _NodeDescriptorEnums: class MACCapabilityFlags(t.bitmap8): NONE = 0 AlternatePanCoordinator = 0b00000001 FullFunctionDevice = 0b00000010 MainsPowered = 0b00000100 RxOnWhenIdle = 0b00001000 SecurityCapable = 0b01000000 AllocateAddress = 0b10000000 class FrequencyBand(t.bitmap5): Freq868MHz = 0b00001 Freq902MHz = 0b00100 Freq2400MHz = 0b01000 class DescriptorCapability(t.bitmap8): NONE = 0 ExtendedActiveEndpointListAvailable = 0b00000001 ExtendedSimpleDescriptorListAvailable = 0b00000010 class NodeDescriptor(t.Struct): FrequencyBand = _NodeDescriptorEnums.FrequencyBand MACCapabilityFlags = _NodeDescriptorEnums.MACCapabilityFlags DescriptorCapability = _NodeDescriptorEnums.DescriptorCapability logical_type: LogicalType complex_descriptor_available: t.uint1_t user_descriptor_available: t.uint1_t reserved: t.uint3_t aps_flags: t.uint3_t frequency_band: _NodeDescriptorEnums.FrequencyBand mac_capability_flags: _NodeDescriptorEnums.MACCapabilityFlags manufacturer_code: t.uint16_t maximum_buffer_size: t.uint8_t maximum_incoming_transfer_size: t.uint16_t server_mask: t.uint16_t maximum_outgoing_transfer_size: t.uint16_t descriptor_capability_field: _NodeDescriptorEnums.DescriptorCapability def __new__(cls, *args, **kwargs): # Old style constructor if len(args) == 9 or "byte1" in kwargs or "byte2" in kwargs: return cls._old_constructor(*args, **kwargs) return super().__new__(cls, *args, **kwargs) @classmethod def _old_constructor( cls: NodeDescriptor, byte1: t.uint8_t = None, byte2: t.uint8_t = None, mac_capability_flags: MACCapabilityFlags = None, manufacturer_code: t.uint16_t = None, maximum_buffer_size: t.uint8_t = None, maximum_incoming_transfer_size: t.uint16_t = None, server_mask: t.uint16_t 
= None, maximum_outgoing_transfer_size: t.uint16_t = None, descriptor_capability_field: t.uint8_t = None, ) -> NodeDescriptor: logical_type = None complex_descriptor_available = None user_descriptor_available = None reserved = None if byte1 is not None: bits, _ = t.Bits.deserialize(bytes([byte1])) logical_type, bits = LogicalType.from_bits(bits) complex_descriptor_available, bits = t.uint1_t.from_bits(bits) user_descriptor_available, bits = t.uint1_t.from_bits(bits) reserved, bits = t.uint3_t.from_bits(bits) assert not bits aps_flags = None frequency_band = None if byte2 is not None: bits, _ = t.Bits.deserialize(bytes([byte2])) aps_flags, bits = t.uint3_t.from_bits(bits) frequency_band, bits = cls.FrequencyBand.from_bits(bits) assert not bits return cls( # type:ignore[operator] logical_type=logical_type, complex_descriptor_available=complex_descriptor_available, user_descriptor_available=user_descriptor_available, reserved=reserved, aps_flags=aps_flags, frequency_band=frequency_band, mac_capability_flags=mac_capability_flags, manufacturer_code=manufacturer_code, maximum_buffer_size=maximum_buffer_size, maximum_incoming_transfer_size=maximum_incoming_transfer_size, server_mask=server_mask, maximum_outgoing_transfer_size=maximum_outgoing_transfer_size, descriptor_capability_field=descriptor_capability_field, ) @property def is_end_device(self) -> bool | None: if self.logical_type is None: return None return self.logical_type == LogicalType.EndDevice @property def is_router(self) -> bool | None: if self.logical_type is None: return None return self.logical_type == LogicalType.Router @property def is_coordinator(self) -> bool | None: if self.logical_type is None: return None return self.logical_type == LogicalType.Coordinator @property def is_alternate_pan_coordinator(self) -> bool | None: if self.mac_capability_flags is None: return None return bool( self.mac_capability_flags & self.MACCapabilityFlags.AlternatePanCoordinator ) @property def 
is_full_function_device(self) -> bool | None: if self.mac_capability_flags is None: return None return bool( self.mac_capability_flags & self.MACCapabilityFlags.FullFunctionDevice ) @property def is_mains_powered(self) -> bool | None: if self.mac_capability_flags is None: return None return bool(self.mac_capability_flags & self.MACCapabilityFlags.MainsPowered) @property def is_receiver_on_when_idle(self) -> bool | None: if self.mac_capability_flags is None: return None return bool(self.mac_capability_flags & self.MACCapabilityFlags.RxOnWhenIdle) @property def is_security_capable(self) -> bool | None: if self.mac_capability_flags is None: return None return bool(self.mac_capability_flags & self.MACCapabilityFlags.SecurityCapable) @property def allocate_address(self) -> bool | None: if self.mac_capability_flags is None: return None return bool(self.mac_capability_flags & self.MACCapabilityFlags.AllocateAddress) class MultiAddress(t.Struct): """Used for binds, represents an IEEE+endpoint or NWK address""" addrmode: t.uint8_t nwk: t.uint16_t = t.StructField(requires=lambda s: s.addrmode == 0x01) ieee: t.EUI64 = t.StructField(requires=lambda s: s.addrmode == 0x03) endpoint: t.uint8_t = t.StructField(requires=lambda s: s.addrmode == 0x03) @classmethod def deserialize(cls, data): r, data = super().deserialize(data) if r.addrmode not in (0x01, 0x03): raise ValueError("Invalid MultiAddress - unknown address mode") return r, data def serialize(self): if self.addrmode not in (0x01, 0x03): raise ValueError("Invalid MultiAddress - unknown address mode") return super().serialize() class _NeighborEnums: class DeviceType(t.enum2): Coordinator = 0x0 Router = 0x1 EndDevice = 0x2 Unknown = 0x3 class RxOnWhenIdle(t.enum2): Off = 0x0 On = 0x1 Unknown = 0x2 class Relationship(t.enum3): Parent = 0x0 Child = 0x1 Sibling = 0x2 NoneOfTheAbove = 0x3 PreviousChild = 0x4 class PermitJoins(t.enum2): NotAccepting = 0x0 Accepting = 0x1 Unknown = 0x2 class Neighbor(t.Struct): """Neighbor 
Descriptor""" PermitJoins = _NeighborEnums.PermitJoins DeviceType = _NeighborEnums.DeviceType RxOnWhenIdle = _NeighborEnums.RxOnWhenIdle Relationship = _NeighborEnums.Relationship # Backwards-compatible alternate spelling RelationShip = Relationship extended_pan_id: t.ExtendedPanId ieee: t.EUI64 nwk: t.NWK device_type: _NeighborEnums.DeviceType rx_on_when_idle: _NeighborEnums.RxOnWhenIdle relationship: _NeighborEnums.Relationship reserved1: t.uint1_t permit_joining: _NeighborEnums.PermitJoins reserved2: t.uint6_t depth: t.uint8_t lqi: t.uint8_t @classmethod def _parse_packed(cls, packed: t.uint8_t) -> dict[str, typing.Any]: data = 18 * b"\x00" + t.uint16_t(packed).serialize() + 3 * b"\x00" tmp_neighbor, _ = cls.deserialize(data) return { "device_type": tmp_neighbor.device_type, "rx_on_when_idle": tmp_neighbor.rx_on_when_idle, "relationship": tmp_neighbor.relationship, "reserved1": tmp_neighbor.reserved1, } class Neighbors(t.Struct): """Mgmt_Lqi_rsp""" Entries: t.uint8_t StartIndex: t.uint8_t NeighborTableList: t.LVList[Neighbor] class RouteStatus(t.enum3): """Route descriptor route status.""" Active = 0x00 Discovery_Underway = 0x01 Discovery_Failed = 0x02 Inactive = 0x03 Validation_Underway = 0x04 Reserved_5 = 0x05 Reserved_6 = 0x06 Reserved_7 = 0x07 class Route(t.Struct): """Route Descriptor""" DstNWK: t.NWK RouteStatus: RouteStatus # Whether the device is a memory constrained concentrator. MemoryConstrained: t.uint1_t # The destination is a concentrator that issued a many-to-one request. ManyToOne: t.uint1_t # A route record command frame should be sent to the destination prior to the next # data packet. 
RouteRecordRequired: t.uint1_t
    Reserved: t.uint2_t
    NextHop: t.NWK


class Routes(t.Struct):
    """Routing table segment: count, start index and the table entries."""

    Entries: t.uint8_t
    StartIndex: t.uint8_t
    RoutingTableList: t.LVList[Route]


class NwkUpdate(t.Struct):
    # Special ScanDuration values: instead of an energy scan the request
    # carries a channel change (0xFE) or a channel-mask/manager-address
    # change (0xFF), which switches the optional fields below.
    CHANNEL_CHANGE_REQ = 0xFE
    CHANNEL_MASK_MANAGER_ADDR_CHANGE_REQ = 0xFF

    ScanChannels: t.Channels
    ScanDuration: t.uint8_t
    # Only present for actual scans (duration <= 0x05).
    ScanCount: t.uint8_t = t.StructField(requires=lambda s: s.ScanDuration <= 0x05)
    # Only present for the two special-request forms.
    nwkUpdateId: t.uint8_t = t.StructField(
        requires=lambda s: s.ScanDuration
        in (s.CHANNEL_CHANGE_REQ, s.CHANNEL_MASK_MANAGER_ADDR_CHANGE_REQ)
    )
    # Only present for the channel-mask/manager-address change form.
    nwkManagerAddr: t.NWK = t.StructField(
        requires=lambda s: s.ScanDuration == s.CHANNEL_MASK_MANAGER_ADDR_CHANGE_REQ
    )


class Binding(t.Struct):
    """Single binding table entry (source IEEE/endpoint, cluster, destination)."""

    SrcAddress: t.EUI64
    SrcEndpoint: t.uint8_t
    ClusterId: t.uint16_t
    DstAddress: MultiAddress


class AddrRequestType(t.enum8):
    Single = 0x00
    Extended = 0x01


class Status(t.enum8):
    # The requested operation or transmission was completed successfully.
    SUCCESS = 0x00
    # The supplied request type was invalid.
    INV_REQUESTTYPE = 0x80
    # The requested device did not exist on a device following a child
    # descriptor request to a parent.
    DEVICE_NOT_FOUND = 0x81
    # The supplied endpoint was equal to = 0x00 or between 0xf1 and 0xff.
    INVALID_EP = 0x82
    # The requested endpoint is not described by a simple descriptor.
    NOT_ACTIVE = 0x83
    # The requested optional feature is not supported on the target device.
    NOT_SUPPORTED = 0x84
    # A timeout has occurred with the requested operation.
    TIMEOUT = 0x85
    # The end device bind request was unsuccessful due to a failure to match
    # any suitable clusters.
    NO_MATCH = 0x86
    # The unbind request was unsuccessful due to the coordinator or source
    # device not having an entry in its binding table to unbind.
    NO_ENTRY = 0x88
    # A child descriptor was not available following a discovery request to a
    # parent.
    NO_DESCRIPTOR = 0x89
    # The device does not have storage space to support the requested
    # operation.
INSUFFICIENT_SPACE = 0x8A # The device is not in the proper state to support the requested operation. NOT_PERMITTED = 0x8B # The device does not have table space to support the operation. TABLE_FULL = 0x8C # The permissions configuration table on the target indicates that the # request is not authorized from this device. NOT_AUTHORIZED = 0x8D @classmethod def _missing_(cls, value): chained = t.APSStatus(value) status = cls._member_type_.__new__(cls, chained.value) status._name_ = chained.name status._value_ = value return status NWK = ("NWKAddr", t.NWK) NWKI = ("NWKAddrOfInterest", t.NWK) IEEE = ("IEEEAddr", t.EUI64) STATUS = ("Status", Status) class _CommandID(t.uint16_t, repr="hex"): pass class ZDOCmd(t.enum_factory(_CommandID)): # Device and Service Discovery Server Requests NWK_addr_req = 0x0000 IEEE_addr_req = 0x0001 Node_Desc_req = 0x0002 Power_Desc_req = 0x0003 Simple_Desc_req = 0x0004 Active_EP_req = 0x0005 Match_Desc_req = 0x0006 Complex_Desc_req = 0x0010 User_Desc_req = 0x0011 Discovery_Cache_req = 0x0012 Device_annce = 0x0013 User_Desc_set = 0x0014 System_Server_Discovery_req = 0x0015 Discovery_store_req = 0x0016 Node_Desc_store_req = 0x0017 Active_EP_store_req = 0x0019 Simple_Desc_store_req = 0x001A Remove_node_cache_req = 0x001B Find_node_cache_req = 0x001C Extended_Simple_Desc_req = 0x001D Extended_Active_EP_req = 0x001E Parent_annce = 0x001F # Bind Management Server Services Responses End_Device_Bind_req = 0x0020 Bind_req = 0x0021 Unbind_req = 0x0022 # Network Management Server Services Requests # ... TODO optional stuff ... Mgmt_Lqi_req = 0x0031 Mgmt_Rtg_req = 0x0032 Mgmt_Bind_req = 0x0033 # ... TODO optional stuff ... Mgmt_Leave_req = 0x0034 Mgmt_Permit_Joining_req = 0x0036 Mgmt_NWK_Update_req = 0x0038 # ... TODO optional stuff ... 
# Responses # Device and Service Discovery Server Responses NWK_addr_rsp = 0x8000 IEEE_addr_rsp = 0x8001 Node_Desc_rsp = 0x8002 Power_Desc_rsp = 0x8003 Simple_Desc_rsp = 0x8004 Active_EP_rsp = 0x8005 Match_Desc_rsp = 0x8006 Complex_Desc_rsp = 0x8010 User_Desc_rsp = 0x8011 Discovery_Cache_rsp = 0x8012 User_Desc_conf = 0x8014 System_Server_Discovery_rsp = 0x8015 Discovery_Store_rsp = 0x8016 Node_Desc_store_rsp = 0x8017 Power_Desc_store_rsp = 0x8018 Active_EP_store_rsp = 0x8019 Simple_Desc_store_rsp = 0x801A Remove_node_cache_rsp = 0x801B Find_node_cache_rsp = 0x801C Extended_Simple_Desc_rsp = 0x801D Extended_Active_EP_rsp = 0x801E Parent_annce_rsp = 0x801F # Bind Management Server Services Responses End_Device_Bind_rsp = 0x8020 Bind_rsp = 0x8021 Unbind_rsp = 0x8022 # ... TODO optional stuff ... # Network Management Server Services Responses Mgmt_Lqi_rsp = 0x8031 Mgmt_Rtg_rsp = 0x8032 Mgmt_Bind_rsp = 0x8033 # ... TODO optional stuff ... Mgmt_Leave_rsp = 0x8034 Mgmt_Permit_Joining_rsp = 0x8036 # ... TODO optional stuff ... 
Mgmt_NWK_Update_rsp = 0x8038 CLUSTERS = { # Device and Service Discovery Server Requests ZDOCmd.NWK_addr_req: ( IEEE, ("RequestType", AddrRequestType), ("StartIndex", t.uint8_t), ), ZDOCmd.IEEE_addr_req: ( NWKI, ("RequestType", AddrRequestType), ("StartIndex", t.uint8_t), ), ZDOCmd.Node_Desc_req: (NWKI,), ZDOCmd.Power_Desc_req: (NWKI,), ZDOCmd.Simple_Desc_req: (NWKI, ("EndPoint", t.uint8_t)), ZDOCmd.Active_EP_req: (NWKI,), ZDOCmd.Match_Desc_req: ( NWKI, ("ProfileID", t.uint16_t), ("InClusterList", t.LVList[t.uint16_t]), ("OutClusterList", t.LVList[t.uint16_t]), ), # ZDO.Complex_Desc_req: (NWKI, ), ZDOCmd.User_Desc_req: (NWKI,), ZDOCmd.Discovery_Cache_req: (NWK, IEEE), ZDOCmd.Device_annce: (NWK, IEEE, ("Capability", t.uint8_t)), ZDOCmd.User_Desc_set: ( NWKI, ("UserDescriptor", t.FixedList[16, t.uint8_t]), ), # Really a string ZDOCmd.System_Server_Discovery_req: (("ServerMask", t.uint16_t),), ZDOCmd.Discovery_store_req: ( NWK, IEEE, ("NodeDescSize", t.uint8_t), ("PowerDescSize", t.uint8_t), ("ActiveEPSize", t.uint8_t), ("SimpleDescSizeList", t.LVList[t.uint8_t]), ), ZDOCmd.Node_Desc_store_req: (NWK, IEEE, ("NodeDescriptor", NodeDescriptor)), ZDOCmd.Active_EP_store_req: (NWK, IEEE, ("ActiveEPList", t.LVList[t.uint8_t])), ZDOCmd.Simple_Desc_store_req: ( NWK, IEEE, ("SimpleDescriptor", SizePrefixedSimpleDescriptor), ), ZDOCmd.Remove_node_cache_req: (NWK, IEEE), ZDOCmd.Find_node_cache_req: (NWK, IEEE), ZDOCmd.Extended_Simple_Desc_req: ( NWKI, ("EndPoint", t.uint8_t), ("StartIndex", t.uint8_t), ), ZDOCmd.Extended_Active_EP_req: (NWKI, ("StartIndex", t.uint8_t)), ZDOCmd.Parent_annce: (("Children", t.LVList[t.EUI64]),), # Bind Management Server Services Responses ZDOCmd.End_Device_Bind_req: ( ("BindingTarget", t.uint16_t), ("SrcAddress", t.EUI64), ("SrcEndpoint", t.uint8_t), ("ProfileID", t.uint8_t), ("InClusterList", t.LVList[t.uint8_t]), ("OutClusterList", t.LVList[t.uint8_t]), ), ZDOCmd.Bind_req: ( ("SrcAddress", t.EUI64), ("SrcEndpoint", t.uint8_t), ("ClusterID", 
t.uint16_t), ("DstAddress", MultiAddress), ), ZDOCmd.Unbind_req: ( ("SrcAddress", t.EUI64), ("SrcEndpoint", t.uint8_t), ("ClusterID", t.uint16_t), ("DstAddress", MultiAddress), ), # Network Management Server Services Requests # ... TODO optional stuff ... ZDOCmd.Mgmt_Lqi_req: (("StartIndex", t.uint8_t),), ZDOCmd.Mgmt_Rtg_req: (("StartIndex", t.uint8_t),), ZDOCmd.Mgmt_Bind_req: (("StartIndex", t.uint8_t),), # ... TODO optional stuff ... ZDOCmd.Mgmt_Leave_req: (("DeviceAddress", t.EUI64), ("Options", t.bitmap8)), ZDOCmd.Mgmt_Permit_Joining_req: ( ("PermitDuration", t.uint8_t), ("TC_Significant", t.Bool), ), ZDOCmd.Mgmt_NWK_Update_req: (("NwkUpdate", NwkUpdate),), # ... TODO optional stuff ... # Responses # Device and Service Discovery Server Responses ZDOCmd.NWK_addr_rsp: ( STATUS, IEEE, NWK, ("NumAssocDev", t.Optional(t.uint8_t)), ("StartIndex", t.Optional(t.uint8_t)), ("NWKAddressAssocDevList", t.Optional(t.List[t.NWK])), ), ZDOCmd.IEEE_addr_rsp: ( STATUS, IEEE, NWK, ("NumAssocDev", t.Optional(t.uint8_t)), ("StartIndex", t.Optional(t.uint8_t)), ("NWKAddrAssocDevList", t.Optional(t.List[t.NWK])), ), ZDOCmd.Node_Desc_rsp: ( STATUS, NWKI, ("NodeDescriptor", t.Optional(NodeDescriptor)), ), ZDOCmd.Power_Desc_rsp: ( STATUS, NWKI, ("PowerDescriptor", t.Optional(PowerDescriptor)), ), ZDOCmd.Simple_Desc_rsp: ( STATUS, NWKI, ("SimpleDescriptor", t.Optional(SizePrefixedSimpleDescriptor)), ), ZDOCmd.Active_EP_rsp: (STATUS, NWKI, ("ActiveEPList", t.LVList[t.uint8_t])), ZDOCmd.Match_Desc_rsp: (STATUS, NWKI, ("MatchList", t.LVList[t.uint8_t])), # ZDO.Complex_Desc_rsp: ( # STATUS, # NWKI, # ('Length', t.uint8_t), # ('ComplexDescriptor', t.Optional(ComplexDescriptor)), # ), ZDOCmd.User_Desc_rsp: ( STATUS, NWKI, ("Length", t.uint8_t), ("UserDescriptor", t.Optional(t.FixedList[16, t.uint8_t])), ), ZDOCmd.Discovery_Cache_rsp: (STATUS,), ZDOCmd.User_Desc_conf: (STATUS, NWKI), ZDOCmd.System_Server_Discovery_rsp: (STATUS, ("ServerMask", t.uint16_t)), ZDOCmd.Discovery_Store_rsp: 
(STATUS,), ZDOCmd.Node_Desc_store_rsp: (STATUS,), ZDOCmd.Power_Desc_store_rsp: (STATUS, IEEE, ("PowerDescriptor", PowerDescriptor)), ZDOCmd.Active_EP_store_rsp: (STATUS,), ZDOCmd.Simple_Desc_store_rsp: (STATUS,), ZDOCmd.Remove_node_cache_rsp: (STATUS,), ZDOCmd.Find_node_cache_rsp: (("CacheNWKAddr", t.EUI64), NWK, IEEE), ZDOCmd.Extended_Simple_Desc_rsp: ( STATUS, NWK, ("Endpoint", t.uint8_t), ("AppInputClusterCount", t.uint8_t), ("AppOutputClusterCount", t.uint8_t), ("StartIndex", t.uint8_t), ("AppClusterList", t.Optional(t.List[t.uint16_t])), ), ZDOCmd.Extended_Active_EP_rsp: ( STATUS, NWKI, ("ActiveEPCount", t.uint8_t), ("StartIndex", t.uint8_t), ("ActiveEPList", t.List[t.uint8_t]), ), ZDOCmd.Parent_annce_rsp: (STATUS, ("Children", t.LVList[t.EUI64])), # Bind Management Server Services Responses ZDOCmd.End_Device_Bind_rsp: (STATUS,), ZDOCmd.Bind_rsp: (STATUS,), ZDOCmd.Unbind_rsp: (STATUS,), # ... TODO optional stuff ... # Network Management Server Services Responses ZDOCmd.Mgmt_Lqi_rsp: (STATUS, ("Neighbors", t.Optional(Neighbors))), ZDOCmd.Mgmt_Rtg_rsp: (STATUS, ("Routes", t.Optional(Routes))), ZDOCmd.Mgmt_Bind_rsp: ( STATUS, ("BindingTableEntries", t.uint8_t), ("StartIndex", t.uint8_t), ("BindingTableList", t.LVList[Binding]), ), # ... TODO optional stuff ... ZDOCmd.Mgmt_Leave_rsp: (STATUS,), ZDOCmd.Mgmt_Permit_Joining_rsp: (STATUS,), ZDOCmd.Mgmt_NWK_Update_rsp: ( STATUS, ("ScannedChannels", t.Channels), ("TotalTransmissions", t.uint16_t), ("TransmissionFailures", t.uint16_t), ("EnergyValues", t.LVList[t.uint8_t]), ) # ... TODO optional stuff ... 
} # Rewrite to (name, param_names, param_types) for command_id, schema in CLUSTERS.items(): param_names = [p[0] for p in schema] param_types = [p[1] for p in schema] CLUSTERS[command_id] = (param_names, param_types) class ZDOHeader: """Just a wrapper representing ZDO header, similar to ZCL header.""" def __init__(self, command_id: t.uint16_t = 0x0000, tsn: t.uint8_t = 0) -> None: self._command_id = ZDOCmd(command_id) self._tsn = t.uint8_t(tsn) @property def command_id(self) -> ZDOCmd: """Return ZDO command.""" return self._command_id @command_id.setter def command_id(self, value: t.uint16_t) -> None: """Command ID setter.""" self._command_id = ZDOCmd(value) @property def is_reply(self) -> bool: """Return True if this is a reply.""" return bool(self._command_id & 0x8000) @property def tsn(self) -> t.uint8_t: """Return transaction seq number.""" return self._tsn @tsn.setter def tsn(self, value: t.uint8_t) -> None: """Set TSN.""" self._tsn = t.uint8_t(value) @classmethod def deserialize( cls, command_id: t.uint16_t, data: bytes ) -> tuple[ZDOHeader, bytes]: """Deserialize data.""" tsn, data = t.uint8_t.deserialize(data) return cls(command_id, tsn), data def serialize(self) -> bytes: """Serialize header.""" return self.tsn.serialize()