pax_global_header00006660000000000000000000000064141762453710014524gustar00rootroot0000000000000052 comment=92649264871918c54f74d0b8fa690b1e8e27203b alembic-rel_1_7_6/000077500000000000000000000000001417624537100141375ustar00rootroot00000000000000alembic-rel_1_7_6/.coveragerc000066400000000000000000000000701417624537100162550ustar00rootroot00000000000000[run] include=alembic/* [report] omit=alembic/testing/*alembic-rel_1_7_6/.github/000077500000000000000000000000001417624537100154775ustar00rootroot00000000000000alembic-rel_1_7_6/.github/ISSUE_TEMPLATE/000077500000000000000000000000001417624537100176625ustar00rootroot00000000000000alembic-rel_1_7_6/.github/ISSUE_TEMPLATE/bug_report.md000066400000000000000000000015541417624537100223610ustar00rootroot00000000000000--- name: Bug report about: Create a report to help us improve title: '' labels: requires triage assignees: '' --- **Describe the bug** **Expected behavior** **To Reproduce** Please try to provide a [Minimal, Complete, and Verifiable](http://stackoverflow.com/help/mcve) example, with the migration script and/or the SQLAlchemy tables or models involved. See also [Reporting Bugs](https://www.sqlalchemy.org/participate.html#bugs) on the website. ```py # Insert code here ``` **Error** ``` # Copy error here. Please include the full stack trace. ``` **Versions.** - OS: - Python: - Alembic: - SQLAlchemy: - Database: - DBAPI: **Additional context** **Have a nice day!** alembic-rel_1_7_6/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000013361417624537100216550ustar00rootroot00000000000000blank_issues_enabled: false contact_links: - name: Usage Questions (GitHub Discussions) url: https://github.com/sqlalchemy/alembic/discussions/new?category=Usage-Questions about: Questions and Answers for Alembic Users - name: Live Chat on Gitter url: https://gitter.im/sqlalchemy/community about: Searchable Web-Based Chat - name: Ideas / Feature Proposal (GitHub Discussions) url: https://github.com/sqlalchemy/alembic/discussions/new?category=Ideas about: Use this for initial discussion for new features and suggestions - name: SQLAlchemy Community Guide url: https://www.sqlalchemy.org/support.html about: Start here for an overview of SQLAlchemy's support network and posting guidelines alembic-rel_1_7_6/.github/ISSUE_TEMPLATE/use_case.md000066400000000000000000000011361417624537100217740ustar00rootroot00000000000000--- name: Request a new use case about: Support for new SQL syntaxes, database capabilities, DBAPIs and DBAPI features title: '' labels: requires triage,use case assignees: '' --- **Describe the use case** **Databases / Backends / Drivers targeted** **Example Use** **Additional context** **Have a nice day!** alembic-rel_1_7_6/.github/pull_request_template.md000066400000000000000000000021431417624537100224400ustar00rootroot00000000000000 ### Description ### Checklist This pull request is: - [ ] A documentation / typographical error fix - Good to go, no issue or tests are needed - [ ] A short code fix - please include the issue number, and create an issue if none exists, which must include a complete example of the issue. one line code fixes without an issue and demonstration will not be accepted. - Please include: `Fixes: #` in the commit message - please include tests. one line code fixes without tests will not be accepted. - [ ] A new feature implementation - please include the issue number, and create an issue if none exists, which must include a complete example of how the feature would look. 
- Please include: `Fixes: #` in the commit message - please include tests. **Have a nice day!** alembic-rel_1_7_6/.github/workflows/000077500000000000000000000000001417624537100175345ustar00rootroot00000000000000alembic-rel_1_7_6/.github/workflows/run-on-pr.yaml000066400000000000000000000037421417624537100222630ustar00rootroot00000000000000name: Run tests on a pr on: # run on pull request to main excluding changes that are only on doc or example folders pull_request: branches: - main paths-ignore: - "docs/**" env: # global env to all steps TOX_WORKERS: -n2 jobs: run-test-amd64: name: ${{ matrix.python-version }}-${{ matrix.sqlalchemy }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: # run this job using this matrix, excluding some combinations below. matrix: os: - "ubuntu-latest" python-version: - "3.10" sqlalchemy: - sqla13 - sqla14 - sqlamain # abort all jobs as soon as one fails fail-fast: true # steps to run in each job. Some are github actions, others run shell commands steps: - name: Checkout repo uses: actions/checkout@v2 - name: Set up python uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} - name: Install dependencies run: | python -m pip install --upgrade pip pip install --upgrade tox setuptools pip list - name: Run tests run: tox -e py-${{ matrix.sqlalchemy }} run-pep484: name: pep484-${{ matrix.python-version }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: matrix: os: - "ubuntu-latest" python-version: - "3.10" fail-fast: false steps: - name: Checkout repo uses: actions/checkout@v2 - name: Set up python uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} - name: Install dependencies run: | python -m pip install --upgrade pip pip install --upgrade tox setuptools pip list - name: Run pep484 run: tox -e pep484 alembic-rel_1_7_6/.github/workflows/run-test.yaml000066400000000000000000000044161417624537100222060ustar00rootroot00000000000000name: Run tests on: # run on push in main or rel_* branches excluding changes are only on doc or example folders push: branches: - main - "rel_*" # branches used to test the workflow - "workflow_test_*" paths-ignore: - "docs/**" env: # global env to all steps TOX_WORKERS: -n2 jobs: run-test: name: ${{ matrix.python-version }}-${{ matrix.sqlalchemy }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: # run this job using this matrix, excluding some combinations below. matrix: os: - "ubuntu-latest" - "windows-latest" - "macos-latest" python-version: - "3.6" - "3.7" - "3.8" - "3.9" - "3.10" sqlalchemy: - sqla13 - sqla14 - sqlamain exclude: # main no longer support 3.6 - sqlalchemy: sqlamain python-version: "3.6" fail-fast: false # steps to run in each job. 
Some are github actions, others run shell commands steps: - name: Checkout repo uses: actions/checkout@v2 - name: Set up python uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} - name: Install dependencies run: | python -m pip install --upgrade pip pip install --upgrade tox setuptools pip list - name: Run tests run: tox -e py-${{ matrix.sqlalchemy }} run-pep484: name: pep484-${{ matrix.python-version }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: matrix: os: - "ubuntu-latest" python-version: - "3.9" - "3.10" fail-fast: false steps: - name: Checkout repo uses: actions/checkout@v2 - name: Set up python uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} - name: Install dependencies run: | python -m pip install --upgrade pip pip install --upgrade tox setuptools pip list - name: Run tox pep484 run: tox -e pep484 alembic-rel_1_7_6/.gitignore000066400000000000000000000004061417624537100161270ustar00rootroot00000000000000*.pyc *.pyo /build/ dist/ /docs/build/output/ *.orig alembic.ini .venv /venv/ *.egg-info .coverage coverage.xml .tox *.patch /scratch /scratch_test_* /test_schema.db /test.cfg .idea/ .vscode/ .pytest_cache/ /docs/build/_build/ /pysqlite_test_schema.db *.sqlite3 alembic-rel_1_7_6/.gitreview000066400000000000000000000001361417624537100161450ustar00rootroot00000000000000[gerrit] host=gerrit.sqlalchemy.org project=sqlalchemy/alembic defaultbranch=main port=29418 alembic-rel_1_7_6/.pre-commit-config.yaml000066400000000000000000000012171417624537100204210ustar00rootroot00000000000000# See https://pre-commit.com for more information # See https://pre-commit.com/hooks.html for more hooks repos: - repo: https://github.com/python/black rev: 21.5b1 hooks: - id: black - repo: https://github.com/sqlalchemyorg/zimports rev: v0.4.0 hooks: - id: zimports args: - --keep-unused-type-checking - repo: https://github.com/pycqa/flake8 rev: 3.9.2 hooks: - id: flake8 additional_dependencies: - flake8-import-order - flake8-builtins - flake8-docstrings - flake8-rst-docstrings - pydocstyle<4.0.0 - pygments alembic-rel_1_7_6/CHANGES000066400000000000000000000002621417624537100151320ustar00rootroot00000000000000===== MOVED ===== Please see: /docs/changelog.html /docs/build/changelog.rst or http://alembic.sqlalchemy.org/en/latest/changelog.html for the current CHANGES. alembic-rel_1_7_6/LICENSE000066400000000000000000000020421417624537100151420ustar00rootroot00000000000000Copyright 2009-2022 Michael Bayer. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.alembic-rel_1_7_6/MANIFEST.in000066400000000000000000000005421417624537100156760ustar00rootroot00000000000000recursive-include docs *.html *.css *.txt *.js *.jpg *.png *.py Makefile *.rst *.sty recursive-include tests *.py *.dat *.pyi recursive-include alembic/templates *.mako README *.py *.pyi recursive-include alembic *.py *.pyi py.typed recursive-include tools *.py include README* LICENSE CHANGES* tox.ini prune docs/build/output exclude pyproject.toml alembic-rel_1_7_6/README.rst000066400000000000000000000131501417624537100156260ustar00rootroot00000000000000Alembic is a database migrations tool written by the author of `SQLAlchemy `_. A migrations tool offers the following functionality: * Can emit ALTER statements to a database in order to change the structure of tables and other constructs * Provides a system whereby "migration scripts" may be constructed; each script indicates a particular series of steps that can "upgrade" a target database to a new version, and optionally a series of steps that can "downgrade" similarly, doing the same steps in reverse. * Allows the scripts to execute in some sequential manner. The goals of Alembic are: * Very open ended and transparent configuration and operation. A new Alembic environment is generated from a set of templates which is selected among a set of options when setup first occurs. The templates then deposit a series of scripts that define fully how database connectivity is established and how migration scripts are invoked; the migration scripts themselves are generated from a template within that series of scripts. The scripts can then be further customized to define exactly how databases will be interacted with and what structure new migration files should take. * Full support for transactional DDL. The default scripts ensure that all migrations occur within a transaction - for those databases which support this (Postgresql, Microsoft SQL Server), migrations can be tested with no need to manually undo changes upon failure. * Minimalist script construction. Basic operations like renaming tables/columns, adding/removing columns, changing column attributes can be performed through one line commands like alter_column(), rename_table(), add_constraint(). There is no need to recreate full SQLAlchemy Table structures for simple operations like these - the functions themselves generate minimalist schema structures behind the scenes to achieve the given DDL sequence. * "auto generation" of migrations. While real world migrations are far more complex than what can be automatically determined, Alembic can still eliminate the initial grunt work in generating new migration directives from an altered schema. The ``--autogenerate`` feature will inspect the current status of a database using SQLAlchemy's schema inspection capabilities, compare it to the current state of the database model as specified in Python, and generate a series of "candidate" migrations, rendering them into a new migration script as Python directives. The developer then edits the new file, adding additional directives and data migrations as needed, to produce a finished migration. Table and column level changes can be detected, with constraints and indexes to follow as well. 
* Full support for migrations generated as SQL scripts. Those of us who work in corporate environments know that direct access to DDL commands on a production database is a rare privilege, and DBAs want textual SQL scripts. Alembic's usage model and commands are oriented towards being able to run a series of migrations into a textual output file as easily as it runs them directly to a database. Care must be taken in this mode to not invoke other operations that rely upon in-memory SELECTs of rows - Alembic tries to provide helper constructs like bulk_insert() to help with data-oriented operations that are compatible with script-based DDL. * Non-linear, dependency-graph versioning. Scripts are given UUID identifiers similarly to a DVCS, and the linkage of one script to the next is achieved via human-editable markers within the scripts themselves. The structure of a set of migration files is considered as a directed-acyclic graph, meaning any migration file can be dependent on any other arbitrary set of migration files, or none at all. Through this open-ended system, migration files can be organized into branches, multiple roots, and mergepoints, without restriction. Commands are provided to produce new branches, roots, and merges of branches automatically. * Provide a library of ALTER constructs that can be used by any SQLAlchemy application. The DDL constructs build upon SQLAlchemy's own DDLElement base and can be used standalone by any application or script. * At long last, bring SQLite and its inablity to ALTER things into the fold, but in such a way that SQLite's very special workflow needs are accommodated in an explicit way that makes the most of a bad situation, through the concept of a "batch" migration, where multiple changes to a table can be batched together to form a series of instructions for a single, subsequent "move-and-copy" workflow. You can even use "move-and-copy" workflow for other databases, if you want to recreate a table in the background on a busy system. Documentation and status of Alembic is at https://alembic.sqlalchemy.org/ The SQLAlchemy Project ====================== Alembic is part of the `SQLAlchemy Project `_ and adheres to the same standards and conventions as the core project. Development / Bug reporting / Pull requests ___________________________________________ Please refer to the `SQLAlchemy Community Guide `_ for guidelines on coding and participating in this project. Code of Conduct _______________ Above all, SQLAlchemy places great emphasis on polite, thoughtful, and constructive communication between users and developers. Please see our current Code of Conduct at `Code of Conduct `_. License ======= Alembic is distributed under the `MIT license `_. alembic-rel_1_7_6/README.unittests.rst000066400000000000000000000352311417624537100176730ustar00rootroot00000000000000================================ SQLALCHEMY / ALEMBIC UNIT TESTS ================================ Alembic makes use of SQLAlchemy's test framework for its test suite, so working with Alembic's suite is similar to that of working with SQLAlchemy. This document is mostly copied directly from that of SQLAlchemy. Basic Test Running ================== Tox is used to run the test suite fully. For basic test runs against a single Python interpreter:: tox Advanced Tox Options ==================== For more elaborate CI-style test running, the tox script provided will run against various Python / database targets. 
For a basic run against Python 3.9 using an in-memory SQLite database:: tox -e py39-sqlite The tox runner contains a series of target combinations that can run against various combinations of databases. The test suite can be run against SQLite with "backend" tests also running against a PostgreSQL database:: tox -e py39-sqlite-postgresql Or to run just "backend" tests against a MySQL database:: tox -e py39-mysql-backendonly Running against backends other than SQLite requires that a database of that vendor be available at a specific URL. See "Setting Up Databases" below for details. The pytest Engine ================= The tox runner is using pytest to invoke the test suite. Within the realm of pytest, SQLAlchemy itself is adding a large series of option and customizations to the pytest runner using plugin points, to allow for SQLAlchemy's multiple database support, database setup/teardown and connectivity, multi process support, as well as lots of skip / database selection rules. Running tests with pytest directly grants more immediate control over database options and test selection. A generic pytest run looks like:: pytest -n4 Above, the full test suite will run against SQLite, using four processes. If the "-n" flag is not used, the pytest-xdist is skipped and the tests will run linearly, which will take a pretty long time. The pytest command line is more handy for running subsets of tests and to quickly allow for custom database connections. Example:: pytest --dburi=postgresql+psycopg2://scott:tiger@localhost/test test/sql/test_query.py Above will run the tests in the test/sql/test_query.py file (a pretty good file for basic "does this database work at all?" to start with) against a running PostgreSQL database at the given URL. The pytest frontend can also run tests against multiple kinds of databases at once - a large subset of tests are marked as "backend" tests, which will be run against each available backend, and additionally lots of tests are targeted at specific backends only, which only run if a matching backend is made available. For example, to run the test suite against both PostgreSQL and MySQL at the same time:: pytest -n4 --db postgresql --db mysql Setting Up Databases ==================== The test suite identifies several built-in database tags that run against a pre-set URL. These can be seen using --dbs:: $ pytest --dbs Available --db options (use --dburi to override) default sqlite:///:memory: firebird firebird://sysdba:masterkey@localhost//Users/classic/foo.fdb mariadb mariadb://scott:tiger@192.168.0.199:3307/test mssql mssql+pyodbc://scott:tiger^5HHH@mssql2017:1433/test?driver=ODBC+Driver+17+for+SQL+Server mssql_pymssql mssql+pymssql://scott:tiger@ms_2008 mysql mysql://scott:tiger@127.0.0.1:3306/test?charset=utf8mb4 oracle oracle://scott:tiger@127.0.0.1:1521 oracle8 oracle://scott:tiger@127.0.0.1:1521/?use_ansi=0 pg8000 postgresql+pg8000://scott:tiger@127.0.0.1:5432/test postgresql postgresql://scott:tiger@127.0.0.1:5432/test postgresql_psycopg2cffi postgresql+psycopg2cffi://scott:tiger@127.0.0.1:5432/test pymysql mysql+pymysql://scott:tiger@127.0.0.1:3306/test?charset=utf8mb4 sqlite sqlite:///:memory: sqlite_file sqlite:///querytest.db Note that a pyodbc URL **must be against a hostname / database name combination, not a DSN name** when using the multiprocessing option; this is because the test suite needs to generate new URLs to refer to per-process databases that are created on the fly. 
What those mean is that if you have a database running that can be accessed by the above URL, you can run the test suite against it using ``--db ``. The URLs are present in the ``setup.cfg`` file. You can make your own URLs by creating a new file called ``test.cfg`` and adding your own ``[db]`` section:: # test.cfg file [db] my_postgresql=postgresql://username:pass@hostname/dbname Above, we can now run the tests with ``my_postgresql``:: pytest --db my_postgresql We can also override the existing names in our ``test.cfg`` file, so that we can run with the tox runner also:: # test.cfg file [db] postgresql=postgresql://username:pass@hostname/dbname Now when we run ``tox -e py39-postgresql``, it will use our custom URL instead of the fixed one in setup.cfg. Database Configuration ====================== Step one, the **database chosen for tests must be entirely empty**. A lot of what SQLAlchemy and Alembic test is creating and dropping lots of tables as well as running database introspection to see what is there. If there are pre-existing tables or other objects in the target database already, these will get in the way. A failed test run can also be followed by a run that includes the "--dropfirst" option, which will try to drop all existing tables in the target database. The above paragraph changes somewhat when the multiprocessing option is used, in that separate databases will be created instead, however in the case of Postgresql, the starting database is used as a template, so the starting database must still be empty. See below for example configurations using docker. The test runner will by default create and drop tables within the default database that's in the database URL, *unless* the multiprocessing option is in use via the pytest "-n" flag, which invokes pytest-xdist. The multiprocessing option is **enabled by default** when using the tox runner. When multiprocessing is used, the SQLAlchemy testing framework will create a new database for each process, and then tear it down after the test run is complete. So it will be necessary for the database user to have access to CREATE DATABASE in order for this to work. Additionally, as mentioned earlier, the database URL must be formatted such that it can be rewritten on the fly to refer to these other databases, which means for pyodbc it must refer to a hostname/database name combination, not a DSN name. Several tests require alternate usernames or schemas to be present, which are used to test dotted-name access scenarios. On some databases such as Oracle these are usernames, and others such as PostgreSQL and MySQL they are schemas. The requirement applies to all backends except SQLite and Firebird. The names are:: test_schema test_schema_2 (only used on PostgreSQL and mssql) Please refer to your vendor documentation for the proper syntax to create these namespaces - the database user must have permission to create and drop tables within these schemas. Its perfectly fine to run the test suite without these namespaces present, it only means that a handful of tests which expect them to be present will fail. Additional steps specific to individual databases are as follows:: POSTGRESQL: To enable unicode testing with JSONB, create the database with UTF8 encoding:: postgres=# create database test with owner=scott encoding='utf8' template=template0; To include tests for HSTORE, create the HSTORE type engine:: postgres=# \c test; You are now connected to database "test" as user "postgresql". 
test=# create extension hstore; CREATE EXTENSION Full-text search configuration should be set to English, else several tests of ``.match()`` will fail. This can be set (if it isn't so already) with: ALTER DATABASE test SET default_text_search_config = 'pg_catalog.english' For two-phase transaction support, the max_prepared_transactions configuration variable must be set to a non-zero value in postgresql.conf. See https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-MAX-PREPARED-TRANSACTIONS for further background. ORACLE: a user named "test_schema" is created in addition to the default user. The primary database user needs to be able to create and drop tables, synonyms, and constraints within the "test_schema" user. For this to work fully, including that the user has the "REFERENCES" role in a remote schema for tables not yet defined (REFERENCES is per-table), it is required that the test the user be present in the "DBA" role: grant dba to scott; MSSQL: Tests that involve multiple connections require Snapshot Isolation ability implemented on the test database in order to prevent deadlocks that will occur with record locking isolation. This feature is only available with MSSQL 2005 and greater. You must enable snapshot isolation at the database level and set the default cursor isolation with two SQL commands: ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON Docker Configurations --------------------- The SQLAlchemy test can run against database running in Docker containers. This ensures that they are empty and that their configuration is not influenced by any local usage. The following configurations are just examples that developers can use to quickly set up a local environment for SQLAlchemy development. They are **NOT** intended for production use! **PostgreSQL configuration**:: # only needed if a local image of postgres is not already present docker pull postgres:12 # create the container with the proper configuration for sqlalchemy docker run --rm -e POSTGRES_USER='scott' -e POSTGRES_PASSWORD='tiger' -e POSTGRES_DB='test' -p 127.0.0.1:5432:5432 -d --name postgres postgres:12-alpine # configure the database sleep 10 docker exec -ti postgres psql -U scott -c 'CREATE SCHEMA test_schema; CREATE SCHEMA test_schema_2;' test # this last command is optional docker exec -ti postgres sed -i 's/#max_prepared_transactions = 0/max_prepared_transactions = 10/g' /var/lib/postgresql/data/postgresql.conf # To stop the container. It will also remove it. docker stop postgres **MySQL configuration**:: # only needed if a local image of mysql is not already present docker pull mysql:8 # create the container with the proper configuration for sqlalchemy docker run --rm -e MYSQL_USER='scott' -e MYSQL_PASSWORD='tiger' -e MYSQL_DATABASE='test' -e MYSQL_ROOT_PASSWORD='password' -p 127.0.0.1:3306:3306 -d --name mysql mysql:8 --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci # configure the database sleep 20 docker exec -ti mysql mysql -u root -ppassword -D test -w -e "GRANT ALL ON *.* TO scott@'%'; CREATE DATABASE test_schema CHARSET utf8mb4; CREATE DATABASE test_schema_2 CHARSET utf8mb4;" # To stop the container. It will also remove it. 
docker stop mysql **MariaDB configuration**:: # only needed if a local image of MariaDB is not already present docker pull mariadb # create the container with the proper configuration for sqlalchemy docker run --rm -e MYSQL_USER='scott' -e MYSQL_PASSWORD='tiger' -e MYSQL_DATABASE='test' -e MYSQL_ROOT_PASSWORD='password' -p 127.0.0.1:3306:3306 -d --name mariadb mariadb --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci # configure the database sleep 20 docker exec -ti mariadb mysql -u root -ppassword -D test -w -e "GRANT ALL ON *.* TO scott@'%'; CREATE DATABASE test_schema CHARSET utf8mb4; CREATE DATABASE test_schema_2 CHARSET utf8mb4;" # To stop the container. It will also remove it. docker stop mariadb **MSSQL configuration**:: # only needed if a local image of mssql is not already present docker pull mcr.microsoft.com/mssql/server:2019-CU1-ubuntu-16.04 # create the container with the proper configuration for sqlalchemy # it will use the Developer version docker run --rm -e 'ACCEPT_EULA=Y' -e 'SA_PASSWORD=yourStrong(!)Password' -p 127.0.0.1:1433:1433 -d --name mssql mcr.microsoft.com/mssql/server:2019-CU2-ubuntu-16.04 # configure the database sleep 20 docker exec -it mssql /opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P 'yourStrong(!)Password' -Q "sp_configure 'contained database authentication', 1; RECONFIGURE; CREATE DATABASE test CONTAINMENT = PARTIAL; ALTER DATABASE test SET ALLOW_SNAPSHOT_ISOLATION ON; ALTER DATABASE test SET READ_COMMITTED_SNAPSHOT ON; CREATE LOGIN scott WITH PASSWORD = 'tiger^5HHH'; ALTER SERVER ROLE sysadmin ADD MEMBER scott;" docker exec -it mssql /opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P 'yourStrong(!)Password' -d test -Q "CREATE SCHEMA test_schema" docker exec -it mssql /opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P 'yourStrong(!)Password' -d test -Q "CREATE SCHEMA test_schema_2" # To stop the container. It will also remove it. docker stop mssql NOTE: with this configuration the url to use is not the default one configured in setup, but ``mssql+pymssql://scott:tiger^5HHH@127.0.0.1:1433/test``. It can be used with pytest by using ``--db docker_mssql``. CONFIGURING LOGGING ------------------- SQLAlchemy logs its activity and debugging through Python's logging package. Any log target can be directed to the console with command line options, such as:: $ ./pytest test/orm/test_unitofwork.py -s \ --log-debug=sqlalchemy.pool --log-info=sqlalchemy.engine Above we add the pytest "-s" flag so that standard out is not suppressed. DEVELOPING AND TESTING NEW DIALECTS ------------------------------------------------------- Starting with Alembic 1.7, developers of third-party dialects can include the Alembic test suite using a method similar to that of the SQLAlchemy test suite. See the SQLAlchemy README for third-party dialects … https://github.com/sqlalchemy/sqlalchemy/blob/master/README.dialects.rst … for detail on the overall structure of a third-party dialect and how to incorporate the SQLAlchemy test suite. To add the Alembic test suite, simply add:: from alembic.testing.suite import * # noqa to your "test_suite.py" file. 
Or you can use two separate files:: # in test_suite_sqlalchemy.py from sqlalchemy.testing.suite import * # noqa and:: # in test_suite_alembic.py from alembic.testing.suite import * # noqa alembic-rel_1_7_6/alembic/000077500000000000000000000000001417624537100155335ustar00rootroot00000000000000alembic-rel_1_7_6/alembic/__init__.py000066400000000000000000000001121417624537100176360ustar00rootroot00000000000000import sys from . import context from . import op __version__ = "1.7.6" alembic-rel_1_7_6/alembic/__main__.py000066400000000000000000000001161417624537100176230ustar00rootroot00000000000000from .config import main if __name__ == "__main__": main(prog="alembic") alembic-rel_1_7_6/alembic/autogenerate/000077500000000000000000000000001417624537100202165ustar00rootroot00000000000000alembic-rel_1_7_6/alembic/autogenerate/__init__.py000066400000000000000000000005371417624537100223340ustar00rootroot00000000000000from .api import _render_migration_diffs from .api import compare_metadata from .api import produce_migrations from .api import render_python_code from .api import RevisionContext from .compare import _produce_net_changes from .compare import comparators from .render import render_op_text from .render import renderers from .rewriter import Rewriter alembic-rel_1_7_6/alembic/autogenerate/api.py000066400000000000000000000501421417624537100213430ustar00rootroot00000000000000"""Provide the 'autogenerate' feature which can produce migration operations automatically.""" import contextlib from typing import Any from typing import Callable from typing import Dict from typing import Iterator from typing import Optional from typing import Set from typing import Tuple from typing import TYPE_CHECKING from typing import Union from sqlalchemy import inspect from . import compare from . import render from .. import util from ..operations import ops if TYPE_CHECKING: from sqlalchemy.engine import Connection from sqlalchemy.engine import Dialect from sqlalchemy.engine import Inspector from sqlalchemy.sql.schema import Column from sqlalchemy.sql.schema import ForeignKeyConstraint from sqlalchemy.sql.schema import Index from sqlalchemy.sql.schema import MetaData from sqlalchemy.sql.schema import Table from sqlalchemy.sql.schema import UniqueConstraint from alembic.config import Config from alembic.operations.ops import MigrationScript from alembic.operations.ops import UpgradeOps from alembic.runtime.migration import MigrationContext from alembic.script.base import Script from alembic.script.base import ScriptDirectory def compare_metadata(context: "MigrationContext", metadata: "MetaData") -> Any: """Compare a database schema to that given in a :class:`~sqlalchemy.schema.MetaData` instance. The database connection is presented in the context of a :class:`.MigrationContext` object, which provides database connectivity as well as optional comparison functions to use for datatypes and server defaults - see the "autogenerate" arguments at :meth:`.EnvironmentContext.configure` for details on these. 
The return format is a list of "diff" directives, each representing individual differences:: from alembic.migration import MigrationContext from alembic.autogenerate import compare_metadata from sqlalchemy.schema import SchemaItem from sqlalchemy.types import TypeEngine from sqlalchemy import (create_engine, MetaData, Column, Integer, String, Table, text) import pprint engine = create_engine("sqlite://") with engine.begin() as conn: conn.execute(text(''' create table foo ( id integer not null primary key, old_data varchar, x integer )''')) conn.execute(text(''' create table bar ( data varchar )''')) metadata = MetaData() Table('foo', metadata, Column('id', Integer, primary_key=True), Column('data', Integer), Column('x', Integer, nullable=False) ) Table('bat', metadata, Column('info', String) ) mc = MigrationContext.configure(engine.connect()) diff = compare_metadata(mc, metadata) pprint.pprint(diff, indent=2, width=20) Output:: [ ( 'add_table', Table('bat', MetaData(bind=None), Column('info', String(), table=), schema=None)), ( 'remove_table', Table(u'bar', MetaData(bind=None), Column(u'data', VARCHAR(), table=), schema=None)), ( 'add_column', None, 'foo', Column('data', Integer(), table=)), ( 'remove_column', None, 'foo', Column(u'old_data', VARCHAR(), table=None)), [ ( 'modify_nullable', None, 'foo', u'x', { 'existing_server_default': None, 'existing_type': INTEGER()}, True, False)]] :param context: a :class:`.MigrationContext` instance. :param metadata: a :class:`~sqlalchemy.schema.MetaData` instance. .. seealso:: :func:`.produce_migrations` - produces a :class:`.MigrationScript` structure based on metadata comparison. """ migration_script = produce_migrations(context, metadata) return migration_script.upgrade_ops.as_diffs() def produce_migrations( context: "MigrationContext", metadata: "MetaData" ) -> "MigrationScript": """Produce a :class:`.MigrationScript` structure based on schema comparison. This function does essentially what :func:`.compare_metadata` does, but then runs the resulting list of diffs to produce the full :class:`.MigrationScript` object. For an example of what this looks like, see the example in :ref:`customizing_revision`. .. seealso:: :func:`.compare_metadata` - returns more fundamental "diff" data from comparing a schema. """ autogen_context = AutogenContext(context, metadata=metadata) migration_script = ops.MigrationScript( rev_id=None, upgrade_ops=ops.UpgradeOps([]), downgrade_ops=ops.DowngradeOps([]), ) compare._populate_migration_script(autogen_context, migration_script) return migration_script def render_python_code( up_or_down_op: "UpgradeOps", sqlalchemy_module_prefix: str = "sa.", alembic_module_prefix: str = "op.", render_as_batch: bool = False, imports: Tuple[str, ...] = (), render_item: None = None, migration_context: Optional["MigrationContext"] = None, ) -> str: """Render Python code given an :class:`.UpgradeOps` or :class:`.DowngradeOps` object. This is a convenience function that can be used to test the autogenerate output of a user-defined :class:`.MigrationScript` structure. 
""" opts = { "sqlalchemy_module_prefix": sqlalchemy_module_prefix, "alembic_module_prefix": alembic_module_prefix, "render_item": render_item, "render_as_batch": render_as_batch, } if migration_context is None: from ..runtime.migration import MigrationContext from sqlalchemy.engine.default import DefaultDialect migration_context = MigrationContext.configure( dialect=DefaultDialect() ) autogen_context = AutogenContext(migration_context, opts=opts) autogen_context.imports = set(imports) return render._indent( render._render_cmd_body(up_or_down_op, autogen_context) ) def _render_migration_diffs( context: "MigrationContext", template_args: Dict[Any, Any] ) -> None: """legacy, used by test_autogen_composition at the moment""" autogen_context = AutogenContext(context) upgrade_ops = ops.UpgradeOps([]) compare._produce_net_changes(autogen_context, upgrade_ops) migration_script = ops.MigrationScript( rev_id=None, upgrade_ops=upgrade_ops, downgrade_ops=upgrade_ops.reverse(), ) render._render_python_into_templatevars( autogen_context, migration_script, template_args ) class AutogenContext: """Maintains configuration and state that's specific to an autogenerate operation.""" metadata: Optional["MetaData"] = None """The :class:`~sqlalchemy.schema.MetaData` object representing the destination. This object is the one that is passed within ``env.py`` to the :paramref:`.EnvironmentContext.configure.target_metadata` parameter. It represents the structure of :class:`.Table` and other objects as stated in the current database model, and represents the destination structure for the database being examined. While the :class:`~sqlalchemy.schema.MetaData` object is primarily known as a collection of :class:`~sqlalchemy.schema.Table` objects, it also has an :attr:`~sqlalchemy.schema.MetaData.info` dictionary that may be used by end-user schemes to store additional schema-level objects that are to be compared in custom autogeneration schemes. """ connection: Optional["Connection"] = None """The :class:`~sqlalchemy.engine.base.Connection` object currently connected to the database backend being compared. This is obtained from the :attr:`.MigrationContext.bind` and is ultimately set up in the ``env.py`` script. """ dialect: Optional["Dialect"] = None """The :class:`~sqlalchemy.engine.Dialect` object currently in use. This is normally obtained from the :attr:`~sqlalchemy.engine.base.Connection.dialect` attribute. """ imports: Set[str] = None # type: ignore[assignment] """A ``set()`` which contains string Python import directives. The directives are to be rendered into the ``${imports}`` section of a script template. The set is normally empty and can be modified within hooks such as the :paramref:`.EnvironmentContext.configure.render_item` hook. .. 
seealso:: :ref:`autogen_render_types` """ migration_context: "MigrationContext" = None # type: ignore[assignment] """The :class:`.MigrationContext` established by the ``env.py`` script.""" def __init__( self, migration_context: "MigrationContext", metadata: Optional["MetaData"] = None, opts: Optional[dict] = None, autogenerate: bool = True, ) -> None: if ( autogenerate and migration_context is not None and migration_context.as_sql ): raise util.CommandError( "autogenerate can't use as_sql=True as it prevents querying " "the database for schema information" ) if opts is None: opts = migration_context.opts self.metadata = metadata = ( opts.get("target_metadata", None) if metadata is None else metadata ) if ( autogenerate and metadata is None and migration_context is not None and migration_context.script is not None ): raise util.CommandError( "Can't proceed with --autogenerate option; environment " "script %s does not provide " "a MetaData object or sequence of objects to the context." % (migration_context.script.env_py_location) ) include_object = opts.get("include_object", None) include_name = opts.get("include_name", None) object_filters = [] name_filters = [] if include_object: object_filters.append(include_object) if include_name: name_filters.append(include_name) self._object_filters = object_filters self._name_filters = name_filters self.migration_context = migration_context if self.migration_context is not None: self.connection = self.migration_context.bind self.dialect = self.migration_context.dialect self.imports = set() self.opts: Dict[str, Any] = opts self._has_batch: bool = False @util.memoized_property def inspector(self) -> "Inspector": if self.connection is None: raise TypeError( "can't return inspector as this " "AutogenContext has no database connection" ) return inspect(self.connection) @contextlib.contextmanager def _within_batch(self) -> Iterator[None]: self._has_batch = True yield self._has_batch = False def run_name_filters( self, name: Optional[str], type_: str, parent_names: Dict[str, Optional[str]], ) -> bool: """Run the context's name filters and return True if the targets should be part of the autogenerate operation. This method should be run for every kind of name encountered within the reflection side of an autogenerate operation, giving the environment the chance to filter what names should be reflected as database objects. The filters here are produced directly via the :paramref:`.EnvironmentContext.configure.include_name` parameter. """ if "schema_name" in parent_names: if type_ == "table": table_name = name else: table_name = parent_names.get("table_name", None) if table_name: schema_name = parent_names["schema_name"] if schema_name: parent_names["schema_qualified_table_name"] = "%s.%s" % ( schema_name, table_name, ) else: parent_names["schema_qualified_table_name"] = table_name for fn in self._name_filters: if not fn(name, type_, parent_names): return False else: return True def run_object_filters( self, object_: Union[ "Table", "Index", "Column", "UniqueConstraint", "ForeignKeyConstraint", ], name: Optional[str], type_: str, reflected: bool, compare_to: Optional[ Union["Table", "Index", "Column", "UniqueConstraint"] ], ) -> bool: """Run the context's object filters and return True if the targets should be part of the autogenerate operation. This method should be run for every kind of object encountered within an autogenerate operation, giving the environment the chance to filter what objects should be included in the comparison. 
The filters here are produced directly via the :paramref:`.EnvironmentContext.configure.include_object` parameter. """ for fn in self._object_filters: if not fn(object_, name, type_, reflected, compare_to): return False else: return True run_filters = run_object_filters @util.memoized_property def sorted_tables(self): """Return an aggregate of the :attr:`.MetaData.sorted_tables` collection(s). For a sequence of :class:`.MetaData` objects, this concatenates the :attr:`.MetaData.sorted_tables` collection for each individual :class:`.MetaData` in the order of the sequence. It does **not** collate the sorted tables collections. """ result = [] for m in util.to_list(self.metadata): result.extend(m.sorted_tables) return result @util.memoized_property def table_key_to_table(self): """Return an aggregate of the :attr:`.MetaData.tables` dictionaries. The :attr:`.MetaData.tables` collection is a dictionary of table key to :class:`.Table`; this method aggregates the dictionary across multiple :class:`.MetaData` objects into one dictionary. Duplicate table keys are **not** supported; if two :class:`.MetaData` objects contain the same table key, an exception is raised. """ result = {} for m in util.to_list(self.metadata): intersect = set(result).intersection(set(m.tables)) if intersect: raise ValueError( "Duplicate table keys across multiple " "MetaData objects: %s" % (", ".join('"%s"' % key for key in sorted(intersect))) ) result.update(m.tables) return result class RevisionContext: """Maintains configuration and state that's specific to a revision file generation operation.""" def __init__( self, config: "Config", script_directory: "ScriptDirectory", command_args: Dict[str, Any], process_revision_directives: Optional[Callable] = None, ) -> None: self.config = config self.script_directory = script_directory self.command_args = command_args self.process_revision_directives = process_revision_directives self.template_args = { "config": config # Let templates use config for # e.g. 
multiple databases } self.generated_revisions = [self._default_revision()] def _to_script( self, migration_script: "MigrationScript" ) -> Optional["Script"]: template_args: Dict[str, Any] = self.template_args.copy() if getattr(migration_script, "_needs_render", False): autogen_context = self._last_autogen_context # clear out existing imports if we are doing multiple # renders autogen_context.imports = set() if migration_script.imports: autogen_context.imports.update(migration_script.imports) render._render_python_into_templatevars( autogen_context, migration_script, template_args ) assert migration_script.rev_id is not None return self.script_directory.generate_revision( migration_script.rev_id, migration_script.message, refresh=True, head=migration_script.head, splice=migration_script.splice, branch_labels=migration_script.branch_label, version_path=migration_script.version_path, depends_on=migration_script.depends_on, **template_args ) def run_autogenerate( self, rev: tuple, migration_context: "MigrationContext" ): self._run_environment(rev, migration_context, True) def run_no_autogenerate( self, rev: tuple, migration_context: "MigrationContext" ): self._run_environment(rev, migration_context, False) def _run_environment( self, rev: tuple, migration_context: "MigrationContext", autogenerate: bool, ): if autogenerate: if self.command_args["sql"]: raise util.CommandError( "Using --sql with --autogenerate does not make any sense" ) if set(self.script_directory.get_revisions(rev)) != set( self.script_directory.get_revisions("heads") ): raise util.CommandError("Target database is not up to date.") upgrade_token = migration_context.opts["upgrade_token"] downgrade_token = migration_context.opts["downgrade_token"] migration_script = self.generated_revisions[-1] if not getattr(migration_script, "_needs_render", False): migration_script.upgrade_ops_list[-1].upgrade_token = upgrade_token migration_script.downgrade_ops_list[ -1 ].downgrade_token = downgrade_token migration_script._needs_render = True else: migration_script._upgrade_ops.append( ops.UpgradeOps([], upgrade_token=upgrade_token) ) migration_script._downgrade_ops.append( ops.DowngradeOps([], downgrade_token=downgrade_token) ) autogen_context = AutogenContext( migration_context, autogenerate=autogenerate ) self._last_autogen_context: AutogenContext = autogen_context if autogenerate: compare._populate_migration_script( autogen_context, migration_script ) if self.process_revision_directives: self.process_revision_directives( migration_context, rev, self.generated_revisions ) hook = migration_context.opts["process_revision_directives"] if hook: hook(migration_context, rev, self.generated_revisions) for migration_script in self.generated_revisions: migration_script._needs_render = True def _default_revision(self) -> "MigrationScript": command_args: Dict[str, Any] = self.command_args op = ops.MigrationScript( rev_id=command_args["rev_id"] or util.rev_id(), message=command_args["message"], upgrade_ops=ops.UpgradeOps([]), downgrade_ops=ops.DowngradeOps([]), head=command_args["head"], splice=command_args["splice"], branch_label=command_args["branch_label"], version_path=command_args["version_path"], depends_on=command_args["depends_on"], ) return op def generate_scripts(self) -> Iterator[Optional["Script"]]: for generated_revision in self.generated_revisions: yield self._to_script(generated_revision) alembic-rel_1_7_6/alembic/autogenerate/compare.py000066400000000000000000001316361417624537100222300ustar00rootroot00000000000000import contextlib 
import logging import re from typing import Any from typing import cast from typing import Dict from typing import Iterator from typing import List from typing import Optional from typing import Set from typing import Tuple from typing import TYPE_CHECKING from typing import Union from sqlalchemy import event from sqlalchemy import inspect from sqlalchemy import schema as sa_schema from sqlalchemy import types as sqltypes from sqlalchemy.util import OrderedSet from alembic.ddl.base import _fk_spec from .render import _user_defined_render from .. import util from ..operations import ops from ..util import sqla_compat if TYPE_CHECKING: from typing import Literal from sqlalchemy.engine.reflection import Inspector from sqlalchemy.sql.elements import quoted_name from sqlalchemy.sql.schema import Column from sqlalchemy.sql.schema import ForeignKeyConstraint from sqlalchemy.sql.schema import Index from sqlalchemy.sql.schema import Table from sqlalchemy.sql.schema import UniqueConstraint from alembic.autogenerate.api import AutogenContext from alembic.operations.ops import AlterColumnOp from alembic.operations.ops import MigrationScript from alembic.operations.ops import ModifyTableOps from alembic.operations.ops import UpgradeOps log = logging.getLogger(__name__) def _populate_migration_script( autogen_context: "AutogenContext", migration_script: "MigrationScript" ) -> None: upgrade_ops = migration_script.upgrade_ops_list[-1] downgrade_ops = migration_script.downgrade_ops_list[-1] _produce_net_changes(autogen_context, upgrade_ops) upgrade_ops.reverse_into(downgrade_ops) comparators = util.Dispatcher(uselist=True) def _produce_net_changes( autogen_context: "AutogenContext", upgrade_ops: "UpgradeOps" ) -> None: connection = autogen_context.connection assert connection is not None include_schemas = autogen_context.opts.get("include_schemas", False) inspector: "Inspector" = inspect(connection) default_schema = connection.dialect.default_schema_name schemas: Set[Optional[str]] if include_schemas: schemas = set(inspector.get_schema_names()) # replace default schema name with None schemas.discard("information_schema") # replace the "default" schema with None schemas.discard(default_schema) schemas.add(None) else: schemas = {None} schemas = { s for s in schemas if autogen_context.run_name_filters(s, "schema", {}) } assert autogen_context.dialect is not None comparators.dispatch("schema", autogen_context.dialect.name)( autogen_context, upgrade_ops, schemas ) @comparators.dispatch_for("schema") def _autogen_for_tables( autogen_context: "AutogenContext", upgrade_ops: "UpgradeOps", schemas: Union[Set[None], Set[Optional[str]]], ) -> None: inspector = autogen_context.inspector conn_table_names: Set[Tuple[Optional[str], str]] = set() version_table_schema = ( autogen_context.migration_context.version_table_schema ) version_table = autogen_context.migration_context.version_table for schema_name in schemas: tables = set(inspector.get_table_names(schema=schema_name)) if schema_name == version_table_schema: tables = tables.difference( [autogen_context.migration_context.version_table] ) conn_table_names.update( (schema_name, tname) for tname in tables if autogen_context.run_name_filters( tname, "table", {"schema_name": schema_name} ) ) metadata_table_names = OrderedSet( [(table.schema, table.name) for table in autogen_context.sorted_tables] ).difference([(version_table_schema, version_table)]) _compare_tables( conn_table_names, metadata_table_names, inspector, upgrade_ops, autogen_context, ) def _compare_tables( 
conn_table_names: "set", metadata_table_names: "set", inspector: "Inspector", upgrade_ops: "UpgradeOps", autogen_context: "AutogenContext", ) -> None: default_schema = inspector.bind.dialect.default_schema_name # tables coming from the connection will not have "schema" # set if it matches default_schema_name; so we need a list # of table names from local metadata that also have "None" if schema # == default_schema_name. Most setups will be like this anyway but # some are not (see #170) metadata_table_names_no_dflt_schema = OrderedSet( [ (schema if schema != default_schema else None, tname) for schema, tname in metadata_table_names ] ) # to adjust for the MetaData collection storing the tables either # as "schemaname.tablename" or just "tablename", create a new lookup # which will match the "non-default-schema" keys to the Table object. tname_to_table = dict( ( no_dflt_schema, autogen_context.table_key_to_table[ sa_schema._get_table_key(tname, schema) ], ) for no_dflt_schema, (schema, tname) in zip( metadata_table_names_no_dflt_schema, metadata_table_names ) ) metadata_table_names = metadata_table_names_no_dflt_schema for s, tname in metadata_table_names.difference(conn_table_names): name = "%s.%s" % (s, tname) if s else tname metadata_table = tname_to_table[(s, tname)] if autogen_context.run_object_filters( metadata_table, tname, "table", False, None ): upgrade_ops.ops.append( ops.CreateTableOp.from_table(metadata_table) ) log.info("Detected added table %r", name) modify_table_ops = ops.ModifyTableOps(tname, [], schema=s) comparators.dispatch("table")( autogen_context, modify_table_ops, s, tname, None, metadata_table, ) if not modify_table_ops.is_empty(): upgrade_ops.ops.append(modify_table_ops) removal_metadata = sa_schema.MetaData() for s, tname in conn_table_names.difference(metadata_table_names): name = sa_schema._get_table_key(tname, s) exists = name in removal_metadata.tables t = sa_schema.Table(tname, removal_metadata, schema=s) if not exists: event.listen( t, "column_reflect", # fmt: off autogen_context.migration_context.impl. _compat_autogen_column_reflect (inspector), # fmt: on ) sqla_compat._reflect_table(inspector, t, None) if autogen_context.run_object_filters(t, tname, "table", True, None): modify_table_ops = ops.ModifyTableOps(tname, [], schema=s) comparators.dispatch("table")( autogen_context, modify_table_ops, s, tname, t, None ) if not modify_table_ops.is_empty(): upgrade_ops.ops.append(modify_table_ops) upgrade_ops.ops.append(ops.DropTableOp.from_table(t)) log.info("Detected removed table %r", name) existing_tables = conn_table_names.intersection(metadata_table_names) existing_metadata = sa_schema.MetaData() conn_column_info = {} for s, tname in existing_tables: name = sa_schema._get_table_key(tname, s) exists = name in existing_metadata.tables t = sa_schema.Table(tname, existing_metadata, schema=s) if not exists: event.listen( t, "column_reflect", # fmt: off autogen_context.migration_context.impl. 
_compat_autogen_column_reflect(inspector), # fmt: on ) sqla_compat._reflect_table(inspector, t, None) conn_column_info[(s, tname)] = t for s, tname in sorted(existing_tables, key=lambda x: (x[0] or "", x[1])): s = s or None name = "%s.%s" % (s, tname) if s else tname metadata_table = tname_to_table[(s, tname)] conn_table = existing_metadata.tables[name] if autogen_context.run_object_filters( metadata_table, tname, "table", False, conn_table ): modify_table_ops = ops.ModifyTableOps(tname, [], schema=s) with _compare_columns( s, tname, conn_table, metadata_table, modify_table_ops, autogen_context, inspector, ): comparators.dispatch("table")( autogen_context, modify_table_ops, s, tname, conn_table, metadata_table, ) if not modify_table_ops.is_empty(): upgrade_ops.ops.append(modify_table_ops) def _make_index(params: Dict[str, Any], conn_table: "Table") -> "Index": ix = sa_schema.Index( params["name"], *[conn_table.c[cname] for cname in params["column_names"]], unique=params["unique"], _table=conn_table ) if "duplicates_constraint" in params: ix.info["duplicates_constraint"] = params["duplicates_constraint"] return ix def _make_unique_constraint( params: Dict[str, Any], conn_table: "Table" ) -> "UniqueConstraint": uq = sa_schema.UniqueConstraint( *[conn_table.c[cname] for cname in params["column_names"]], name=params["name"] ) if "duplicates_index" in params: uq.info["duplicates_index"] = params["duplicates_index"] return uq def _make_foreign_key( params: Dict[str, Any], conn_table: "Table" ) -> "ForeignKeyConstraint": tname = params["referred_table"] if params["referred_schema"]: tname = "%s.%s" % (params["referred_schema"], tname) options = params.get("options", {}) const = sa_schema.ForeignKeyConstraint( [conn_table.c[cname] for cname in params["constrained_columns"]], ["%s.%s" % (tname, n) for n in params["referred_columns"]], onupdate=options.get("onupdate"), ondelete=options.get("ondelete"), deferrable=options.get("deferrable"), initially=options.get("initially"), name=params["name"], ) # needed by 0.7 conn_table.append_constraint(const) return const @contextlib.contextmanager def _compare_columns( schema: Optional[str], tname: Union["quoted_name", str], conn_table: "Table", metadata_table: "Table", modify_table_ops: "ModifyTableOps", autogen_context: "AutogenContext", inspector: "Inspector", ) -> Iterator[None]: name = "%s.%s" % (schema, tname) if schema else tname metadata_col_names = OrderedSet( c.name for c in metadata_table.c if not c.system ) metadata_cols_by_name = { c.name: c for c in metadata_table.c if not c.system } conn_col_names = { c.name: c for c in conn_table.c if autogen_context.run_name_filters( c.name, "column", {"table_name": tname, "schema_name": schema} ) } for cname in metadata_col_names.difference(conn_col_names): if autogen_context.run_object_filters( metadata_cols_by_name[cname], cname, "column", False, None ): modify_table_ops.ops.append( ops.AddColumnOp.from_column_and_tablename( schema, tname, metadata_cols_by_name[cname] ) ) log.info("Detected added column '%s.%s'", name, cname) for colname in metadata_col_names.intersection(conn_col_names): metadata_col = metadata_cols_by_name[colname] conn_col = conn_table.c[colname] if not autogen_context.run_object_filters( metadata_col, colname, "column", False, conn_col ): continue alter_column_op = ops.AlterColumnOp(tname, colname, schema=schema) comparators.dispatch("column")( autogen_context, alter_column_op, schema, tname, colname, conn_col, metadata_col, ) if alter_column_op.has_changes(): 
modify_table_ops.ops.append(alter_column_op) yield for cname in set(conn_col_names).difference(metadata_col_names): if autogen_context.run_object_filters( conn_table.c[cname], cname, "column", True, None ): modify_table_ops.ops.append( ops.DropColumnOp.from_column_and_tablename( schema, tname, conn_table.c[cname] ) ) log.info("Detected removed column '%s.%s'", name, cname) class _constraint_sig: const: Union["UniqueConstraint", "ForeignKeyConstraint", "Index"] def md_name_to_sql_name(self, context: "AutogenContext") -> Optional[str]: return sqla_compat._get_constraint_final_name( self.const, context.dialect ) def __eq__(self, other): return self.const == other.const def __ne__(self, other): return self.const != other.const def __hash__(self) -> int: return hash(self.const) class _uq_constraint_sig(_constraint_sig): is_index = False is_unique = True def __init__(self, const: "UniqueConstraint") -> None: self.const = const self.name = const.name self.sig = tuple(sorted([col.name for col in const.columns])) @property def column_names(self) -> List[str]: return [col.name for col in self.const.columns] class _ix_constraint_sig(_constraint_sig): is_index = True def __init__(self, const: "Index") -> None: self.const = const self.name = const.name self.sig = tuple(sorted([col.name for col in const.columns])) self.is_unique = bool(const.unique) def md_name_to_sql_name(self, context: "AutogenContext") -> Optional[str]: return sqla_compat._get_constraint_final_name( self.const, context.dialect ) @property def column_names(self) -> Union[List["quoted_name"], List[None]]: return sqla_compat._get_index_column_names(self.const) class _fk_constraint_sig(_constraint_sig): def __init__( self, const: "ForeignKeyConstraint", include_options: bool = False ) -> None: self.const = const self.name = const.name ( self.source_schema, self.source_table, self.source_columns, self.target_schema, self.target_table, self.target_columns, onupdate, ondelete, deferrable, initially, ) = _fk_spec(const) self.sig: Tuple[Any, ...] = ( self.source_schema, self.source_table, tuple(self.source_columns), self.target_schema, self.target_table, tuple(self.target_columns), ) if include_options: self.sig += ( (None if onupdate.lower() == "no action" else onupdate.lower()) if onupdate else None, (None if ondelete.lower() == "no action" else ondelete.lower()) if ondelete else None, # convert initially + deferrable into one three-state value "initially_deferrable" if initially and initially.lower() == "deferred" else "deferrable" if deferrable else "not deferrable", ) @comparators.dispatch_for("table") def _compare_indexes_and_uniques( autogen_context: "AutogenContext", modify_ops: "ModifyTableOps", schema: Optional[str], tname: Union["quoted_name", str], conn_table: Optional["Table"], metadata_table: Optional["Table"], ) -> None: inspector = autogen_context.inspector is_create_table = conn_table is None is_drop_table = metadata_table is None # 1a. get raw indexes and unique constraints from metadata ... if metadata_table is not None: metadata_unique_constraints = set( uq for uq in metadata_table.constraints if isinstance(uq, sa_schema.UniqueConstraint) ) metadata_indexes = set(metadata_table.indexes) else: metadata_unique_constraints = set() metadata_indexes = set() conn_uniques = conn_indexes = frozenset() # type:ignore[var-annotated] supports_unique_constraints = False unique_constraints_duplicate_unique_indexes = False if conn_table is not None: # 1b. ... 
and from connection, if the table exists if hasattr(inspector, "get_unique_constraints"): try: conn_uniques = inspector.get_unique_constraints( # type:ignore[assignment] # noqa tname, schema=schema ) supports_unique_constraints = True except NotImplementedError: pass except TypeError: # number of arguments is off for the base # method in SQLAlchemy due to the cache decorator # not being present pass else: conn_uniques = [ # type:ignore[assignment] uq for uq in conn_uniques if autogen_context.run_name_filters( uq["name"], "unique_constraint", {"table_name": tname, "schema_name": schema}, ) ] for uq in conn_uniques: if uq.get("duplicates_index"): unique_constraints_duplicate_unique_indexes = True try: conn_indexes = inspector.get_indexes( # type:ignore[assignment] tname, schema=schema ) except NotImplementedError: pass else: conn_indexes = [ # type:ignore[assignment] ix for ix in conn_indexes if autogen_context.run_name_filters( ix["name"], "index", {"table_name": tname, "schema_name": schema}, ) ] # 2. convert conn-level objects from raw inspector records # into schema objects if is_drop_table: # for DROP TABLE uniques are inline, don't need them conn_uniques = set() # type:ignore[assignment] else: conn_uniques = set( # type:ignore[assignment] _make_unique_constraint(uq_def, conn_table) for uq_def in conn_uniques ) conn_indexes = set( # type:ignore[assignment] _make_index(ix, conn_table) for ix in conn_indexes ) # 2a. if the dialect dupes unique indexes as unique constraints # (mysql and oracle), correct for that if unique_constraints_duplicate_unique_indexes: _correct_for_uq_duplicates_uix( conn_uniques, conn_indexes, metadata_unique_constraints, metadata_indexes, autogen_context.dialect, ) # 3. give the dialect a chance to omit indexes and constraints that # we know are either added implicitly by the DB or that the DB # can't accurately report on autogen_context.migration_context.impl.correct_for_autogen_constraints( conn_uniques, conn_indexes, metadata_unique_constraints, metadata_indexes, ) # 4. organize the constraints into "signature" collections, the # _constraint_sig() objects provide a consistent facade over both # Index and UniqueConstraint so we can easily work with them # interchangeably metadata_unique_constraints_sig = set( _uq_constraint_sig(uq) for uq in metadata_unique_constraints ) metadata_indexes_sig = set( _ix_constraint_sig(ix) for ix in metadata_indexes ) conn_unique_constraints = set( _uq_constraint_sig(uq) for uq in conn_uniques ) conn_indexes_sig = set(_ix_constraint_sig(ix) for ix in conn_indexes) # 5. index things by name, for those objects that have names metadata_names = dict( (cast(str, c.md_name_to_sql_name(autogen_context)), c) for c in metadata_unique_constraints_sig.union( metadata_indexes_sig # type:ignore[arg-type] ) if isinstance(c, _ix_constraint_sig) or sqla_compat._constraint_is_named(c.const, autogen_context.dialect) ) conn_uniques_by_name = dict((c.name, c) for c in conn_unique_constraints) conn_indexes_by_name: Dict[Optional[str], _ix_constraint_sig] = dict( (c.name, c) for c in conn_indexes_sig ) conn_names = dict( (c.name, c) for c in conn_unique_constraints.union( conn_indexes_sig # type:ignore[arg-type] ) if c.name is not None ) doubled_constraints = dict( (name, (conn_uniques_by_name[name], conn_indexes_by_name[name])) for name in set(conn_uniques_by_name).intersection( conn_indexes_by_name ) ) # 6. index things by "column signature", to help with unnamed unique # constraints. 
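# [Illustrative sketch, not part of this module] The ``run_name_filters``
# calls above consult the user-facing ``include_name`` hook that can be
# passed to ``EnvironmentContext.configure`` in ``env.py``.  A minimal
# example that skips reflected indexes following a hypothetical "tmp_"
# naming convention (the hook body and prefix are assumptions):
#
#     def include_name(name, type_, parent_names):
#         # type_ is e.g. "index", "unique_constraint", "table", "column"
#         if type_ == "index" and name is not None and name.startswith("tmp_"):
#             return False
#         return True
#
#     context.configure(
#         connection=connection,
#         target_metadata=target_metadata,
#         include_name=include_name,
#     )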
conn_uniques_by_sig = dict((uq.sig, uq) for uq in conn_unique_constraints) metadata_uniques_by_sig = dict( (uq.sig, uq) for uq in metadata_unique_constraints_sig ) metadata_indexes_by_sig = dict((ix.sig, ix) for ix in metadata_indexes_sig) unnamed_metadata_uniques = dict( (uq.sig, uq) for uq in metadata_unique_constraints_sig if not sqla_compat._constraint_is_named( uq.const, autogen_context.dialect ) ) # assumptions: # 1. a unique constraint or an index from the connection *always* # has a name. # 2. an index on the metadata side *always* has a name. # 3. a unique constraint on the metadata side *might* have a name. # 4. The backend may double up indexes as unique constraints and # vice versa (e.g. MySQL, Postgresql) def obj_added(obj): if obj.is_index: if autogen_context.run_object_filters( obj.const, obj.name, "index", False, None ): modify_ops.ops.append(ops.CreateIndexOp.from_index(obj.const)) log.info( "Detected added index '%s' on %s", obj.name, ", ".join(["'%s'" % obj.column_names]), ) else: if not supports_unique_constraints: # can't report unique indexes as added if we don't # detect them return if is_create_table or is_drop_table: # unique constraints are created inline with table defs return if autogen_context.run_object_filters( obj.const, obj.name, "unique_constraint", False, None ): modify_ops.ops.append( ops.AddConstraintOp.from_constraint(obj.const) ) log.info( "Detected added unique constraint '%s' on %s", obj.name, ", ".join(["'%s'" % obj.column_names]), ) def obj_removed(obj): if obj.is_index: if obj.is_unique and not supports_unique_constraints: # many databases double up unique constraints # as unique indexes. without that list we can't # be sure what we're doing here return if autogen_context.run_object_filters( obj.const, obj.name, "index", True, None ): modify_ops.ops.append(ops.DropIndexOp.from_index(obj.const)) log.info( "Detected removed index '%s' on '%s'", obj.name, tname ) else: if is_create_table or is_drop_table: # if the whole table is being dropped, we don't need to # consider unique constraint separately return if autogen_context.run_object_filters( obj.const, obj.name, "unique_constraint", True, None ): modify_ops.ops.append( ops.DropConstraintOp.from_constraint(obj.const) ) log.info( "Detected removed unique constraint '%s' on '%s'", obj.name, tname, ) def obj_changed(old, new, msg): if old.is_index: if autogen_context.run_object_filters( new.const, new.name, "index", False, old.const ): log.info( "Detected changed index '%s' on '%s':%s", old.name, tname, ", ".join(msg), ) modify_ops.ops.append(ops.DropIndexOp.from_index(old.const)) modify_ops.ops.append(ops.CreateIndexOp.from_index(new.const)) else: if autogen_context.run_object_filters( new.const, new.name, "unique_constraint", False, old.const ): log.info( "Detected changed unique constraint '%s' on '%s':%s", old.name, tname, ", ".join(msg), ) modify_ops.ops.append( ops.DropConstraintOp.from_constraint(old.const) ) modify_ops.ops.append( ops.AddConstraintOp.from_constraint(new.const) ) for removed_name in sorted(set(conn_names).difference(metadata_names)): conn_obj: Union[_ix_constraint_sig, _uq_constraint_sig] = conn_names[ removed_name ] if not conn_obj.is_index and conn_obj.sig in unnamed_metadata_uniques: continue elif removed_name in doubled_constraints: if ( conn_obj.sig not in metadata_indexes_by_sig and conn_obj.sig not in metadata_uniques_by_sig ): conn_uq, conn_idx = doubled_constraints[removed_name] obj_removed(conn_uq) obj_removed(conn_idx) else: obj_removed(conn_obj) for existing_name 
in sorted(set(metadata_names).intersection(conn_names)): metadata_obj = metadata_names[existing_name] if existing_name in doubled_constraints: conn_uq, conn_idx = doubled_constraints[existing_name] if metadata_obj.is_index: conn_obj = conn_idx else: conn_obj = conn_uq else: conn_obj = conn_names[existing_name] if conn_obj.is_index != metadata_obj.is_index: obj_removed(conn_obj) obj_added(metadata_obj) else: msg = [] if conn_obj.is_unique != metadata_obj.is_unique: msg.append( " unique=%r to unique=%r" % (conn_obj.is_unique, metadata_obj.is_unique) ) if conn_obj.sig != metadata_obj.sig: msg.append( " columns %r to %r" % (conn_obj.sig, metadata_obj.sig) ) if msg: obj_changed(conn_obj, metadata_obj, msg) for added_name in sorted(set(metadata_names).difference(conn_names)): obj = metadata_names[added_name] obj_added(obj) for uq_sig in unnamed_metadata_uniques: if uq_sig not in conn_uniques_by_sig: obj_added(unnamed_metadata_uniques[uq_sig]) def _correct_for_uq_duplicates_uix( conn_unique_constraints, conn_indexes, metadata_unique_constraints, metadata_indexes, dialect, ): # dedupe unique indexes vs. constraints, since MySQL / Oracle # doesn't really have unique constraints as a separate construct. # but look in the metadata and try to maintain constructs # that already seem to be defined one way or the other # on that side. This logic was formerly local to MySQL dialect, # generalized to Oracle and others. See #276 # resolve final rendered name for unique constraints defined in the # metadata. this includes truncation of long names. naming convention # names currently should already be set as cons.name, however leave this # to the sqla_compat to decide. metadata_cons_names = [ (sqla_compat._get_constraint_final_name(cons, dialect), cons) for cons in metadata_unique_constraints ] metadata_uq_names = set( name for name, cons in metadata_cons_names if name is not None ) unnamed_metadata_uqs = set( [ _uq_constraint_sig(cons).sig for name, cons in metadata_cons_names if name is None ] ) metadata_ix_names = set( [ sqla_compat._get_constraint_final_name(cons, dialect) for cons in metadata_indexes if cons.unique ] ) # for reflection side, names are in their final database form # already since they're from the database conn_ix_names = dict( (cons.name, cons) for cons in conn_indexes if cons.unique ) uqs_dupe_indexes = dict( (cons.name, cons) for cons in conn_unique_constraints if cons.info["duplicates_index"] ) for overlap in uqs_dupe_indexes: if overlap not in metadata_uq_names: if ( _uq_constraint_sig(uqs_dupe_indexes[overlap]).sig not in unnamed_metadata_uqs ): conn_unique_constraints.discard(uqs_dupe_indexes[overlap]) elif overlap not in metadata_ix_names: conn_indexes.discard(conn_ix_names[overlap]) @comparators.dispatch_for("column") def _compare_nullable( autogen_context: "AutogenContext", alter_column_op: "AlterColumnOp", schema: Optional[str], tname: Union["quoted_name", str], cname: Union["quoted_name", str], conn_col: "Column", metadata_col: "Column", ) -> None: metadata_col_nullable = metadata_col.nullable conn_col_nullable = conn_col.nullable alter_column_op.existing_nullable = conn_col_nullable if conn_col_nullable is not metadata_col_nullable: if ( sqla_compat._server_default_is_computed( metadata_col.server_default, conn_col.server_default ) and sqla_compat._nullability_might_be_unset(metadata_col) or ( sqla_compat._server_default_is_identity( metadata_col.server_default, conn_col.server_default ) ) ): log.info( "Ignoring nullable change on identity column '%s.%s'", tname, cname, ) 
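# [Illustrative sketch, not part of this module] When a nullability change
# is detected (the ``else`` branch below), the resulting AlterColumnOp is
# eventually rendered into the migration script roughly as follows (table
# and column names here are hypothetical):
#
#     op.alter_column(
#         "account",
#         "name",
#         existing_type=sa.VARCHAR(length=50),
#         nullable=False,
#     )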
else: alter_column_op.modify_nullable = metadata_col_nullable log.info( "Detected %s on column '%s.%s'", "NULL" if metadata_col_nullable else "NOT NULL", tname, cname, ) @comparators.dispatch_for("column") def _setup_autoincrement( autogen_context: "AutogenContext", alter_column_op: "AlterColumnOp", schema: Optional[str], tname: Union["quoted_name", str], cname: "quoted_name", conn_col: "Column", metadata_col: "Column", ) -> None: if metadata_col.table._autoincrement_column is metadata_col: alter_column_op.kw["autoincrement"] = True elif metadata_col.autoincrement is True: alter_column_op.kw["autoincrement"] = True elif metadata_col.autoincrement is False: alter_column_op.kw["autoincrement"] = False @comparators.dispatch_for("column") def _compare_type( autogen_context: "AutogenContext", alter_column_op: "AlterColumnOp", schema: Optional[str], tname: Union["quoted_name", str], cname: Union["quoted_name", str], conn_col: "Column", metadata_col: "Column", ) -> None: conn_type = conn_col.type alter_column_op.existing_type = conn_type metadata_type = metadata_col.type if conn_type._type_affinity is sqltypes.NullType: log.info( "Couldn't determine database type " "for column '%s.%s'", tname, cname, ) return if metadata_type._type_affinity is sqltypes.NullType: log.info( "Column '%s.%s' has no type within " "the model; can't compare", tname, cname, ) return isdiff = autogen_context.migration_context._compare_type( conn_col, metadata_col ) if isdiff: alter_column_op.modify_type = metadata_type log.info( "Detected type change from %r to %r on '%s.%s'", conn_type, metadata_type, tname, cname, ) def _render_server_default_for_compare( metadata_default: Optional[Any], metadata_col: "Column", autogen_context: "AutogenContext", ) -> Optional[str]: rendered = _user_defined_render( "server_default", metadata_default, autogen_context ) if rendered is not False: return rendered if isinstance(metadata_default, sa_schema.DefaultClause): if isinstance(metadata_default.arg, str): metadata_default = metadata_default.arg else: metadata_default = str( metadata_default.arg.compile( dialect=autogen_context.dialect, compile_kwargs={"literal_binds": True}, ) ) if isinstance(metadata_default, str): if metadata_col.type._type_affinity is sqltypes.String: metadata_default = re.sub(r"^'|'$", "", metadata_default) return repr(metadata_default) else: return metadata_default else: return None def _normalize_computed_default(sqltext: str) -> str: """we want to warn if a computed sql expression has changed. however we don't want false positives and the warning is not that critical. so filter out most forms of variability from the SQL text. """ return re.sub(r"[ \(\)'\"`\[\]]", "", sqltext).lower() def _compare_computed_default( autogen_context: "AutogenContext", alter_column_op: "AlterColumnOp", schema: Optional[str], tname: "str", cname: "str", conn_col: "Column", metadata_col: "Column", ) -> None: rendered_metadata_default = str( cast(sa_schema.Computed, metadata_col.server_default).sqltext.compile( dialect=autogen_context.dialect, compile_kwargs={"literal_binds": True}, ) ) # since we cannot change computed columns, we do only a crude comparison # here where we try to eliminate syntactical differences in order to # get a minimal comparison just to emit a warning. 
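# [Illustrative worked example, not part of this module] Given the regex in
# ``_normalize_computed_default`` above, quoting, whitespace, parenthesis,
# bracket and case differences are stripped before comparison, e.g. both of
# the following normalize to the same string:
#
#     _normalize_computed_default('("PRICE" * 42)')  # -> 'price*42'
#     _normalize_computed_default("price * 42")      # -> 'price*42'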
rendered_metadata_default = _normalize_computed_default( rendered_metadata_default ) if isinstance(conn_col.server_default, sa_schema.Computed): rendered_conn_default = str( conn_col.server_default.sqltext.compile( dialect=autogen_context.dialect, compile_kwargs={"literal_binds": True}, ) ) if rendered_conn_default is None: rendered_conn_default = "" else: rendered_conn_default = _normalize_computed_default( rendered_conn_default ) else: rendered_conn_default = "" if rendered_metadata_default != rendered_conn_default: _warn_computed_not_supported(tname, cname) def _warn_computed_not_supported(tname: str, cname: str) -> None: util.warn("Computed default on %s.%s cannot be modified" % (tname, cname)) def _compare_identity_default( autogen_context, alter_column_op, schema, tname, cname, conn_col, metadata_col, ): impl = autogen_context.migration_context.impl diff, ignored_attr, is_alter = impl._compare_identity_default( metadata_col.server_default, conn_col.server_default ) return diff, is_alter @comparators.dispatch_for("column") def _compare_server_default( autogen_context: "AutogenContext", alter_column_op: "AlterColumnOp", schema: Optional[str], tname: Union["quoted_name", str], cname: Union["quoted_name", str], conn_col: "Column", metadata_col: "Column", ) -> Optional[bool]: metadata_default = metadata_col.server_default conn_col_default = conn_col.server_default if conn_col_default is None and metadata_default is None: return False if sqla_compat._server_default_is_computed(metadata_default): # return False in case of a computed column as the server # default. Note that DDL for adding or removing "GENERATED AS" from # an existing column is not currently known for any backend. # Once SQLAlchemy can reflect "GENERATED" as the "computed" element, # we would also want to ignore and/or warn for changes vs. the # metadata (or support backend specific DDL if applicable). 
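# [Illustrative sketch, not part of this module] A "computed" server default
# corresponds to a column declared with SQLAlchemy's ``Computed`` construct,
# e.g. (table and expression are hypothetical):
#
#     sa.Table(
#         "order_line",
#         metadata,
#         sa.Column("price", sa.Numeric(10, 2)),
#         sa.Column("qty", sa.Integer),
#         sa.Column("total", sa.Numeric(10, 2), sa.Computed("price * qty")),
#     )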
if not sqla_compat.has_computed_reflection: return False else: return ( _compare_computed_default( # type:ignore[func-returns-value] autogen_context, alter_column_op, schema, tname, cname, conn_col, metadata_col, ) ) if sqla_compat._server_default_is_computed(conn_col_default): _warn_computed_not_supported(tname, cname) return False if sqla_compat._server_default_is_identity( metadata_default, conn_col_default ): alter_column_op.existing_server_default = conn_col_default diff, is_alter = _compare_identity_default( autogen_context, alter_column_op, schema, tname, cname, conn_col, metadata_col, ) if is_alter: alter_column_op.modify_server_default = metadata_default if diff: log.info( "Detected server default on column '%s.%s': " "identity options attributes %s", tname, cname, sorted(diff), ) else: rendered_metadata_default = _render_server_default_for_compare( metadata_default, metadata_col, autogen_context ) rendered_conn_default = ( cast(Any, conn_col_default).arg.text if conn_col_default else None ) alter_column_op.existing_server_default = conn_col_default is_diff = autogen_context.migration_context._compare_server_default( conn_col, metadata_col, rendered_metadata_default, rendered_conn_default, ) if is_diff: alter_column_op.modify_server_default = metadata_default log.info("Detected server default on column '%s.%s'", tname, cname) return None @comparators.dispatch_for("column") def _compare_column_comment( autogen_context: "AutogenContext", alter_column_op: "AlterColumnOp", schema: Optional[str], tname: Union["quoted_name", str], cname: "quoted_name", conn_col: "Column", metadata_col: "Column", ) -> Optional["Literal[False]"]: assert autogen_context.dialect is not None if not autogen_context.dialect.supports_comments: return None metadata_comment = metadata_col.comment conn_col_comment = conn_col.comment if conn_col_comment is None and metadata_comment is None: return False alter_column_op.existing_comment = conn_col_comment if conn_col_comment != metadata_comment: alter_column_op.modify_comment = metadata_comment log.info("Detected column comment '%s.%s'", tname, cname) return None @comparators.dispatch_for("table") def _compare_foreign_keys( autogen_context: "AutogenContext", modify_table_ops: "ModifyTableOps", schema: Optional[str], tname: Union["quoted_name", str], conn_table: Optional["Table"], metadata_table: Optional["Table"], ) -> None: # if we're doing CREATE TABLE, all FKs are created # inline within the table def if conn_table is None or metadata_table is None: return inspector = autogen_context.inspector metadata_fks = set( fk for fk in metadata_table.constraints if isinstance(fk, sa_schema.ForeignKeyConstraint) ) conn_fks = [ fk for fk in inspector.get_foreign_keys(tname, schema=schema) if autogen_context.run_name_filters( fk["name"], "foreign_key_constraint", {"table_name": tname, "schema_name": schema}, ) ] backend_reflects_fk_options = bool(conn_fks and "options" in conn_fks[0]) conn_fks = set(_make_foreign_key(const, conn_table) for const in conn_fks) # give the dialect a chance to correct the FKs to match more # closely autogen_context.migration_context.impl.correct_for_autogen_foreignkeys( conn_fks, metadata_fks ) metadata_fks = set( _fk_constraint_sig(fk, include_options=backend_reflects_fk_options) for fk in metadata_fks ) conn_fks = set( _fk_constraint_sig(fk, include_options=backend_reflects_fk_options) for fk in conn_fks ) conn_fks_by_sig = dict((c.sig, c) for c in conn_fks) metadata_fks_by_sig = dict((c.sig, c) for c in metadata_fks) metadata_fks_by_name = 
dict( (c.name, c) for c in metadata_fks if c.name is not None ) conn_fks_by_name = dict( (c.name, c) for c in conn_fks if c.name is not None ) def _add_fk(obj, compare_to): if autogen_context.run_object_filters( obj.const, obj.name, "foreign_key_constraint", False, compare_to ): modify_table_ops.ops.append( ops.CreateForeignKeyOp.from_constraint(const.const) ) log.info( "Detected added foreign key (%s)(%s) on table %s%s", ", ".join(obj.source_columns), ", ".join(obj.target_columns), "%s." % obj.source_schema if obj.source_schema else "", obj.source_table, ) def _remove_fk(obj, compare_to): if autogen_context.run_object_filters( obj.const, obj.name, "foreign_key_constraint", True, compare_to ): modify_table_ops.ops.append( ops.DropConstraintOp.from_constraint(obj.const) ) log.info( "Detected removed foreign key (%s)(%s) on table %s%s", ", ".join(obj.source_columns), ", ".join(obj.target_columns), "%s." % obj.source_schema if obj.source_schema else "", obj.source_table, ) # so far it appears we don't need to do this by name at all. # SQLite doesn't preserve constraint names anyway for removed_sig in set(conn_fks_by_sig).difference(metadata_fks_by_sig): const = conn_fks_by_sig[removed_sig] if removed_sig not in metadata_fks_by_sig: compare_to = ( metadata_fks_by_name[const.name].const if const.name in metadata_fks_by_name else None ) _remove_fk(const, compare_to) for added_sig in set(metadata_fks_by_sig).difference(conn_fks_by_sig): const = metadata_fks_by_sig[added_sig] if added_sig not in conn_fks_by_sig: compare_to = ( conn_fks_by_name[const.name].const if const.name in conn_fks_by_name else None ) _add_fk(const, compare_to) @comparators.dispatch_for("table") def _compare_table_comment( autogen_context: "AutogenContext", modify_table_ops: "ModifyTableOps", schema: Optional[str], tname: Union["quoted_name", str], conn_table: Optional["Table"], metadata_table: Optional["Table"], ) -> None: assert autogen_context.dialect is not None if not autogen_context.dialect.supports_comments: return # if we're doing CREATE TABLE, comments will be created inline # with the create_table op. if conn_table is None or metadata_table is None: return if conn_table.comment is None and metadata_table.comment is None: return if metadata_table.comment is None and conn_table.comment is not None: modify_table_ops.ops.append( ops.DropTableCommentOp( tname, existing_comment=conn_table.comment, schema=schema ) ) elif metadata_table.comment != conn_table.comment: modify_table_ops.ops.append( ops.CreateTableCommentOp( tname, metadata_table.comment, existing_comment=conn_table.comment, schema=schema, ) ) alembic-rel_1_7_6/alembic/autogenerate/render.py000066400000000000000000001041041417624537100220470ustar00rootroot00000000000000from collections import OrderedDict from io import StringIO import re from typing import Any from typing import cast from typing import Dict from typing import List from typing import Optional from typing import Tuple from typing import TYPE_CHECKING from typing import Union from mako.pygen import PythonPrinter from sqlalchemy import schema as sa_schema from sqlalchemy import sql from sqlalchemy import types as sqltypes from sqlalchemy.sql.elements import conv from .. 
import util from ..operations import ops from ..util import sqla_compat if TYPE_CHECKING: from typing import Literal from sqlalchemy.sql.elements import ColumnElement from sqlalchemy.sql.elements import quoted_name from sqlalchemy.sql.elements import TextClause from sqlalchemy.sql.schema import CheckConstraint from sqlalchemy.sql.schema import Column from sqlalchemy.sql.schema import Constraint from sqlalchemy.sql.schema import DefaultClause from sqlalchemy.sql.schema import FetchedValue from sqlalchemy.sql.schema import ForeignKey from sqlalchemy.sql.schema import ForeignKeyConstraint from sqlalchemy.sql.schema import Index from sqlalchemy.sql.schema import MetaData from sqlalchemy.sql.schema import PrimaryKeyConstraint from sqlalchemy.sql.schema import UniqueConstraint from sqlalchemy.sql.sqltypes import ARRAY from sqlalchemy.sql.type_api import TypeEngine from alembic.autogenerate.api import AutogenContext from alembic.config import Config from alembic.operations.ops import MigrationScript from alembic.operations.ops import ModifyTableOps from alembic.util.sqla_compat import Computed from alembic.util.sqla_compat import Identity MAX_PYTHON_ARGS = 255 def _render_gen_name( autogen_context: "AutogenContext", name: Optional[Union["quoted_name", str]], ) -> Optional[Union["quoted_name", str, "_f_name"]]: if isinstance(name, conv): return _f_name(_alembic_autogenerate_prefix(autogen_context), name) else: return name def _indent(text: str) -> str: text = re.compile(r"^", re.M).sub(" ", text).strip() text = re.compile(r" +$", re.M).sub("", text) return text def _render_python_into_templatevars( autogen_context: "AutogenContext", migration_script: "MigrationScript", template_args: Dict[str, Union[str, "Config"]], ) -> None: imports = autogen_context.imports for upgrade_ops, downgrade_ops in zip( migration_script.upgrade_ops_list, migration_script.downgrade_ops_list ): template_args[upgrade_ops.upgrade_token] = _indent( _render_cmd_body(upgrade_ops, autogen_context) ) template_args[downgrade_ops.downgrade_token] = _indent( _render_cmd_body(downgrade_ops, autogen_context) ) template_args["imports"] = "\n".join(sorted(imports)) default_renderers = renderers = util.Dispatcher() def _render_cmd_body( op_container: "ops.OpContainer", autogen_context: "AutogenContext", ) -> str: buf = StringIO() printer = PythonPrinter(buf) printer.writeline( "# ### commands auto generated by Alembic - please adjust! 
###" ) has_lines = False for op in op_container.ops: lines = render_op(autogen_context, op) has_lines = has_lines or bool(lines) for line in lines: printer.writeline(line) if not has_lines: printer.writeline("pass") printer.writeline("# ### end Alembic commands ###") return buf.getvalue() def render_op( autogen_context: "AutogenContext", op: "ops.MigrateOperation" ) -> List[str]: renderer = renderers.dispatch(op) lines = util.to_list(renderer(autogen_context, op)) return lines def render_op_text( autogen_context: "AutogenContext", op: "ops.MigrateOperation" ) -> str: return "\n".join(render_op(autogen_context, op)) @renderers.dispatch_for(ops.ModifyTableOps) def _render_modify_table( autogen_context: "AutogenContext", op: "ModifyTableOps" ) -> List[str]: opts = autogen_context.opts render_as_batch = opts.get("render_as_batch", False) if op.ops: lines = [] if render_as_batch: with autogen_context._within_batch(): lines.append( "with op.batch_alter_table(%r, schema=%r) as batch_op:" % (op.table_name, op.schema) ) for t_op in op.ops: t_lines = render_op(autogen_context, t_op) lines.extend(t_lines) lines.append("") else: for t_op in op.ops: t_lines = render_op(autogen_context, t_op) lines.extend(t_lines) return lines else: return [] @renderers.dispatch_for(ops.CreateTableCommentOp) def _render_create_table_comment( autogen_context: "AutogenContext", op: "ops.CreateTableCommentOp" ) -> str: templ = ( "{prefix}create_table_comment(\n" "{indent}'{tname}',\n" "{indent}{comment},\n" "{indent}existing_comment={existing},\n" "{indent}schema={schema}\n" ")" ) return templ.format( prefix=_alembic_autogenerate_prefix(autogen_context), tname=op.table_name, comment="%r" % op.comment if op.comment is not None else None, existing="%r" % op.existing_comment if op.existing_comment is not None else None, schema="'%s'" % op.schema if op.schema is not None else None, indent=" ", ) @renderers.dispatch_for(ops.DropTableCommentOp) def _render_drop_table_comment( autogen_context: "AutogenContext", op: "ops.DropTableCommentOp" ) -> str: templ = ( "{prefix}drop_table_comment(\n" "{indent}'{tname}',\n" "{indent}existing_comment={existing},\n" "{indent}schema={schema}\n" ")" ) return templ.format( prefix=_alembic_autogenerate_prefix(autogen_context), tname=op.table_name, existing="%r" % op.existing_comment if op.existing_comment is not None else None, schema="'%s'" % op.schema if op.schema is not None else None, indent=" ", ) @renderers.dispatch_for(ops.CreateTableOp) def _add_table( autogen_context: "AutogenContext", op: "ops.CreateTableOp" ) -> str: table = op.to_table() args = [ col for col in [ _render_column(col, autogen_context) for col in table.columns ] if col ] + sorted( [ rcons for rcons in [ _render_constraint( cons, autogen_context, op._namespace_metadata ) for cons in table.constraints ] if rcons is not None ] ) if len(args) > MAX_PYTHON_ARGS: args_str = "*[" + ",\n".join(args) + "]" else: args_str = ",\n".join(args) text = "%(prefix)screate_table(%(tablename)r,\n%(args)s" % { "tablename": _ident(op.table_name), "prefix": _alembic_autogenerate_prefix(autogen_context), "args": args_str, } if op.schema: text += ",\nschema=%r" % _ident(op.schema) comment = table.comment if comment: text += ",\ncomment=%r" % _ident(comment) for k in sorted(op.kw): text += ",\n%s=%r" % (k.replace(" ", "_"), op.kw[k]) if table._prefixes: prefixes = ", ".join("'%s'" % p for p in table._prefixes) text += ",\nprefixes=[%s]" % prefixes text += "\n)" return text @renderers.dispatch_for(ops.DropTableOp) def _drop_table( 
autogen_context: "AutogenContext", op: "ops.DropTableOp" ) -> str: text = "%(prefix)sdrop_table(%(tname)r" % { "prefix": _alembic_autogenerate_prefix(autogen_context), "tname": _ident(op.table_name), } if op.schema: text += ", schema=%r" % _ident(op.schema) text += ")" return text @renderers.dispatch_for(ops.CreateIndexOp) def _add_index( autogen_context: "AutogenContext", op: "ops.CreateIndexOp" ) -> str: index = op.to_index() has_batch = autogen_context._has_batch if has_batch: tmpl = ( "%(prefix)screate_index(%(name)r, [%(columns)s], " "unique=%(unique)r%(kwargs)s)" ) else: tmpl = ( "%(prefix)screate_index(%(name)r, %(table)r, [%(columns)s], " "unique=%(unique)r%(schema)s%(kwargs)s)" ) assert index.table is not None text = tmpl % { "prefix": _alembic_autogenerate_prefix(autogen_context), "name": _render_gen_name(autogen_context, index.name), "table": _ident(index.table.name), "columns": ", ".join( _get_index_rendered_expressions(index, autogen_context) ), "unique": index.unique or False, "schema": (", schema=%r" % _ident(index.table.schema)) if index.table.schema else "", "kwargs": ( ", " + ", ".join( [ "%s=%s" % (key, _render_potential_expr(val, autogen_context)) for key, val in index.kwargs.items() ] ) ) if len(index.kwargs) else "", } return text @renderers.dispatch_for(ops.DropIndexOp) def _drop_index( autogen_context: "AutogenContext", op: "ops.DropIndexOp" ) -> str: index = op.to_index() has_batch = autogen_context._has_batch if has_batch: tmpl = "%(prefix)sdrop_index(%(name)r%(kwargs)s)" else: tmpl = ( "%(prefix)sdrop_index(%(name)r, " "table_name=%(table_name)r%(schema)s%(kwargs)s)" ) text = tmpl % { "prefix": _alembic_autogenerate_prefix(autogen_context), "name": _render_gen_name(autogen_context, op.index_name), "table_name": _ident(op.table_name), "schema": ((", schema=%r" % _ident(op.schema)) if op.schema else ""), "kwargs": ( ", " + ", ".join( [ "%s=%s" % (key, _render_potential_expr(val, autogen_context)) for key, val in index.kwargs.items() ] ) ) if len(index.kwargs) else "", } return text @renderers.dispatch_for(ops.CreateUniqueConstraintOp) def _add_unique_constraint( autogen_context: "AutogenContext", op: "ops.CreateUniqueConstraintOp" ) -> List[str]: return [_uq_constraint(op.to_constraint(), autogen_context, True)] @renderers.dispatch_for(ops.CreateForeignKeyOp) def _add_fk_constraint( autogen_context: "AutogenContext", op: "ops.CreateForeignKeyOp" ) -> str: args = [repr(_render_gen_name(autogen_context, op.constraint_name))] if not autogen_context._has_batch: args.append(repr(_ident(op.source_table))) args.extend( [ repr(_ident(op.referent_table)), repr([_ident(col) for col in op.local_cols]), repr([_ident(col) for col in op.remote_cols]), ] ) kwargs = [ "referent_schema", "onupdate", "ondelete", "initially", "deferrable", "use_alter", ] if not autogen_context._has_batch: kwargs.insert(0, "source_schema") for k in kwargs: if k in op.kw: value = op.kw[k] if value is not None: args.append("%s=%r" % (k, value)) return "%(prefix)screate_foreign_key(%(args)s)" % { "prefix": _alembic_autogenerate_prefix(autogen_context), "args": ", ".join(args), } @renderers.dispatch_for(ops.CreatePrimaryKeyOp) def _add_pk_constraint(constraint, autogen_context): raise NotImplementedError() @renderers.dispatch_for(ops.CreateCheckConstraintOp) def _add_check_constraint(constraint, autogen_context): raise NotImplementedError() @renderers.dispatch_for(ops.DropConstraintOp) def _drop_constraint( autogen_context: "AutogenContext", op: "ops.DropConstraintOp" ) -> str: if 
autogen_context._has_batch: template = "%(prefix)sdrop_constraint" "(%(name)r, type_=%(type)r)" else: template = ( "%(prefix)sdrop_constraint" "(%(name)r, '%(table_name)s'%(schema)s, type_=%(type)r)" ) text = template % { "prefix": _alembic_autogenerate_prefix(autogen_context), "name": _render_gen_name(autogen_context, op.constraint_name), "table_name": _ident(op.table_name), "type": op.constraint_type, "schema": (", schema=%r" % _ident(op.schema)) if op.schema else "", } return text @renderers.dispatch_for(ops.AddColumnOp) def _add_column( autogen_context: "AutogenContext", op: "ops.AddColumnOp" ) -> str: schema, tname, column = op.schema, op.table_name, op.column if autogen_context._has_batch: template = "%(prefix)sadd_column(%(column)s)" else: template = "%(prefix)sadd_column(%(tname)r, %(column)s" if schema: template += ", schema=%(schema)r" template += ")" text = template % { "prefix": _alembic_autogenerate_prefix(autogen_context), "tname": tname, "column": _render_column(column, autogen_context), "schema": schema, } return text @renderers.dispatch_for(ops.DropColumnOp) def _drop_column( autogen_context: "AutogenContext", op: "ops.DropColumnOp" ) -> str: schema, tname, column_name = op.schema, op.table_name, op.column_name if autogen_context._has_batch: template = "%(prefix)sdrop_column(%(cname)r)" else: template = "%(prefix)sdrop_column(%(tname)r, %(cname)r" if schema: template += ", schema=%(schema)r" template += ")" text = template % { "prefix": _alembic_autogenerate_prefix(autogen_context), "tname": _ident(tname), "cname": _ident(column_name), "schema": _ident(schema), } return text @renderers.dispatch_for(ops.AlterColumnOp) def _alter_column( autogen_context: "AutogenContext", op: "ops.AlterColumnOp" ) -> str: tname = op.table_name cname = op.column_name server_default = op.modify_server_default type_ = op.modify_type nullable = op.modify_nullable comment = op.modify_comment autoincrement = op.kw.get("autoincrement", None) existing_type = op.existing_type existing_nullable = op.existing_nullable existing_comment = op.existing_comment existing_server_default = op.existing_server_default schema = op.schema indent = " " * 11 if autogen_context._has_batch: template = "%(prefix)salter_column(%(cname)r" else: template = "%(prefix)salter_column(%(tname)r, %(cname)r" text = template % { "prefix": _alembic_autogenerate_prefix(autogen_context), "tname": tname, "cname": cname, } if existing_type is not None: text += ",\n%sexisting_type=%s" % ( indent, _repr_type(existing_type, autogen_context), ) if server_default is not False: rendered = _render_server_default(server_default, autogen_context) text += ",\n%sserver_default=%s" % (indent, rendered) if type_ is not None: text += ",\n%stype_=%s" % (indent, _repr_type(type_, autogen_context)) if nullable is not None: text += ",\n%snullable=%r" % (indent, nullable) if comment is not False: text += ",\n%scomment=%r" % (indent, comment) if existing_comment is not None: text += ",\n%sexisting_comment=%r" % (indent, existing_comment) if nullable is None and existing_nullable is not None: text += ",\n%sexisting_nullable=%r" % (indent, existing_nullable) if autoincrement is not None: text += ",\n%sautoincrement=%r" % (indent, autoincrement) if server_default is False and existing_server_default: rendered = _render_server_default( existing_server_default, autogen_context ) text += ",\n%sexisting_server_default=%s" % (indent, rendered) if schema and not autogen_context._has_batch: text += ",\n%sschema=%r" % (indent, schema) text += ")" return text class 
_f_name: def __init__(self, prefix: str, name: conv) -> None: self.prefix = prefix self.name = name def __repr__(self) -> str: return "%sf(%r)" % (self.prefix, _ident(self.name)) def _ident(name: Optional[Union["quoted_name", str]]) -> Optional[str]: """produce a __repr__() object for a string identifier that may use quoted_name() in SQLAlchemy 0.9 and greater. The issue worked around here is that quoted_name() doesn't have very good repr() behavior by itself when unicode is involved. """ if name is None: return name elif isinstance(name, sql.elements.quoted_name): return str(name) elif isinstance(name, str): return name def _render_potential_expr( value: Any, autogen_context: "AutogenContext", wrap_in_text: bool = True, is_server_default: bool = False, ) -> str: if isinstance(value, sql.ClauseElement): if wrap_in_text: template = "%(prefix)stext(%(sql)r)" else: template = "%(sql)r" return template % { "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), "sql": autogen_context.migration_context.impl.render_ddl_sql_expr( value, is_server_default=is_server_default ), } else: return repr(value) def _get_index_rendered_expressions( idx: "Index", autogen_context: "AutogenContext" ) -> List[str]: return [ repr(_ident(getattr(exp, "name", None))) if isinstance(exp, sa_schema.Column) else _render_potential_expr(exp, autogen_context) for exp in idx.expressions ] def _uq_constraint( constraint: "UniqueConstraint", autogen_context: "AutogenContext", alter: bool, ) -> str: opts: List[Tuple[str, Any]] = [] has_batch = autogen_context._has_batch if constraint.deferrable: opts.append(("deferrable", str(constraint.deferrable))) if constraint.initially: opts.append(("initially", str(constraint.initially))) if not has_batch and alter and constraint.table.schema: opts.append(("schema", _ident(constraint.table.schema))) if not alter and constraint.name: opts.append( ("name", _render_gen_name(autogen_context, constraint.name)) ) if alter: args = [repr(_render_gen_name(autogen_context, constraint.name))] if not has_batch: args += [repr(_ident(constraint.table.name))] args.append(repr([_ident(col.name) for col in constraint.columns])) args.extend(["%s=%r" % (k, v) for k, v in opts]) return "%(prefix)screate_unique_constraint(%(args)s)" % { "prefix": _alembic_autogenerate_prefix(autogen_context), "args": ", ".join(args), } else: args = [repr(_ident(col.name)) for col in constraint.columns] args.extend(["%s=%r" % (k, v) for k, v in opts]) return "%(prefix)sUniqueConstraint(%(args)s)" % { "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), "args": ", ".join(args), } def _user_autogenerate_prefix(autogen_context, target): prefix = autogen_context.opts["user_module_prefix"] if prefix is None: return "%s." % target.__module__ else: return prefix def _sqlalchemy_autogenerate_prefix(autogen_context: "AutogenContext") -> str: return autogen_context.opts["sqlalchemy_module_prefix"] or "" def _alembic_autogenerate_prefix(autogen_context: "AutogenContext") -> str: if autogen_context._has_batch: return "batch_op." 
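# [Illustrative sketch, not part of this module] When ``render_as_batch=True``
# is in effect (see ``_render_modify_table`` above), the "batch_op." prefix
# returned here causes per-table ops to be rendered inside a batch block,
# roughly (table and column names are hypothetical):
#
#     with op.batch_alter_table("account", schema=None) as batch_op:
#         batch_op.drop_column("temp_flag")
#
# whereas the non-batch rendering of the same op would be
# ``op.drop_column("account", "temp_flag")``.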
else: return autogen_context.opts["alembic_module_prefix"] or "" def _user_defined_render( type_: str, object_: Any, autogen_context: "AutogenContext" ) -> Union[str, "Literal[False]"]: if "render_item" in autogen_context.opts: render = autogen_context.opts["render_item"] if render: rendered = render(type_, object_, autogen_context) if rendered is not False: return rendered return False def _render_column(column: "Column", autogen_context: "AutogenContext") -> str: rendered = _user_defined_render("column", column, autogen_context) if rendered is not False: return rendered args: List[str] = [] opts: List[Tuple[str, Any]] = [] if column.server_default: rendered = _render_server_default( # type:ignore[assignment] column.server_default, autogen_context ) if rendered: if _should_render_server_default_positionally( column.server_default ): args.append(rendered) else: opts.append(("server_default", rendered)) if ( column.autoincrement is not None and column.autoincrement != sqla_compat.AUTOINCREMENT_DEFAULT ): opts.append(("autoincrement", column.autoincrement)) if column.nullable is not None: opts.append(("nullable", column.nullable)) if column.system: opts.append(("system", column.system)) comment = column.comment if comment: opts.append(("comment", "%r" % comment)) # TODO: for non-ascii colname, assign a "key" return "%(prefix)sColumn(%(name)r, %(type)s, %(args)s%(kwargs)s)" % { "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), "name": _ident(column.name), "type": _repr_type(column.type, autogen_context), "args": ", ".join([str(arg) for arg in args]) + ", " if args else "", "kwargs": ( ", ".join( ["%s=%s" % (kwname, val) for kwname, val in opts] + [ "%s=%s" % (key, _render_potential_expr(val, autogen_context)) for key, val in sqla_compat._column_kwargs(column).items() ] ) ), } def _should_render_server_default_positionally( server_default: Union["Computed", "DefaultClause"] ) -> bool: return sqla_compat._server_default_is_computed( server_default ) or sqla_compat._server_default_is_identity(server_default) def _render_server_default( default: Optional[ Union["FetchedValue", str, "TextClause", "ColumnElement"] ], autogen_context: "AutogenContext", repr_: bool = True, ) -> Optional[str]: rendered = _user_defined_render("server_default", default, autogen_context) if rendered is not False: return rendered if sqla_compat._server_default_is_computed(default): return _render_computed(cast("Computed", default), autogen_context) elif sqla_compat._server_default_is_identity(default): return _render_identity(cast("Identity", default), autogen_context) elif isinstance(default, sa_schema.DefaultClause): if isinstance(default.arg, str): default = default.arg else: return _render_potential_expr( default.arg, autogen_context, is_server_default=True ) if isinstance(default, str) and repr_: default = repr(re.sub(r"^'|'$", "", default)) return cast(str, default) def _render_computed( computed: "Computed", autogen_context: "AutogenContext" ) -> str: text = _render_potential_expr( computed.sqltext, autogen_context, wrap_in_text=False ) kwargs = {} if computed.persisted is not None: kwargs["persisted"] = computed.persisted return "%(prefix)sComputed(%(text)s, %(kwargs)s)" % { "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), "text": text, "kwargs": (", ".join("%s=%s" % pair for pair in kwargs.items())), } def _render_identity( identity: "Identity", autogen_context: "AutogenContext" ) -> str: # always=None means something different than always=False kwargs = OrderedDict(always=identity.always) if 
identity.on_null is not None: kwargs["on_null"] = identity.on_null kwargs.update(_get_identity_options(identity)) return "%(prefix)sIdentity(%(kwargs)s)" % { "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), "kwargs": (", ".join("%s=%s" % pair for pair in kwargs.items())), } def _get_identity_options(identity_options: "Identity") -> OrderedDict: kwargs = OrderedDict() for attr in sqla_compat._identity_options_attrs: value = getattr(identity_options, attr, None) if value is not None: kwargs[attr] = value return kwargs def _repr_type( type_: "TypeEngine", autogen_context: "AutogenContext", _skip_variants: bool = False, ) -> str: rendered = _user_defined_render("type", type_, autogen_context) if rendered is not False: return rendered if hasattr(autogen_context.migration_context, "impl"): impl_rt = autogen_context.migration_context.impl.render_type( type_, autogen_context ) else: impl_rt = None mod = type(type_).__module__ imports = autogen_context.imports if mod.startswith("sqlalchemy.dialects"): match = re.match(r"sqlalchemy\.dialects\.(\w+)", mod) assert match is not None dname = match.group(1) if imports is not None: imports.add("from sqlalchemy.dialects import %s" % dname) if impl_rt: return impl_rt else: return "%s.%r" % (dname, type_) elif impl_rt: return impl_rt elif mod.startswith("sqlalchemy."): if not _skip_variants and sqla_compat._type_has_variants(type_): return _render_Variant_type(type_, autogen_context) if "_render_%s_type" % type_.__visit_name__ in globals(): fn = globals()["_render_%s_type" % type_.__visit_name__] return fn(type_, autogen_context) else: prefix = _sqlalchemy_autogenerate_prefix(autogen_context) return "%s%r" % (prefix, type_) else: prefix = _user_autogenerate_prefix(autogen_context, type_) return "%s%r" % (prefix, type_) def _render_ARRAY_type( type_: "ARRAY", autogen_context: "AutogenContext" ) -> str: return cast( str, _render_type_w_subtype( type_, autogen_context, "item_type", r"(.+?\()" ), ) def _render_Variant_type( type_: "TypeEngine", autogen_context: "AutogenContext" ) -> str: base_type, variant_mapping = sqla_compat._get_variant_mapping(type_) base = _repr_type(base_type, autogen_context, _skip_variants=True) assert base is not None and base is not False for dialect in sorted(variant_mapping): typ = variant_mapping[dialect] base += ".with_variant(%s, %r)" % ( _repr_type(typ, autogen_context, _skip_variants=True), dialect, ) return base def _render_type_w_subtype( type_: "TypeEngine", autogen_context: "AutogenContext", attrname: str, regexp: str, prefix: Optional[str] = None, ) -> Union[Optional[str], "Literal[False]"]: outer_repr = repr(type_) inner_type = getattr(type_, attrname, None) if inner_type is None: return False inner_repr = repr(inner_type) inner_repr = re.sub(r"([\(\)])", r"\\\1", inner_repr) sub_type = _repr_type(getattr(type_, attrname), autogen_context) outer_type = re.sub(regexp + inner_repr, r"\1%s" % sub_type, outer_repr) if prefix: return "%s%s" % (prefix, outer_type) mod = type(type_).__module__ if mod.startswith("sqlalchemy.dialects"): match = re.match(r"sqlalchemy\.dialects\.(\w+)", mod) assert match is not None dname = match.group(1) return "%s.%s" % (dname, outer_type) elif mod.startswith("sqlalchemy"): prefix = _sqlalchemy_autogenerate_prefix(autogen_context) return "%s%s" % (prefix, outer_type) else: return None _constraint_renderers = util.Dispatcher() def _render_constraint( constraint: "Constraint", autogen_context: "AutogenContext", namespace_metadata: Optional["MetaData"], ) -> Optional[str]: try: renderer = 
_constraint_renderers.dispatch(constraint) except ValueError: util.warn("No renderer is established for object %r" % constraint) return "[Unknown Python object %r]" % constraint else: return renderer(constraint, autogen_context, namespace_metadata) @_constraint_renderers.dispatch_for(sa_schema.PrimaryKeyConstraint) def _render_primary_key( constraint: "PrimaryKeyConstraint", autogen_context: "AutogenContext", namespace_metadata: Optional["MetaData"], ) -> Optional[str]: rendered = _user_defined_render("primary_key", constraint, autogen_context) if rendered is not False: return rendered if not constraint.columns: return None opts = [] if constraint.name: opts.append( ("name", repr(_render_gen_name(autogen_context, constraint.name))) ) return "%(prefix)sPrimaryKeyConstraint(%(args)s)" % { "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), "args": ", ".join( [repr(c.name) for c in constraint.columns] + ["%s=%s" % (kwname, val) for kwname, val in opts] ), } def _fk_colspec( fk: "ForeignKey", metadata_schema: Optional[str], namespace_metadata: "MetaData", ) -> str: """Implement a 'safe' version of ForeignKey._get_colspec() that won't fail if the remote table can't be resolved. """ colspec = fk._get_colspec() # type:ignore[attr-defined] tokens = colspec.split(".") tname, colname = tokens[-2:] if metadata_schema is not None and len(tokens) == 2: table_fullname = "%s.%s" % (metadata_schema, tname) else: table_fullname = ".".join(tokens[0:-1]) if ( not fk.link_to_name and fk.parent is not None and fk.parent.table is not None ): # try to resolve the remote table in order to adjust for column.key. # the FK constraint needs to be rendered in terms of the column # name. if table_fullname in namespace_metadata.tables: col = namespace_metadata.tables[table_fullname].c.get(colname) if col is not None: colname = _ident(col.name) colspec = "%s.%s" % (table_fullname, colname) return colspec def _populate_render_fk_opts( constraint: "ForeignKeyConstraint", opts: List[Tuple[str, str]] ) -> None: if constraint.onupdate: opts.append(("onupdate", repr(constraint.onupdate))) if constraint.ondelete: opts.append(("ondelete", repr(constraint.ondelete))) if constraint.initially: opts.append(("initially", repr(constraint.initially))) if constraint.deferrable: opts.append(("deferrable", repr(constraint.deferrable))) if constraint.use_alter: opts.append(("use_alter", repr(constraint.use_alter))) @_constraint_renderers.dispatch_for(sa_schema.ForeignKeyConstraint) def _render_foreign_key( constraint: "ForeignKeyConstraint", autogen_context: "AutogenContext", namespace_metadata: "MetaData", ) -> Optional[str]: rendered = _user_defined_render("foreign_key", constraint, autogen_context) if rendered is not False: return rendered opts = [] if constraint.name: opts.append( ("name", repr(_render_gen_name(autogen_context, constraint.name))) ) _populate_render_fk_opts(constraint, opts) apply_metadata_schema = namespace_metadata.schema return ( "%(prefix)sForeignKeyConstraint([%(cols)s], " "[%(refcols)s], %(args)s)" % { "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), "cols": ", ".join( "%r" % _ident(cast("Column", f.parent).name) for f in constraint.elements ), "refcols": ", ".join( repr(_fk_colspec(f, apply_metadata_schema, namespace_metadata)) for f in constraint.elements ), "args": ", ".join( ["%s=%s" % (kwname, val) for kwname, val in opts] ), } ) @_constraint_renderers.dispatch_for(sa_schema.UniqueConstraint) def _render_unique_constraint( constraint: "UniqueConstraint", autogen_context: "AutogenContext", 
namespace_metadata: Optional["MetaData"], ) -> str: rendered = _user_defined_render("unique", constraint, autogen_context) if rendered is not False: return rendered return _uq_constraint(constraint, autogen_context, False) @_constraint_renderers.dispatch_for(sa_schema.CheckConstraint) def _render_check_constraint( constraint: "CheckConstraint", autogen_context: "AutogenContext", namespace_metadata: Optional["MetaData"], ) -> Optional[str]: rendered = _user_defined_render("check", constraint, autogen_context) if rendered is not False: return rendered # detect the constraint being part of # a parent type which is probably in the Table already. # ideally SQLAlchemy would give us more of a first class # way to detect this. if ( constraint._create_rule # type:ignore[attr-defined] and hasattr( constraint._create_rule, "target" # type:ignore[attr-defined] ) and isinstance( constraint._create_rule.target, # type:ignore[attr-defined] sqltypes.TypeEngine, ) ): return None opts = [] if constraint.name: opts.append( ("name", repr(_render_gen_name(autogen_context, constraint.name))) ) return "%(prefix)sCheckConstraint(%(sqltext)s%(opts)s)" % { "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), "opts": ", " + (", ".join("%s=%s" % (k, v) for k, v in opts)) if opts else "", "sqltext": _render_potential_expr( constraint.sqltext, autogen_context, wrap_in_text=False ), } @renderers.dispatch_for(ops.ExecuteSQLOp) def _execute_sql( autogen_context: "AutogenContext", op: "ops.ExecuteSQLOp" ) -> str: if not isinstance(op.sqltext, str): raise NotImplementedError( "Autogenerate rendering of SQL Expression language constructs " "not supported here; please use a plain SQL string" ) return "op.execute(%r)" % op.sqltext renderers = default_renderers.branch() alembic-rel_1_7_6/alembic/autogenerate/rewriter.py000066400000000000000000000164241417624537100224420ustar00rootroot00000000000000from typing import Any from typing import Callable from typing import Iterator from typing import List from typing import Type from typing import TYPE_CHECKING from typing import Union from alembic import util from alembic.operations import ops if TYPE_CHECKING: from alembic.operations.ops import AddColumnOp from alembic.operations.ops import AlterColumnOp from alembic.operations.ops import CreateTableOp from alembic.operations.ops import MigrateOperation from alembic.operations.ops import MigrationScript from alembic.operations.ops import ModifyTableOps from alembic.operations.ops import OpContainer from alembic.runtime.migration import MigrationContext from alembic.script.revision import Revision class Rewriter: """A helper object that allows easy 'rewriting' of ops streams. The :class:`.Rewriter` object is intended to be passed along to the :paramref:`.EnvironmentContext.configure.process_revision_directives` parameter in an ``env.py`` script. Once constructed, any number of "rewrites" functions can be associated with it, which will be given the opportunity to modify the structure without having to have explicit knowledge of the overall structure. The function is passed the :class:`.MigrationContext` object and ``revision`` tuple that are passed to the :paramref:`.Environment Context.configure.process_revision_directives` function normally, and the third argument is an individual directive of the type noted in the decorator. 
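For example, a :class:`.Rewriter` instance may be passed directly as the
``process_revision_directives`` callable in ``env.py`` (a sketch; the other
``configure()`` arguments shown are placeholders)::

    writer = autogenerate.Rewriter()

    # inside env.py's configure step; surrounding setup elided
    context.configure(
        connection=connection,
        target_metadata=target_metadata,
        process_revision_directives=writer,
    )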
The function has the choice of returning a single op directive, which normally can be the directive that was actually passed, or a new directive to replace it, or a list of zero or more directives to replace it. .. seealso:: :ref:`autogen_rewriter` - usage example """ _traverse = util.Dispatcher() _chained = None def __init__(self) -> None: self.dispatch = util.Dispatcher() def chain(self, other: "Rewriter") -> "Rewriter": """Produce a "chain" of this :class:`.Rewriter` to another. This allows two rewriters to operate serially on a stream, e.g.:: writer1 = autogenerate.Rewriter() writer2 = autogenerate.Rewriter() @writer1.rewrites(ops.AddColumnOp) def add_column_nullable(context, revision, op): op.column.nullable = True return op @writer2.rewrites(ops.AddColumnOp) def add_column_idx(context, revision, op): idx_op = ops.CreateIndexOp( 'ixc', op.table_name, [op.column.name]) return [ op, idx_op ] writer = writer1.chain(writer2) :param other: a :class:`.Rewriter` instance :return: a new :class:`.Rewriter` that will run the operations of this writer, then the "other" writer, in succession. """ wr = self.__class__.__new__(self.__class__) wr.__dict__.update(self.__dict__) wr._chained = other return wr def rewrites( self, operator: Union[ Type["AddColumnOp"], Type["MigrateOperation"], Type["AlterColumnOp"], Type["CreateTableOp"], Type["ModifyTableOps"], ], ) -> Callable: """Register a function as rewriter for a given type. The function should receive three arguments, which are the :class:`.MigrationContext`, a ``revision`` tuple, and an op directive of the type indicated. E.g.:: @writer1.rewrites(ops.AddColumnOp) def add_column_nullable(context, revision, op): op.column.nullable = True return op """ return self.dispatch.dispatch_for(operator) def _rewrite( self, context: "MigrationContext", revision: "Revision", directive: "MigrateOperation", ) -> Iterator["MigrateOperation"]: try: _rewriter = self.dispatch.dispatch(directive) except ValueError: _rewriter = None yield directive else: if self in directive._mutations: yield directive else: for r_directive in util.to_list( _rewriter(context, revision, directive), [] ): r_directive._mutations = r_directive._mutations.union( [self] ) yield r_directive def __call__( self, context: "MigrationContext", revision: "Revision", directives: List["MigrationScript"], ) -> None: self.process_revision_directives(context, revision, directives) if self._chained: self._chained(context, revision, directives) @_traverse.dispatch_for(ops.MigrationScript) def _traverse_script( self, context: "MigrationContext", revision: "Revision", directive: "MigrationScript", ) -> None: upgrade_ops_list = [] for upgrade_ops in directive.upgrade_ops_list: ret = self._traverse_for(context, revision, upgrade_ops) if len(ret) != 1: raise ValueError( "Can only return single object for UpgradeOps traverse" ) upgrade_ops_list.append(ret[0]) directive.upgrade_ops = upgrade_ops_list downgrade_ops_list = [] for downgrade_ops in directive.downgrade_ops_list: ret = self._traverse_for(context, revision, downgrade_ops) if len(ret) != 1: raise ValueError( "Can only return single object for DowngradeOps traverse" ) downgrade_ops_list.append(ret[0]) directive.downgrade_ops = downgrade_ops_list @_traverse.dispatch_for(ops.OpContainer) def _traverse_op_container( self, context: "MigrationContext", revision: "Revision", directive: "OpContainer", ) -> None: self._traverse_list(context, revision, directive.ops) @_traverse.dispatch_for(ops.MigrateOperation) def _traverse_any_directive( self, context: 
"MigrationContext", revision: "Revision", directive: "MigrateOperation", ) -> None: pass def _traverse_for( self, context: "MigrationContext", revision: "Revision", directive: "MigrateOperation", ) -> Any: directives = list(self._rewrite(context, revision, directive)) for directive in directives: traverser = self._traverse.dispatch(directive) traverser(self, context, revision, directive) return directives def _traverse_list( self, context: "MigrationContext", revision: "Revision", directives: Any, ) -> None: dest = [] for directive in directives: dest.extend(self._traverse_for(context, revision, directive)) directives[:] = dest def process_revision_directives( self, context: "MigrationContext", revision: "Revision", directives: List["MigrationScript"], ) -> None: self._traverse_list(context, revision, directives) alembic-rel_1_7_6/alembic/command.py000066400000000000000000000461071417624537100175330ustar00rootroot00000000000000import os from typing import Callable from typing import cast from typing import List from typing import Optional from typing import TYPE_CHECKING from typing import Union from . import autogenerate as autogen from . import util from .runtime.environment import EnvironmentContext from .script import ScriptDirectory if TYPE_CHECKING: from alembic.config import Config from alembic.script.base import Script def list_templates(config): """List available templates. :param config: a :class:`.Config` object. """ config.print_stdout("Available templates:\n") for tempname in os.listdir(config.get_template_directory()): with open( os.path.join(config.get_template_directory(), tempname, "README") ) as readme: synopsis = next(readme) config.print_stdout("%s - %s", tempname, synopsis) config.print_stdout("\nTemplates are used via the 'init' command, e.g.:") config.print_stdout("\n alembic init --template generic ./scripts") def init( config: "Config", directory: str, template: str = "generic", package: bool = False, ) -> None: """Initialize a new scripts directory. :param config: a :class:`.Config` object. :param directory: string path of the target directory :param template: string name of the migration environment template to use. :param package: when True, write ``__init__.py`` files into the environment location as well as the versions/ location. .. 
versionadded:: 1.2 """ if os.access(directory, os.F_OK) and os.listdir(directory): raise util.CommandError( "Directory %s already exists and is not empty" % directory ) template_dir = os.path.join(config.get_template_directory(), template) if not os.access(template_dir, os.F_OK): raise util.CommandError("No such template %r" % template) if not os.access(directory, os.F_OK): util.status( "Creating directory %s" % os.path.abspath(directory), os.makedirs, directory, ) versions = os.path.join(directory, "versions") util.status( "Creating directory %s" % os.path.abspath(versions), os.makedirs, versions, ) script = ScriptDirectory(directory) for file_ in os.listdir(template_dir): file_path = os.path.join(template_dir, file_) if file_ == "alembic.ini.mako": config_file = os.path.abspath(cast(str, config.config_file_name)) if os.access(cast(str, config_file), os.F_OK): util.msg("File %s already exists, skipping" % config_file) else: script._generate_template( file_path, config_file, script_location=directory ) elif os.path.isfile(file_path): output_file = os.path.join(directory, file_) script._copy_file(file_path, output_file) if package: for path in [ os.path.join(os.path.abspath(directory), "__init__.py"), os.path.join(os.path.abspath(versions), "__init__.py"), ]: file_ = util.status("Adding %s" % path, open, path, "w") file_.close() # type:ignore[attr-defined] util.msg( "Please edit configuration/connection/logging " "settings in %r before proceeding." % config_file ) def revision( config: "Config", message: Optional[str] = None, autogenerate: bool = False, sql: bool = False, head: str = "head", splice: bool = False, branch_label: Optional[str] = None, version_path: Optional[str] = None, rev_id: Optional[str] = None, depends_on: Optional[str] = None, process_revision_directives: Callable = None, ) -> Union[Optional["Script"], List[Optional["Script"]]]: """Create a new revision file. :param config: a :class:`.Config` object. :param message: string message to apply to the revision; this is the ``-m`` option to ``alembic revision``. :param autogenerate: whether or not to autogenerate the script from the database; this is the ``--autogenerate`` option to ``alembic revision``. :param sql: whether to dump the script out as a SQL string; when specified, the script is dumped to stdout. This is the ``--sql`` option to ``alembic revision``. :param head: head revision to build the new revision upon as a parent; this is the ``--head`` option to ``alembic revision``. :param splice: whether or not the new revision should be made into a new head of its own; is required when the given ``head`` is not itself a head. This is the ``--splice`` option to ``alembic revision``. :param branch_label: string label to apply to the branch; this is the ``--branch-label`` option to ``alembic revision``. :param version_path: string symbol identifying a specific version path from the configuration; this is the ``--version-path`` option to ``alembic revision``. :param rev_id: optional revision identifier to use instead of having one generated; this is the ``--rev-id`` option to ``alembic revision``. :param depends_on: optional list of "depends on" identifiers; this is the ``--depends-on`` option to ``alembic revision``. :param process_revision_directives: this is a callable that takes the same form as the callable described at :paramref:`.EnvironmentContext.configure.process_revision_directives`; will be applied to the structure generated by the revision process where it can be altered programmatically. 
Note that unlike all the other parameters, this option is only available via programmatic use of :func:`.command.revision` """ script_directory = ScriptDirectory.from_config(config) command_args = dict( message=message, autogenerate=autogenerate, sql=sql, head=head, splice=splice, branch_label=branch_label, version_path=version_path, rev_id=rev_id, depends_on=depends_on, ) revision_context = autogen.RevisionContext( config, script_directory, command_args, process_revision_directives=process_revision_directives, ) environment = util.asbool(config.get_main_option("revision_environment")) if autogenerate: environment = True if sql: raise util.CommandError( "Using --sql with --autogenerate does not make any sense" ) def retrieve_migrations(rev, context): revision_context.run_autogenerate(rev, context) return [] elif environment: def retrieve_migrations(rev, context): revision_context.run_no_autogenerate(rev, context) return [] elif sql: raise util.CommandError( "Using --sql with the revision command when " "revision_environment is not configured does not make any sense" ) if environment: with EnvironmentContext( config, script_directory, fn=retrieve_migrations, as_sql=sql, template_args=revision_context.template_args, revision_context=revision_context, ): script_directory.run_env() # the revision_context now has MigrationScript structure(s) present. # these could theoretically be further processed / rewritten *here*, # in addition to the hooks present within each run_migrations() call, # or at the end of env.py run_migrations_online(). scripts = [script for script in revision_context.generate_scripts()] if len(scripts) == 1: return scripts[0] else: return scripts def merge( config: "Config", revisions: str, message: str = None, branch_label: str = None, rev_id: str = None, ) -> Optional["Script"]: """Merge two revisions together. Creates a new migration file. :param config: a :class:`.Config` instance :param message: string message to apply to the revision :param branch_label: string label name to apply to the new revision :param rev_id: hardcoded revision identifier instead of generating a new one. .. seealso:: :ref:`branches` """ script = ScriptDirectory.from_config(config) template_args = { "config": "config" # Let templates use config for # e.g. multiple databases } return script.generate_revision( rev_id or util.rev_id(), message, refresh=True, head=revisions, branch_labels=branch_label, **template_args # type:ignore[arg-type] ) def upgrade( config: "Config", revision: str, sql: bool = False, tag: Optional[str] = None, ) -> None: """Upgrade to a later version. :param config: a :class:`.Config` instance. :param revision: string revision target or range for --sql mode :param sql: if True, use ``--sql`` mode :param tag: an arbitrary "tag" that can be intercepted by custom ``env.py`` scripts via the :meth:`.EnvironmentContext.get_tag_argument` method. """ script = ScriptDirectory.from_config(config) starting_rev = None if ":" in revision: if not sql: raise util.CommandError("Range revision not allowed") starting_rev, revision = revision.split(":", 2) def upgrade(rev, context): return script._upgrade_revs(revision, rev) with EnvironmentContext( config, script, fn=upgrade, as_sql=sql, starting_rev=starting_rev, destination_rev=revision, tag=tag, ): script.run_env() def downgrade( config: "Config", revision: str, sql: bool = False, tag: Optional[str] = None, ) -> None: """Revert to a previous version. :param config: a :class:`.Config` instance. 
:param revision: string revision target or range for --sql mode :param sql: if True, use ``--sql`` mode :param tag: an arbitrary "tag" that can be intercepted by custom ``env.py`` scripts via the :meth:`.EnvironmentContext.get_tag_argument` method. """ script = ScriptDirectory.from_config(config) starting_rev = None if ":" in revision: if not sql: raise util.CommandError("Range revision not allowed") starting_rev, revision = revision.split(":", 2) elif sql: raise util.CommandError( "downgrade with --sql requires :" ) def downgrade(rev, context): return script._downgrade_revs(revision, rev) with EnvironmentContext( config, script, fn=downgrade, as_sql=sql, starting_rev=starting_rev, destination_rev=revision, tag=tag, ): script.run_env() def show(config, rev): """Show the revision(s) denoted by the given symbol. :param config: a :class:`.Config` instance. :param revision: string revision target """ script = ScriptDirectory.from_config(config) if rev == "current": def show_current(rev, context): for sc in script.get_revisions(rev): config.print_stdout(sc.log_entry) return [] with EnvironmentContext(config, script, fn=show_current): script.run_env() else: for sc in script.get_revisions(rev): config.print_stdout(sc.log_entry) def history( config: "Config", rev_range: Optional[str] = None, verbose: bool = False, indicate_current: bool = False, ) -> None: """List changeset scripts in chronological order. :param config: a :class:`.Config` instance. :param rev_range: string revision range :param verbose: output in verbose mode. :param indicate_current: indicate current revision. """ base: Optional[str] head: Optional[str] script = ScriptDirectory.from_config(config) if rev_range is not None: if ":" not in rev_range: raise util.CommandError( "History range requires [start]:[end], " "[start]:, or :[end]" ) base, head = rev_range.strip().split(":") else: base = head = None environment = ( util.asbool(config.get_main_option("revision_environment")) or indicate_current ) def _display_history(config, script, base, head, currents=()): for sc in script.walk_revisions( base=base or "base", head=head or "heads" ): if indicate_current: sc._db_current_indicator = sc.revision in currents config.print_stdout( sc.cmd_format( verbose=verbose, include_branches=True, include_doc=True, include_parents=True, ) ) def _display_history_w_current(config, script, base, head): def _display_current_history(rev, context): if head == "current": _display_history(config, script, base, rev, rev) elif base == "current": _display_history(config, script, rev, head, rev) else: _display_history(config, script, base, head, rev) return [] with EnvironmentContext(config, script, fn=_display_current_history): script.run_env() if base == "current" or head == "current" or environment: _display_history_w_current(config, script, base, head) else: _display_history(config, script, base, head) def heads(config, verbose=False, resolve_dependencies=False): """Show current available heads in the script directory. :param config: a :class:`.Config` instance. :param verbose: output in verbose mode. :param resolve_dependencies: treat dependency version as down revisions. """ script = ScriptDirectory.from_config(config) if resolve_dependencies: heads = script.get_revisions("heads") else: heads = script.get_revisions(script.get_heads()) for rev in heads: config.print_stdout( rev.cmd_format( verbose, include_branches=True, tree_indicators=False ) ) def branches(config, verbose=False): """Show current branch points. 
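    E.g., invoked programmatically (``alembic_cfg`` here stands for a
    previously constructed :class:`.Config`; this is an illustrative sketch,
    not part of the command itself)::

        from alembic import command

        command.branches(alembic_cfg, verbose=True)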
:param config: a :class:`.Config` instance. :param verbose: output in verbose mode. """ script = ScriptDirectory.from_config(config) for sc in script.walk_revisions(): if sc.is_branch_point: config.print_stdout( "%s\n%s\n", sc.cmd_format(verbose, include_branches=True), "\n".join( "%s -> %s" % ( " " * len(str(sc.revision)), rev_obj.cmd_format( False, include_branches=True, include_doc=verbose ), ) for rev_obj in ( script.get_revision(rev) for rev in sc.nextrev ) ), ) def current(config: "Config", verbose: bool = False) -> None: """Display the current revision for a database. :param config: a :class:`.Config` instance. :param verbose: output in verbose mode. """ script = ScriptDirectory.from_config(config) def display_version(rev, context): if verbose: config.print_stdout( "Current revision(s) for %s:", util.obfuscate_url_pw(context.connection.engine.url), ) for rev in script.get_all_current(rev): config.print_stdout(rev.cmd_format(verbose)) return [] with EnvironmentContext( config, script, fn=display_version, dont_mutate=True ): script.run_env() def stamp( config: "Config", revision: str, sql: bool = False, tag: Optional[str] = None, purge: bool = False, ) -> None: """'stamp' the revision table with the given revision; don't run any migrations. :param config: a :class:`.Config` instance. :param revision: target revision or list of revisions. May be a list to indicate stamping of multiple branch heads. .. note:: this parameter is called "revisions" in the command line interface. .. versionchanged:: 1.2 The revision may be a single revision or list of revisions when stamping multiple branch heads. :param sql: use ``--sql`` mode :param tag: an arbitrary "tag" that can be intercepted by custom ``env.py`` scripts via the :class:`.EnvironmentContext.get_tag_argument` method. :param purge: delete all entries in the version table before stamping. .. versionadded:: 1.2 """ script = ScriptDirectory.from_config(config) if sql: destination_revs = [] starting_rev = None for _revision in util.to_list(revision): if ":" in _revision: srev, _revision = _revision.split(":", 2) if starting_rev != srev: if starting_rev is None: starting_rev = srev else: raise util.CommandError( "Stamp operation with --sql only supports a " "single starting revision at a time" ) destination_revs.append(_revision) else: destination_revs = util.to_list(revision) def do_stamp(rev, context): return script._stamp_revs(util.to_tuple(destination_revs), rev) with EnvironmentContext( config, script, fn=do_stamp, as_sql=sql, starting_rev=starting_rev if sql else None, destination_rev=util.to_tuple(destination_revs), tag=tag, purge=purge, ): script.run_env() def edit(config: "Config", rev: str) -> None: """Edit revision script(s) using $EDITOR. :param config: a :class:`.Config` instance. :param rev: target revision. """ script = ScriptDirectory.from_config(config) if rev == "current": def edit_current(rev, context): if not rev: raise util.CommandError("No current revisions") for sc in script.get_revisions(rev): util.open_in_editor(sc.path) return [] with EnvironmentContext(config, script, fn=edit_current): script.run_env() else: revs = script.get_revisions(rev) if not revs: raise util.CommandError( "No revision files indicated by symbol '%s'" % rev ) for sc in revs: util.open_in_editor(sc.path) def ensure_version(config: "Config", sql: bool = False) -> None: """Create the alembic version table if it doesn't exist already . :param config: a :class:`.Config` instance. :param sql: use ``--sql`` mode .. 
versionadded:: 1.7.6 """ script = ScriptDirectory.from_config(config) def do_ensure_version(rev, context): context._ensure_version_table() return [] with EnvironmentContext( config, script, fn=do_ensure_version, as_sql=sql, ): script.run_env() alembic-rel_1_7_6/alembic/config.py000066400000000000000000000500711417624537100173550ustar00rootroot00000000000000from argparse import ArgumentParser from argparse import Namespace from configparser import ConfigParser import inspect import os import sys from typing import Dict from typing import Optional from typing import overload from typing import TextIO from . import __version__ from . import command from . import util from .util import compat class Config: r"""Represent an Alembic configuration. Within an ``env.py`` script, this is available via the :attr:`.EnvironmentContext.config` attribute, which in turn is available at ``alembic.context``:: from alembic import context some_param = context.config.get_main_option("my option") When invoking Alembic programatically, a new :class:`.Config` can be created by passing the name of an .ini file to the constructor:: from alembic.config import Config alembic_cfg = Config("/path/to/yourapp/alembic.ini") With a :class:`.Config` object, you can then run Alembic commands programmatically using the directives in :mod:`alembic.command`. The :class:`.Config` object can also be constructed without a filename. Values can be set programmatically, and new sections will be created as needed:: from alembic.config import Config alembic_cfg = Config() alembic_cfg.set_main_option("script_location", "myapp:migrations") alembic_cfg.set_main_option("sqlalchemy.url", "postgresql://foo/bar") alembic_cfg.set_section_option("mysection", "foo", "bar") .. warning:: When using programmatic configuration, make sure the ``env.py`` file in use is compatible with the target configuration; including that the call to Python ``logging.fileConfig()`` is omitted if the programmatic configuration doesn't actually include logging directives. For passing non-string values to environments, such as connections and engines, use the :attr:`.Config.attributes` dictionary:: with engine.begin() as connection: alembic_cfg.attributes['connection'] = connection command.upgrade(alembic_cfg, "head") :param file\_: name of the .ini file to open. :param ini_section: name of the main Alembic section within the .ini file :param output_buffer: optional file-like input buffer which will be passed to the :class:`.MigrationContext` - used to redirect the output of "offline generation" when using Alembic programmatically. :param stdout: buffer where the "print" output of commands will be sent. Defaults to ``sys.stdout``. :param config_args: A dictionary of keys and values that will be used for substitution in the alembic config file. The dictionary as given is **copied** to a new one, stored locally as the attribute ``.config_args``. When the :attr:`.Config.file_config` attribute is first invoked, the replacement variable ``here`` will be added to this dictionary before the dictionary is passed to ``ConfigParser()`` to parse the .ini file. :param attributes: optional dictionary of arbitrary Python keys/values, which will be populated into the :attr:`.Config.attributes` dictionary. .. 
seealso:: :ref:`connection_sharing` """ def __init__( self, file_: Optional[str] = None, ini_section: str = "alembic", output_buffer: Optional[TextIO] = None, stdout: TextIO = sys.stdout, cmd_opts: Optional[Namespace] = None, config_args: util.immutabledict = util.immutabledict(), attributes: dict = None, ) -> None: """Construct a new :class:`.Config`""" self.config_file_name = file_ self.config_ini_section = ini_section self.output_buffer = output_buffer self.stdout = stdout self.cmd_opts = cmd_opts self.config_args = dict(config_args) if attributes: self.attributes.update(attributes) cmd_opts: Optional[Namespace] = None """The command-line options passed to the ``alembic`` script. Within an ``env.py`` script this can be accessed via the :attr:`.EnvironmentContext.config` attribute. .. seealso:: :meth:`.EnvironmentContext.get_x_argument` """ config_file_name: Optional[str] = None """Filesystem path to the .ini file in use.""" config_ini_section: str = None # type:ignore[assignment] """Name of the config file section to read basic configuration from. Defaults to ``alembic``, that is the ``[alembic]`` section of the .ini file. This value is modified using the ``-n/--name`` option to the Alembic runner. """ @util.memoized_property def attributes(self): """A Python dictionary for storage of additional state. This is a utility dictionary which can include not just strings but engines, connections, schema objects, or anything else. Use this to pass objects into an env.py script, such as passing a :class:`sqlalchemy.engine.base.Connection` when calling commands from :mod:`alembic.command` programmatically. .. seealso:: :ref:`connection_sharing` :paramref:`.Config.attributes` """ return {} def print_stdout(self, text: str, *arg) -> None: """Render a message to standard out. When :meth:`.Config.print_stdout` is called with additional args those arguments will formatted against the provided text, otherwise we simply output the provided text verbatim. e.g.:: >>> config.print_stdout('Some text %s', 'arg') Some Text arg """ if arg: output = str(text) % arg else: output = str(text) util.write_outstream(self.stdout, output, "\n") @util.memoized_property def file_config(self): """Return the underlying ``ConfigParser`` object. Direct access to the .ini file is available here, though the :meth:`.Config.get_section` and :meth:`.Config.get_main_option` methods provide a possibly simpler interface. """ if self.config_file_name: here = os.path.abspath(os.path.dirname(self.config_file_name)) else: here = "" self.config_args["here"] = here file_config = ConfigParser(self.config_args) if self.config_file_name: file_config.read([self.config_file_name]) else: file_config.add_section(self.config_ini_section) return file_config def get_template_directory(self) -> str: """Return the directory where Alembic setup templates are found. This method is used by the alembic ``init`` and ``list_templates`` commands. """ import alembic package_dir = os.path.abspath(os.path.dirname(alembic.__file__)) return os.path.join(package_dir, "templates") @overload def get_section( self, name: str, default: Dict[str, str] ) -> Dict[str, str]: ... @overload def get_section( self, name: str, default: Optional[Dict[str, str]] = ... ) -> Optional[Dict[str, str]]: ... def get_section(self, name: str, default=None): """Return all the configuration options from a given .ini file section as a dictionary. 
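        E.g., a minimal sketch (the ``"my_section"`` name is illustrative only,
        not a section Alembic itself defines)::

            # returns the named section as a plain dict, or the given
            # default ({} here) if the section is not present
            options = config.get_section("my_section", {})
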
""" if not self.file_config.has_section(name): return default return dict(self.file_config.items(name)) def set_main_option(self, name: str, value: str) -> None: """Set an option programmatically within the 'main' section. This overrides whatever was in the .ini file. :param name: name of the value :param value: the value. Note that this value is passed to ``ConfigParser.set``, which supports variable interpolation using pyformat (e.g. ``%(some_value)s``). A raw percent sign not part of an interpolation symbol must therefore be escaped, e.g. ``%%``. The given value may refer to another value already in the file using the interpolation format. """ self.set_section_option(self.config_ini_section, name, value) def remove_main_option(self, name: str) -> None: self.file_config.remove_option(self.config_ini_section, name) def set_section_option(self, section: str, name: str, value: str) -> None: """Set an option programmatically within the given section. The section is created if it doesn't exist already. The value here will override whatever was in the .ini file. :param section: name of the section :param name: name of the value :param value: the value. Note that this value is passed to ``ConfigParser.set``, which supports variable interpolation using pyformat (e.g. ``%(some_value)s``). A raw percent sign not part of an interpolation symbol must therefore be escaped, e.g. ``%%``. The given value may refer to another value already in the file using the interpolation format. """ if not self.file_config.has_section(section): self.file_config.add_section(section) self.file_config.set(section, name, value) def get_section_option( self, section: str, name: str, default: Optional[str] = None ) -> Optional[str]: """Return an option from the given section of the .ini file.""" if not self.file_config.has_section(section): raise util.CommandError( "No config file %r found, or file has no " "'[%s]' section" % (self.config_file_name, section) ) if self.file_config.has_option(section, name): return self.file_config.get(section, name) else: return default @overload def get_main_option(self, name: str, default: str) -> str: ... @overload def get_main_option( self, name: str, default: Optional[str] = None ) -> Optional[str]: ... def get_main_option(self, name, default=None): """Return an option from the 'main' section of the .ini file. This defaults to being a key from the ``[alembic]`` section, unless the ``-n/--name`` flag were used to indicate a different section. """ return self.get_section_option(self.config_ini_section, name, default) class CommandLine: def __init__(self, prog: Optional[str] = None) -> None: self._generate_args(prog) def _generate_args(self, prog: Optional[str]) -> None: def add_options(fn, parser, positional, kwargs): kwargs_opts = { "template": ( "-t", "--template", dict( default="generic", type=str, help="Setup template for use with 'init'", ), ), "message": ( "-m", "--message", dict( type=str, help="Message string to use with 'revision'" ), ), "sql": ( "--sql", dict( action="store_true", help="Don't emit SQL to database - dump to " "standard output/file instead. 
See docs on " "offline mode.", ), ), "tag": ( "--tag", dict( type=str, help="Arbitrary 'tag' name - can be used by " "custom env.py scripts.", ), ), "head": ( "--head", dict( type=str, help="Specify head revision or @head " "to base new revision on.", ), ), "splice": ( "--splice", dict( action="store_true", help="Allow a non-head revision as the " "'head' to splice onto", ), ), "depends_on": ( "--depends-on", dict( action="append", help="Specify one or more revision identifiers " "which this revision should depend on.", ), ), "rev_id": ( "--rev-id", dict( type=str, help="Specify a hardcoded revision id instead of " "generating one", ), ), "version_path": ( "--version-path", dict( type=str, help="Specify specific path from config for " "version file", ), ), "branch_label": ( "--branch-label", dict( type=str, help="Specify a branch label to apply to the " "new revision", ), ), "verbose": ( "-v", "--verbose", dict(action="store_true", help="Use more verbose output"), ), "resolve_dependencies": ( "--resolve-dependencies", dict( action="store_true", help="Treat dependency versions as down revisions", ), ), "autogenerate": ( "--autogenerate", dict( action="store_true", help="Populate revision script with candidate " "migration operations, based on comparison " "of database to model.", ), ), "rev_range": ( "-r", "--rev-range", dict( action="store", help="Specify a revision range; " "format is [start]:[end]", ), ), "indicate_current": ( "-i", "--indicate-current", dict( action="store_true", help="Indicate the current revision", ), ), "purge": ( "--purge", dict( action="store_true", help="Unconditionally erase the version table " "before stamping", ), ), "package": ( "--package", dict( action="store_true", help="Write empty __init__.py files to the " "environment and version locations", ), ), } positional_help = { "directory": "location of scripts directory", "revision": "revision identifier", "revisions": "one or more revisions, or 'heads' for all heads", } for arg in kwargs: if arg in kwargs_opts: args = kwargs_opts[arg] args, kw = args[0:-1], args[-1] parser.add_argument(*args, **kw) for arg in positional: if ( arg == "revisions" or fn in positional_translations and positional_translations[fn][arg] == "revisions" ): subparser.add_argument( "revisions", nargs="+", help=positional_help.get("revisions"), ) else: subparser.add_argument(arg, help=positional_help.get(arg)) parser = ArgumentParser(prog=prog) parser.add_argument( "--version", action="version", version="%%(prog)s %s" % __version__ ) parser.add_argument( "-c", "--config", type=str, default=os.environ.get("ALEMBIC_CONFIG", "alembic.ini"), help="Alternate config file; defaults to value of " 'ALEMBIC_CONFIG environment variable, or "alembic.ini"', ) parser.add_argument( "-n", "--name", type=str, default="alembic", help="Name of section in .ini file to " "use for Alembic config", ) parser.add_argument( "-x", action="append", help="Additional arguments consumed by " "custom env.py scripts, e.g. 
-x " "setting1=somesetting -x setting2=somesetting", ) parser.add_argument( "--raiseerr", action="store_true", help="Raise a full stack trace on error", ) subparsers = parser.add_subparsers() positional_translations = {command.stamp: {"revision": "revisions"}} for fn in [getattr(command, n) for n in dir(command)]: if ( inspect.isfunction(fn) and fn.__name__[0] != "_" and fn.__module__ == "alembic.command" ): spec = compat.inspect_getfullargspec(fn) if spec[3] is not None: positional = spec[0][1 : -len(spec[3])] kwarg = spec[0][-len(spec[3]) :] else: positional = spec[0][1:] kwarg = [] if fn in positional_translations: positional = [ positional_translations[fn].get(name, name) for name in positional ] # parse first line(s) of helptext without a line break help_ = fn.__doc__ if help_: help_text = [] for line in help_.split("\n"): if not line.strip(): break else: help_text.append(line.strip()) else: help_text = [] subparser = subparsers.add_parser( fn.__name__, help=" ".join(help_text) ) add_options(fn, subparser, positional, kwarg) subparser.set_defaults(cmd=(fn, positional, kwarg)) self.parser = parser def run_cmd(self, config: Config, options: Namespace) -> None: fn, positional, kwarg = options.cmd try: fn( config, *[getattr(options, k, None) for k in positional], **dict((k, getattr(options, k, None)) for k in kwarg) ) except util.CommandError as e: if options.raiseerr: raise else: util.err(str(e)) def main(self, argv=None): options = self.parser.parse_args(argv) if not hasattr(options, "cmd"): # see http://bugs.python.org/issue9253, argparse # behavior changed incompatibly in py3.3 self.parser.error("too few arguments") else: cfg = Config( file_=options.config, ini_section=options.name, cmd_opts=options, ) self.run_cmd(cfg, options) def main(argv=None, prog=None, **kwargs): """The console runner function for Alembic.""" CommandLine(prog=prog).main(argv=argv) if __name__ == "__main__": main() alembic-rel_1_7_6/alembic/context.py000066400000000000000000000003031417624537100175650ustar00rootroot00000000000000from .runtime.environment import EnvironmentContext # create proxy functions for # each method on the EnvironmentContext class. EnvironmentContext.create_module_class_proxy(globals(), locals()) alembic-rel_1_7_6/alembic/context.pyi000066400000000000000000000666611417624537100177610ustar00rootroot00000000000000# ### this file stubs are generated by tools/write_pyi.py - do not edit ### # ### imports are manually managed from typing import Callable from typing import ContextManager from typing import Optional from typing import TextIO from typing import Tuple from typing import TYPE_CHECKING from typing import Union if TYPE_CHECKING: from sqlalchemy.engine.base import Connection from sqlalchemy.sql.schema import MetaData from .config import Config from .runtime.migration import _ProxyTransaction from .runtime.migration import MigrationContext from .script import ScriptDirectory ### end imports ### def begin_transaction() -> Union["_ProxyTransaction", ContextManager]: """Return a context manager that will enclose an operation within a "transaction", as defined by the environment's offline and transactional DDL settings. e.g.:: with context.begin_transaction(): context.run_migrations() :meth:`.begin_transaction` is intended to "do the right thing" regardless of calling context: * If :meth:`.is_transactional_ddl` is ``False``, returns a "do nothing" context manager which otherwise produces no transactional state or directives. 
* If :meth:`.is_offline_mode` is ``True``, returns a context manager that will invoke the :meth:`.DefaultImpl.emit_begin` and :meth:`.DefaultImpl.emit_commit` methods, which will produce the string directives ``BEGIN`` and ``COMMIT`` on the output stream, as rendered by the target backend (e.g. SQL Server would emit ``BEGIN TRANSACTION``). * Otherwise, calls :meth:`sqlalchemy.engine.Connection.begin` on the current online connection, which returns a :class:`sqlalchemy.engine.Transaction` object. This object demarcates a real transaction and is itself a context manager, which will roll back if an exception is raised. Note that a custom ``env.py`` script which has more specific transactional needs can of course manipulate the :class:`~sqlalchemy.engine.Connection` directly to produce transactional state in "online" mode. """ config: Config def configure( connection: Optional["Connection"] = None, url: Optional[str] = None, dialect_name: Optional[str] = None, dialect_opts: Optional[dict] = None, transactional_ddl: Optional[bool] = None, transaction_per_migration: bool = False, output_buffer: Optional[TextIO] = None, starting_rev: Optional[str] = None, tag: Optional[str] = None, template_args: Optional[dict] = None, render_as_batch: bool = False, target_metadata: Optional["MetaData"] = None, include_name: Optional[Callable] = None, include_object: Optional[Callable] = None, include_schemas: bool = False, process_revision_directives: Optional[Callable] = None, compare_type: bool = False, compare_server_default: bool = False, render_item: Optional[Callable] = None, literal_binds: bool = False, upgrade_token: str = "upgrades", downgrade_token: str = "downgrades", alembic_module_prefix: str = "op.", sqlalchemy_module_prefix: str = "sa.", user_module_prefix: Optional[str] = None, on_version_apply: Optional[Callable] = None, **kw ) -> None: """Configure a :class:`.MigrationContext` within this :class:`.EnvironmentContext` which will provide database connectivity and other configuration to a series of migration scripts. Many methods on :class:`.EnvironmentContext` require that this method has been called in order to function, as they ultimately need to have database access or at least access to the dialect in use. Those which do are documented as such. The important thing needed by :meth:`.configure` is a means to determine what kind of database dialect is in use. An actual connection to that database is needed only if the :class:`.MigrationContext` is to be used in "online" mode. If the :meth:`.is_offline_mode` function returns ``True``, then no connection is needed here. Otherwise, the ``connection`` parameter should be present as an instance of :class:`sqlalchemy.engine.Connection`. This function is typically called from the ``env.py`` script within a migration environment. It can be called multiple times for an invocation. The most recent :class:`~sqlalchemy.engine.Connection` for which it was called is the one that will be operated upon by the next call to :meth:`.run_migrations`. General parameters: :param connection: a :class:`~sqlalchemy.engine.Connection` to use for SQL execution in "online" mode. When present, is also used to determine the type of dialect in use. :param url: a string database url, or a :class:`sqlalchemy.engine.url.URL` object. The type of dialect to be used will be derived from this if ``connection`` is not passed. :param dialect_name: string name of a dialect, such as "postgresql", "mssql", etc. 
The type of dialect to be used will be derived from this if ``connection`` and ``url`` are not passed. :param dialect_opts: dictionary of options to be passed to dialect constructor. .. versionadded:: 1.0.12 :param transactional_ddl: Force the usage of "transactional" DDL on or off; this otherwise defaults to whether or not the dialect in use supports it. :param transaction_per_migration: if True, nest each migration script in a transaction rather than the full series of migrations to run. :param output_buffer: a file-like object that will be used for textual output when the ``--sql`` option is used to generate SQL scripts. Defaults to ``sys.stdout`` if not passed here and also not present on the :class:`.Config` object. The value here overrides that of the :class:`.Config` object. :param output_encoding: when using ``--sql`` to generate SQL scripts, apply this encoding to the string output. :param literal_binds: when using ``--sql`` to generate SQL scripts, pass through the ``literal_binds`` flag to the compiler so that any literal values that would ordinarily be bound parameters are converted to plain strings. .. warning:: Dialects can typically only handle simple datatypes like strings and numbers for auto-literal generation. Datatypes like dates, intervals, and others may still require manual formatting, typically using :meth:`.Operations.inline_literal`. .. note:: the ``literal_binds`` flag is ignored on SQLAlchemy versions prior to 0.8 where this feature is not supported. .. seealso:: :meth:`.Operations.inline_literal` :param starting_rev: Override the "starting revision" argument when using ``--sql`` mode. :param tag: a string tag for usage by custom ``env.py`` scripts. Set via the ``--tag`` option, can be overridden here. :param template_args: dictionary of template arguments which will be added to the template argument environment when running the "revision" command. Note that the script environment is only run within the "revision" command if the --autogenerate option is used, or if the option "revision_environment=true" is present in the alembic.ini file. :param version_table: The name of the Alembic version table. The default is ``'alembic_version'``. :param version_table_schema: Optional schema to place version table within. :param version_table_pk: boolean, whether the Alembic version table should use a primary key constraint for the "value" column; this only takes effect when the table is first created. Defaults to True; setting to False should not be necessary and is here for backwards compatibility reasons. :param on_version_apply: a callable or collection of callables to be run for each migration step. The callables will be run in the order they are given, once for each migration step, after the respective operation has been applied but before its transaction is finalized. Each callable accepts no positional arguments and the following keyword arguments: * ``ctx``: the :class:`.MigrationContext` running the migration, * ``step``: a :class:`.MigrationInfo` representing the step currently being applied, * ``heads``: a collection of version strings representing the current heads, * ``run_args``: the ``**kwargs`` passed to :meth:`.run_migrations`. Parameters specific to the autogenerate feature, when ``alembic revision`` is run with the ``--autogenerate`` feature: :param target_metadata: a :class:`sqlalchemy.schema.MetaData` object, or a sequence of :class:`~sqlalchemy.schema.MetaData` objects, that will be consulted during autogeneration. 
The tables present in each :class:`~sqlalchemy.schema.MetaData` will be compared against what is locally available on the target :class:`~sqlalchemy.engine.Connection` to produce candidate upgrade/downgrade operations. :param compare_type: Indicates type comparison behavior during an autogenerate operation. Defaults to ``False`` which disables type comparison. Set to ``True`` to turn on default type comparison, which has varied accuracy depending on backend. See :ref:`compare_types` for an example as well as information on other type comparison options. .. seealso:: :ref:`compare_types` :paramref:`.EnvironmentContext.configure.compare_server_default` :param compare_server_default: Indicates server default comparison behavior during an autogenerate operation. Defaults to ``False`` which disables server default comparison. Set to ``True`` to turn on server default comparison, which has varied accuracy depending on backend. To customize server default comparison behavior, a callable may be specified which can filter server default comparisons during an autogenerate operation. defaults during an autogenerate operation. The format of this callable is:: def my_compare_server_default(context, inspected_column, metadata_column, inspected_default, metadata_default, rendered_metadata_default): # return True if the defaults are different, # False if not, or None to allow the default implementation # to compare these defaults return None context.configure( # ... compare_server_default = my_compare_server_default ) ``inspected_column`` is a dictionary structure as returned by :meth:`sqlalchemy.engine.reflection.Inspector.get_columns`, whereas ``metadata_column`` is a :class:`sqlalchemy.schema.Column` from the local model environment. A return value of ``None`` indicates to allow default server default comparison to proceed. Note that some backends such as Postgresql actually execute the two defaults on the database side to compare for equivalence. .. seealso:: :paramref:`.EnvironmentContext.configure.compare_type` :param include_name: A callable function which is given the chance to return ``True`` or ``False`` for any database reflected object based on its name, including database schema names when the :paramref:`.EnvironmentContext.configure.include_schemas` flag is set to ``True``. The function accepts the following positional arguments: * ``name``: the name of the object, such as schema name or table name. Will be ``None`` when indicating the default schema name of the database connection. * ``type``: a string describing the type of object; currently ``"schema"``, ``"table"``, ``"column"``, ``"index"``, ``"unique_constraint"``, or ``"foreign_key_constraint"`` * ``parent_names``: a dictionary of "parent" object names, that are relative to the name being given. Keys in this dictionary may include: ``"schema_name"``, ``"table_name"``. E.g.:: def include_name(name, type_, parent_names): if type_ == "schema": return name in ["schema_one", "schema_two"] else: return True context.configure( # ... include_schemas = True, include_name = include_name ) .. versionadded:: 1.5 .. seealso:: :ref:`autogenerate_include_hooks` :paramref:`.EnvironmentContext.configure.include_object` :paramref:`.EnvironmentContext.configure.include_schemas` :param include_object: A callable function which is given the chance to return ``True`` or ``False`` for any object, indicating if the given object should be considered in the autogenerate sweep. 
The function accepts the following positional arguments: * ``object``: a :class:`~sqlalchemy.schema.SchemaItem` object such as a :class:`~sqlalchemy.schema.Table`, :class:`~sqlalchemy.schema.Column`, :class:`~sqlalchemy.schema.Index` :class:`~sqlalchemy.schema.UniqueConstraint`, or :class:`~sqlalchemy.schema.ForeignKeyConstraint` object * ``name``: the name of the object. This is typically available via ``object.name``. * ``type``: a string describing the type of object; currently ``"table"``, ``"column"``, ``"index"``, ``"unique_constraint"``, or ``"foreign_key_constraint"`` * ``reflected``: ``True`` if the given object was produced based on table reflection, ``False`` if it's from a local :class:`.MetaData` object. * ``compare_to``: the object being compared against, if available, else ``None``. E.g.:: def include_object(object, name, type_, reflected, compare_to): if (type_ == "column" and not reflected and object.info.get("skip_autogenerate", False)): return False else: return True context.configure( # ... include_object = include_object ) For the use case of omitting specific schemas from a target database when :paramref:`.EnvironmentContext.configure.include_schemas` is set to ``True``, the :attr:`~sqlalchemy.schema.Table.schema` attribute can be checked for each :class:`~sqlalchemy.schema.Table` object passed to the hook, however it is much more efficient to filter on schemas before reflection of objects takes place using the :paramref:`.EnvironmentContext.configure.include_name` hook. .. seealso:: :ref:`autogenerate_include_hooks` :paramref:`.EnvironmentContext.configure.include_name` :paramref:`.EnvironmentContext.configure.include_schemas` :param render_as_batch: if True, commands which alter elements within a table will be placed under a ``with batch_alter_table():`` directive, so that batch migrations will take place. .. seealso:: :ref:`batch_migrations` :param include_schemas: If True, autogenerate will scan across all schemas located by the SQLAlchemy :meth:`~sqlalchemy.engine.reflection.Inspector.get_schema_names` method, and include all differences in tables found across all those schemas. When using this option, you may want to also use the :paramref:`.EnvironmentContext.configure.include_name` parameter to specify a callable which can filter the tables/schemas that get included. .. seealso:: :ref:`autogenerate_include_hooks` :paramref:`.EnvironmentContext.configure.include_name` :paramref:`.EnvironmentContext.configure.include_object` :param render_item: Callable that can be used to override how any schema item, i.e. column, constraint, type, etc., is rendered for autogenerate. The callable receives a string describing the type of object, the object, and the autogen context. If it returns False, the default rendering method will be used. If it returns None, the item will not be rendered in the context of a Table construct, that is, can be used to skip columns or constraints within op.create_table():: def my_render_column(type_, col, autogen_context): if type_ == "column" and isinstance(col, MySpecialCol): return repr(col) else: return False context.configure( # ... render_item = my_render_column ) Available values for the type string include: ``"column"``, ``"primary_key"``, ``"foreign_key"``, ``"unique"``, ``"check"``, ``"type"``, ``"server_default"``. .. seealso:: :ref:`autogen_render_types` :param upgrade_token: When autogenerate completes, the text of the candidate upgrade operations will be present in this template variable when ``script.py.mako`` is rendered. 
Defaults to ``upgrades``. :param downgrade_token: When autogenerate completes, the text of the candidate downgrade operations will be present in this template variable when ``script.py.mako`` is rendered. Defaults to ``downgrades``. :param alembic_module_prefix: When autogenerate refers to Alembic :mod:`alembic.operations` constructs, this prefix will be used (i.e. ``op.create_table``) Defaults to "``op.``". Can be ``None`` to indicate no prefix. :param sqlalchemy_module_prefix: When autogenerate refers to SQLAlchemy :class:`~sqlalchemy.schema.Column` or type classes, this prefix will be used (i.e. ``sa.Column("somename", sa.Integer)``) Defaults to "``sa.``". Can be ``None`` to indicate no prefix. Note that when dialect-specific types are rendered, autogenerate will render them using the dialect module name, i.e. ``mssql.BIT()``, ``postgresql.UUID()``. :param user_module_prefix: When autogenerate refers to a SQLAlchemy type (e.g. :class:`.TypeEngine`) where the module name is not under the ``sqlalchemy`` namespace, this prefix will be used within autogenerate. If left at its default of ``None``, the ``__module__`` attribute of the type is used to render the import module. It's a good practice to set this and to have all custom types be available from a fixed module space, in order to future-proof migration files against reorganizations in modules. .. seealso:: :ref:`autogen_module_prefix` :param process_revision_directives: a callable function that will be passed a structure representing the end result of an autogenerate or plain "revision" operation, which can be manipulated to affect how the ``alembic revision`` command ultimately outputs new revision scripts. The structure of the callable is:: def process_revision_directives(context, revision, directives): pass The ``directives`` parameter is a Python list containing a single :class:`.MigrationScript` directive, which represents the revision file to be generated. This list as well as its contents may be freely modified to produce any set of commands. The section :ref:`customizing_revision` shows an example of doing this. The ``context`` parameter is the :class:`.MigrationContext` in use, and ``revision`` is a tuple of revision identifiers representing the current revision of the database. The callable is invoked at all times when the ``--autogenerate`` option is passed to ``alembic revision``. If ``--autogenerate`` is not passed, the callable is invoked only if the ``revision_environment`` variable is set to True in the Alembic configuration, in which case the given ``directives`` collection will contain empty :class:`.UpgradeOps` and :class:`.DowngradeOps` collections for ``.upgrade_ops`` and ``.downgrade_ops``. The ``--autogenerate`` option itself can be inferred by inspecting ``context.config.cmd_opts.autogenerate``. The callable function may optionally be an instance of a :class:`.Rewriter` object. This is a helper object that assists in the production of autogenerate-stream rewriter functions. .. seealso:: :ref:`customizing_revision` :ref:`autogen_rewriter` :paramref:`.command.revision.process_revision_directives` Parameters specific to individual backends: :param mssql_batch_separator: The "batch separator" which will be placed between each statement when generating offline SQL Server migrations. Defaults to ``GO``. 
Note this is in addition to the customary semicolon ``;`` at the end of each statement; SQL Server considers the "batch separator" to denote the end of an individual statement execution, and cannot group certain dependent operations in one step. :param oracle_batch_separator: The "batch separator" which will be placed between each statement when generating offline Oracle migrations. Defaults to ``/``. Oracle doesn't add a semicolon between statements like most other backends. """ def execute(sql, execution_options=None): """Execute the given SQL using the current change context. The behavior of :meth:`.execute` is the same as that of :meth:`.Operations.execute`. Please see that function's documentation for full detail including caveats and limitations. This function requires that a :class:`.MigrationContext` has first been made available via :meth:`.configure`. """ def get_bind(): """Return the current 'bind'. In "online" mode, this is the :class:`sqlalchemy.engine.Connection` currently being used to emit SQL to the database. This function requires that a :class:`.MigrationContext` has first been made available via :meth:`.configure`. """ def get_context() -> "MigrationContext": """Return the current :class:`.MigrationContext` object. If :meth:`.EnvironmentContext.configure` has not been called yet, raises an exception. """ def get_head_revision() -> Union[str, Tuple[str, ...], None]: """Return the hex identifier of the 'head' script revision. If the script directory has multiple heads, this method raises a :class:`.CommandError`; :meth:`.EnvironmentContext.get_head_revisions` should be preferred. This function does not require that the :class:`.MigrationContext` has been configured. .. seealso:: :meth:`.EnvironmentContext.get_head_revisions` """ def get_head_revisions() -> Union[str, Tuple[str, ...], None]: """Return the hex identifier of the 'heads' script revision(s). This returns a tuple containing the version number of all heads in the script directory. This function does not require that the :class:`.MigrationContext` has been configured. """ def get_revision_argument() -> Union[str, Tuple[str, ...], None]: """Get the 'destination' revision argument. This is typically the argument passed to the ``upgrade`` or ``downgrade`` command. If it was specified as ``head``, the actual version number is returned; if specified as ``base``, ``None`` is returned. This function does not require that the :class:`.MigrationContext` has been configured. """ def get_starting_revision_argument() -> Union[str, Tuple[str, ...], None]: """Return the 'starting revision' argument, if the revision was passed using ``start:end``. This is only meaningful in "offline" mode. Returns ``None`` if no value is available or was configured. This function does not require that the :class:`.MigrationContext` has been configured. """ def get_tag_argument() -> Optional[str]: """Return the value passed for the ``--tag`` argument, if any. The ``--tag`` argument is not used directly by Alembic, but is available for custom ``env.py`` configurations that wish to use it; particularly for offline generation scripts that wish to generate tagged filenames. This function does not require that the :class:`.MigrationContext` has been configured. .. seealso:: :meth:`.EnvironmentContext.get_x_argument` - a newer and more open ended system of extending ``env.py`` scripts via the command line. """ def get_x_argument(as_dictionary: bool = False): """Return the value(s) passed for the ``-x`` argument, if any. 
The ``-x`` argument is an open ended flag that allows any user-defined value or values to be passed on the command line, then available here for consumption by a custom ``env.py`` script. The return value is a list, returned directly from the ``argparse`` structure. If ``as_dictionary=True`` is passed, the ``x`` arguments are parsed using ``key=value`` format into a dictionary that is then returned. For example, to support passing a database URL on the command line, the standard ``env.py`` script can be modified like this:: cmd_line_url = context.get_x_argument( as_dictionary=True).get('dbname') if cmd_line_url: engine = create_engine(cmd_line_url) else: engine = engine_from_config( config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool) This then takes effect by running the ``alembic`` script as:: alembic -x dbname=postgresql://user:pass@host/dbname upgrade head This function does not require that the :class:`.MigrationContext` has been configured. .. seealso:: :meth:`.EnvironmentContext.get_tag_argument` :attr:`.Config.cmd_opts` """ def is_offline_mode() -> bool: """Return True if the current migrations environment is running in "offline mode". This is ``True`` or ``False`` depending on the ``--sql`` flag passed. This function does not require that the :class:`.MigrationContext` has been configured. """ def is_transactional_ddl(): """Return True if the context is configured to expect a transactional DDL capable backend. This defaults to the type of database in use, and can be overridden by the ``transactional_ddl`` argument to :meth:`.configure` This function requires that a :class:`.MigrationContext` has first been made available via :meth:`.configure`. """ def run_migrations(**kw) -> None: """Run migrations as determined by the current command line configuration as well as versioning information present (or not) in the current database connection (if one is present). The function accepts optional ``**kw`` arguments. If these are passed, they are sent directly to the ``upgrade()`` and ``downgrade()`` functions within each target revision file. By modifying the ``script.py.mako`` file so that the ``upgrade()`` and ``downgrade()`` functions accept arguments, parameters can be passed here so that contextual information, usually information to identify a particular database in use, can be passed from a custom ``env.py`` script to the migration functions. This function requires that a :class:`.MigrationContext` has first been made available via :meth:`.configure`. """ script: ScriptDirectory def static_output(text): """Emit text directly to the "offline" SQL stream. Typically this is for emitting comments that start with --. The statement is not treated as a SQL execution, no ; or batch separator is added, etc. """ alembic-rel_1_7_6/alembic/ddl/000077500000000000000000000000001417624537100162765ustar00rootroot00000000000000alembic-rel_1_7_6/alembic/ddl/__init__.py000066400000000000000000000002111417624537100204010ustar00rootroot00000000000000from . import mssql from . import mysql from . import oracle from . import postgresql from . 
import sqlite from .impl import DefaultImpl alembic-rel_1_7_6/alembic/ddl/base.py000066400000000000000000000231731417624537100175700ustar00rootroot00000000000000import functools from typing import Optional from typing import TYPE_CHECKING from typing import Union from sqlalchemy import exc from sqlalchemy import Integer from sqlalchemy import types as sqltypes from sqlalchemy.ext.compiler import compiles from sqlalchemy.schema import Column from sqlalchemy.schema import DDLElement from sqlalchemy.sql.elements import quoted_name from ..util.sqla_compat import _columns_for_constraint # noqa from ..util.sqla_compat import _find_columns # noqa from ..util.sqla_compat import _fk_spec # noqa from ..util.sqla_compat import _is_type_bound # noqa from ..util.sqla_compat import _table_for_constraint # noqa if TYPE_CHECKING: from sqlalchemy.sql.compiler import Compiled from sqlalchemy.sql.compiler import DDLCompiler from sqlalchemy.sql.elements import TextClause from sqlalchemy.sql.functions import Function from sqlalchemy.sql.schema import FetchedValue from sqlalchemy.sql.type_api import TypeEngine from .impl import DefaultImpl from ..util.sqla_compat import Computed from ..util.sqla_compat import Identity _ServerDefault = Union["TextClause", "FetchedValue", "Function", str] class AlterTable(DDLElement): """Represent an ALTER TABLE statement. Only the string name and optional schema name of the table is required, not a full Table object. """ def __init__( self, table_name: str, schema: Optional[Union["quoted_name", str]] = None, ) -> None: self.table_name = table_name self.schema = schema class RenameTable(AlterTable): def __init__( self, old_table_name: str, new_table_name: Union["quoted_name", str], schema: Optional[Union["quoted_name", str]] = None, ) -> None: super(RenameTable, self).__init__(old_table_name, schema=schema) self.new_table_name = new_table_name class AlterColumn(AlterTable): def __init__( self, name: str, column_name: str, schema: Optional[str] = None, existing_type: Optional["TypeEngine"] = None, existing_nullable: Optional[bool] = None, existing_server_default: Optional[_ServerDefault] = None, existing_comment: Optional[str] = None, ) -> None: super(AlterColumn, self).__init__(name, schema=schema) self.column_name = column_name self.existing_type = ( sqltypes.to_instance(existing_type) if existing_type is not None else None ) self.existing_nullable = existing_nullable self.existing_server_default = existing_server_default self.existing_comment = existing_comment class ColumnNullable(AlterColumn): def __init__( self, name: str, column_name: str, nullable: bool, **kw ) -> None: super(ColumnNullable, self).__init__(name, column_name, **kw) self.nullable = nullable class ColumnType(AlterColumn): def __init__( self, name: str, column_name: str, type_: "TypeEngine", **kw ) -> None: super(ColumnType, self).__init__(name, column_name, **kw) self.type_ = sqltypes.to_instance(type_) class ColumnName(AlterColumn): def __init__( self, name: str, column_name: str, newname: str, **kw ) -> None: super(ColumnName, self).__init__(name, column_name, **kw) self.newname = newname class ColumnDefault(AlterColumn): def __init__( self, name: str, column_name: str, default: Optional[_ServerDefault], **kw ) -> None: super(ColumnDefault, self).__init__(name, column_name, **kw) self.default = default class ComputedColumnDefault(AlterColumn): def __init__( self, name: str, column_name: str, default: Optional["Computed"], **kw ) -> None: super(ComputedColumnDefault, self).__init__(name, column_name, **kw) 
self.default = default class IdentityColumnDefault(AlterColumn): def __init__( self, name: str, column_name: str, default: Optional["Identity"], impl: "DefaultImpl", **kw ) -> None: super(IdentityColumnDefault, self).__init__(name, column_name, **kw) self.default = default self.impl = impl class AddColumn(AlterTable): def __init__( self, name: str, column: "Column", schema: Optional[Union["quoted_name", str]] = None, ) -> None: super(AddColumn, self).__init__(name, schema=schema) self.column = column class DropColumn(AlterTable): def __init__( self, name: str, column: "Column", schema: Optional[str] = None ) -> None: super(DropColumn, self).__init__(name, schema=schema) self.column = column class ColumnComment(AlterColumn): def __init__( self, name: str, column_name: str, comment: Optional[str], **kw ) -> None: super(ColumnComment, self).__init__(name, column_name, **kw) self.comment = comment @compiles(RenameTable) def visit_rename_table( element: "RenameTable", compiler: "DDLCompiler", **kw ) -> str: return "%s RENAME TO %s" % ( alter_table(compiler, element.table_name, element.schema), format_table_name(compiler, element.new_table_name, element.schema), ) @compiles(AddColumn) def visit_add_column( element: "AddColumn", compiler: "DDLCompiler", **kw ) -> str: return "%s %s" % ( alter_table(compiler, element.table_name, element.schema), add_column(compiler, element.column, **kw), ) @compiles(DropColumn) def visit_drop_column( element: "DropColumn", compiler: "DDLCompiler", **kw ) -> str: return "%s %s" % ( alter_table(compiler, element.table_name, element.schema), drop_column(compiler, element.column.name, **kw), ) @compiles(ColumnNullable) def visit_column_nullable( element: "ColumnNullable", compiler: "DDLCompiler", **kw ) -> str: return "%s %s %s" % ( alter_table(compiler, element.table_name, element.schema), alter_column(compiler, element.column_name), "DROP NOT NULL" if element.nullable else "SET NOT NULL", ) @compiles(ColumnType) def visit_column_type( element: "ColumnType", compiler: "DDLCompiler", **kw ) -> str: return "%s %s %s" % ( alter_table(compiler, element.table_name, element.schema), alter_column(compiler, element.column_name), "TYPE %s" % format_type(compiler, element.type_), ) @compiles(ColumnName) def visit_column_name( element: "ColumnName", compiler: "DDLCompiler", **kw ) -> str: return "%s RENAME %s TO %s" % ( alter_table(compiler, element.table_name, element.schema), format_column_name(compiler, element.column_name), format_column_name(compiler, element.newname), ) @compiles(ColumnDefault) def visit_column_default( element: "ColumnDefault", compiler: "DDLCompiler", **kw ) -> str: return "%s %s %s" % ( alter_table(compiler, element.table_name, element.schema), alter_column(compiler, element.column_name), "SET DEFAULT %s" % format_server_default(compiler, element.default) if element.default is not None else "DROP DEFAULT", ) @compiles(ComputedColumnDefault) def visit_computed_column( element: "ComputedColumnDefault", compiler: "DDLCompiler", **kw ): raise exc.CompileError( 'Adding or removing a "computed" construct, e.g. GENERATED ' "ALWAYS AS, to or from an existing column is not supported." ) @compiles(IdentityColumnDefault) def visit_identity_column( element: "IdentityColumnDefault", compiler: "DDLCompiler", **kw ): raise exc.CompileError( 'Adding, removing or modifying an "identity" construct, ' "e.g. GENERATED AS IDENTITY, to or from an existing " "column is not supported in this dialect." 
) def quote_dotted( name: Union["quoted_name", str], quote: functools.partial ) -> Union["quoted_name", str]: """quote the elements of a dotted name""" if isinstance(name, quoted_name): return quote(name) result = ".".join([quote(x) for x in name.split(".")]) return result def format_table_name( compiler: "Compiled", name: Union["quoted_name", str], schema: Optional[Union["quoted_name", str]], ) -> Union["quoted_name", str]: quote = functools.partial(compiler.preparer.quote) if schema: return quote_dotted(schema, quote) + "." + quote(name) else: return quote(name) def format_column_name( compiler: "DDLCompiler", name: Optional[Union["quoted_name", str]] ) -> Union["quoted_name", str]: return compiler.preparer.quote(name) def format_server_default( compiler: "DDLCompiler", default: Optional[_ServerDefault], ) -> str: return compiler.get_column_default_string( Column("x", Integer, server_default=default) ) def format_type(compiler: "DDLCompiler", type_: "TypeEngine") -> str: return compiler.dialect.type_compiler.process(type_) def alter_table( compiler: "DDLCompiler", name: str, schema: Optional[str], ) -> str: return "ALTER TABLE %s" % format_table_name(compiler, name, schema) def drop_column(compiler: "DDLCompiler", name: str, **kw) -> str: return "DROP COLUMN %s" % format_column_name(compiler, name) def alter_column(compiler: "DDLCompiler", name: str) -> str: return "ALTER COLUMN %s" % format_column_name(compiler, name) def add_column(compiler: "DDLCompiler", column: "Column", **kw) -> str: text = "ADD COLUMN %s" % compiler.get_column_specification(column, **kw) const = " ".join( compiler.process(constraint) for constraint in column.constraints ) if const: text += " " + const return text alembic-rel_1_7_6/alembic/ddl/impl.py000066400000000000000000000547171417624537100176270ustar00rootroot00000000000000from collections import namedtuple import re from typing import Any from typing import Callable from typing import Dict from typing import List from typing import Optional from typing import Sequence from typing import Set from typing import Tuple from typing import Type from typing import TYPE_CHECKING from typing import Union from sqlalchemy import cast from sqlalchemy import schema from sqlalchemy import text from . import base from .. 
import util from ..util import sqla_compat if TYPE_CHECKING: from io import StringIO from typing import Literal from sqlalchemy.engine import Connection from sqlalchemy.engine import Dialect from sqlalchemy.engine.cursor import CursorResult from sqlalchemy.engine.cursor import LegacyCursorResult from sqlalchemy.engine.reflection import Inspector from sqlalchemy.sql.dml import Update from sqlalchemy.sql.elements import ClauseElement from sqlalchemy.sql.elements import ColumnElement from sqlalchemy.sql.elements import quoted_name from sqlalchemy.sql.elements import TextClause from sqlalchemy.sql.schema import Column from sqlalchemy.sql.schema import Constraint from sqlalchemy.sql.schema import ForeignKeyConstraint from sqlalchemy.sql.schema import Index from sqlalchemy.sql.schema import Table from sqlalchemy.sql.schema import UniqueConstraint from sqlalchemy.sql.selectable import TableClause from sqlalchemy.sql.type_api import TypeEngine from .base import _ServerDefault from ..autogenerate.api import AutogenContext from ..operations.batch import ApplyBatchImpl from ..operations.batch import BatchOperationsImpl class ImplMeta(type): def __init__( cls, classname: str, bases: Tuple[Type["DefaultImpl"]], dict_: Dict[str, Any], ): newtype = type.__init__(cls, classname, bases, dict_) if "__dialect__" in dict_: _impls[dict_["__dialect__"]] = cls return newtype _impls: dict = {} Params = namedtuple("Params", ["token0", "tokens", "args", "kwargs"]) class DefaultImpl(metaclass=ImplMeta): """Provide the entrypoint for major migration operations, including database-specific behavioral variances. While individual SQL/DDL constructs already provide for database-specific implementations, variances here allow for entirely different sequences of operations to take place for a particular migration, such as SQL Server's special 'IDENTITY INSERT' step for bulk inserts. """ __dialect__ = "default" transactional_ddl = False command_terminator = ";" type_synonyms: Tuple[Set[str], ...] = ({"NUMERIC", "DECIMAL"},) type_arg_extract: Sequence[str] = () # on_null is known to be supported only by oracle identity_attrs_ignore: Tuple[str, ...] = ("on_null",) def __init__( self, dialect: "Dialect", connection: Optional["Connection"], as_sql: bool, transactional_ddl: Optional[bool], output_buffer: Optional["StringIO"], context_opts: Dict[str, Any], ) -> None: self.dialect = dialect self.connection = connection self.as_sql = as_sql self.literal_binds = context_opts.get("literal_binds", False) self.output_buffer = output_buffer self.memo: dict = {} self.context_opts = context_opts if transactional_ddl is not None: self.transactional_ddl = transactional_ddl if self.literal_binds: if not self.as_sql: raise util.CommandError( "Can't use literal_binds setting without as_sql mode" ) @classmethod def get_by_dialect(cls, dialect: "Dialect") -> Any: return _impls[dialect.name] def static_output(self, text: str) -> None: assert self.output_buffer is not None self.output_buffer.write(text + "\n\n") self.output_buffer.flush() def requires_recreate_in_batch( self, batch_op: "BatchOperationsImpl" ) -> bool: """Return True if the given :class:`.BatchOperationsImpl` would need the table to be recreated and copied in order to proceed. Normally, only returns True on SQLite when operations other than add_column are present. """ return False def prep_table_for_batch( self, batch_impl: "ApplyBatchImpl", table: "Table" ) -> None: """perform any operations needed on a table before a new one is created to replace it in batch mode. 
the PG dialect uses this to drop constraints on the table before the new one uses those same names. """ @property def bind(self) -> Optional["Connection"]: return self.connection def _exec( self, construct: Union["ClauseElement", str], execution_options: None = None, multiparams: Sequence[dict] = (), params: Dict[str, int] = util.immutabledict(), ) -> Optional[Union["LegacyCursorResult", "CursorResult"]]: if isinstance(construct, str): construct = text(construct) if self.as_sql: if multiparams or params: # TODO: coverage raise Exception("Execution arguments not allowed with as_sql") if self.literal_binds and not isinstance( construct, schema.DDLElement ): compile_kw = dict(compile_kwargs={"literal_binds": True}) else: compile_kw = {} self.static_output( str(construct.compile(dialect=self.dialect, **compile_kw)) .replace("\t", " ") .strip() + self.command_terminator ) return None else: conn = self.connection assert conn is not None if execution_options: conn = conn.execution_options(**execution_options) if params: assert isinstance(multiparams, tuple) multiparams += (params,) return conn.execute(construct, multiparams) def execute( self, sql: Union["Update", "TextClause", str], execution_options: None = None, ) -> None: self._exec(sql, execution_options) def alter_column( self, table_name: str, column_name: str, nullable: Optional[bool] = None, server_default: Union["_ServerDefault", "Literal[False]"] = False, name: Optional[str] = None, type_: Optional["TypeEngine"] = None, schema: Optional[str] = None, autoincrement: Optional[bool] = None, comment: Optional[Union[str, "Literal[False]"]] = False, existing_comment: Optional[str] = None, existing_type: Optional["TypeEngine"] = None, existing_server_default: Optional["_ServerDefault"] = None, existing_nullable: Optional[bool] = None, existing_autoincrement: Optional[bool] = None, **kw: Any ) -> None: if autoincrement is not None or existing_autoincrement is not None: util.warn( "autoincrement and existing_autoincrement " "only make sense for MySQL", stacklevel=3, ) if nullable is not None: self._exec( base.ColumnNullable( table_name, column_name, nullable, schema=schema, existing_type=existing_type, existing_server_default=existing_server_default, existing_nullable=existing_nullable, existing_comment=existing_comment, ) ) if server_default is not False: kw = {} cls_: Type[ Union[ base.ComputedColumnDefault, base.IdentityColumnDefault, base.ColumnDefault, ] ] if sqla_compat._server_default_is_computed( server_default, existing_server_default ): cls_ = base.ComputedColumnDefault elif sqla_compat._server_default_is_identity( server_default, existing_server_default ): cls_ = base.IdentityColumnDefault kw["impl"] = self else: cls_ = base.ColumnDefault self._exec( cls_( table_name, column_name, server_default, # type:ignore[arg-type] schema=schema, existing_type=existing_type, existing_server_default=existing_server_default, existing_nullable=existing_nullable, existing_comment=existing_comment, **kw ) ) if type_ is not None: self._exec( base.ColumnType( table_name, column_name, type_, schema=schema, existing_type=existing_type, existing_server_default=existing_server_default, existing_nullable=existing_nullable, existing_comment=existing_comment, ) ) if comment is not False: self._exec( base.ColumnComment( table_name, column_name, comment, schema=schema, existing_type=existing_type, existing_server_default=existing_server_default, existing_nullable=existing_nullable, existing_comment=existing_comment, ) ) # do the new name last ;) if name is not 
None: self._exec( base.ColumnName( table_name, column_name, name, schema=schema, existing_type=existing_type, existing_server_default=existing_server_default, existing_nullable=existing_nullable, ) ) def add_column( self, table_name: str, column: "Column", schema: Optional[Union[str, "quoted_name"]] = None, ) -> None: self._exec(base.AddColumn(table_name, column, schema=schema)) def drop_column( self, table_name: str, column: "Column", schema: Optional[str] = None, **kw ) -> None: self._exec(base.DropColumn(table_name, column, schema=schema)) def add_constraint(self, const: Any) -> None: if const._create_rule is None or const._create_rule(self): self._exec(schema.AddConstraint(const)) def drop_constraint(self, const: "Constraint") -> None: self._exec(schema.DropConstraint(const)) def rename_table( self, old_table_name: str, new_table_name: Union[str, "quoted_name"], schema: Optional[Union[str, "quoted_name"]] = None, ) -> None: self._exec( base.RenameTable(old_table_name, new_table_name, schema=schema) ) def create_table(self, table: "Table") -> None: table.dispatch.before_create( table, self.connection, checkfirst=False, _ddl_runner=self ) self._exec(schema.CreateTable(table)) table.dispatch.after_create( table, self.connection, checkfirst=False, _ddl_runner=self ) for index in table.indexes: self._exec(schema.CreateIndex(index)) with_comment = ( self.dialect.supports_comments and not self.dialect.inline_comments ) comment = table.comment if comment and with_comment: self.create_table_comment(table) for column in table.columns: comment = column.comment if comment and with_comment: self.create_column_comment(column) def drop_table(self, table: "Table") -> None: self._exec(schema.DropTable(table)) def create_index(self, index: "Index") -> None: self._exec(schema.CreateIndex(index)) def create_table_comment(self, table: "Table") -> None: self._exec(schema.SetTableComment(table)) def drop_table_comment(self, table: "Table") -> None: self._exec(schema.DropTableComment(table)) def create_column_comment(self, column: "ColumnElement") -> None: self._exec(schema.SetColumnComment(column)) def drop_index(self, index: "Index") -> None: self._exec(schema.DropIndex(index)) def bulk_insert( self, table: Union["TableClause", "Table"], rows: List[dict], multiinsert: bool = True, ) -> None: if not isinstance(rows, list): raise TypeError("List expected") elif rows and not isinstance(rows[0], dict): raise TypeError("List of dictionaries expected") if self.as_sql: for row in rows: self._exec( sqla_compat._insert_inline(table).values( **dict( ( k, sqla_compat._literal_bindparam( k, v, type_=table.c[k].type ) if not isinstance( v, sqla_compat._literal_bindparam ) else v, ) for k, v in row.items() ) ) ) else: # work around http://www.sqlalchemy.org/trac/ticket/2461 if not hasattr(table, "_autoincrement_column"): table._autoincrement_column = None if rows: if multiinsert: self._exec( sqla_compat._insert_inline(table), multiparams=rows ) else: for row in rows: self._exec( sqla_compat._insert_inline(table).values(**row) ) def _tokenize_column_type(self, column: "Column") -> Params: definition = self.dialect.type_compiler.process(column.type).lower() # tokenize the SQLAlchemy-generated version of a type, so that # the two can be compared. 
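        # for illustration (hypothetical inputs, not taken from this module):
        # assuming the dialect renders a column's type as "NUMERIC(10, 5)",
        # the tokenization below yields
        #     Params(token0="numeric", tokens=[], args=["10", "5"], kwargs={})
        # while "VARCHAR(255) CHARACTER SET utf8" would yield
        #     Params(token0="varchar", tokens=["character", "set", "utf8"],
        #            args=["255"], kwargs={})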
# # examples: # NUMERIC(10, 5) # TIMESTAMP WITH TIMEZONE # INTEGER UNSIGNED # INTEGER (10) UNSIGNED # INTEGER(10) UNSIGNED # varchar character set utf8 # tokens = re.findall(r"[\w\-_]+|\(.+?\)", definition) term_tokens = [] paren_term = None for token in tokens: if re.match(r"^\(.*\)$", token): paren_term = token else: term_tokens.append(token) params = Params(term_tokens[0], term_tokens[1:], [], {}) if paren_term: for term in re.findall("[^(),]+", paren_term): if "=" in term: key, val = term.split("=") params.kwargs[key.strip()] = val.strip() else: params.args.append(term.strip()) return params def _column_types_match( self, inspector_params: "Params", metadata_params: "Params" ) -> bool: if inspector_params.token0 == metadata_params.token0: return True synonyms = [{t.lower() for t in batch} for batch in self.type_synonyms] inspector_all_terms = " ".join( [inspector_params.token0] + inspector_params.tokens ) metadata_all_terms = " ".join( [metadata_params.token0] + metadata_params.tokens ) for batch in synonyms: if {inspector_all_terms, metadata_all_terms}.issubset(batch) or { inspector_params.token0, metadata_params.token0, }.issubset(batch): return True return False def _column_args_match( self, inspected_params: "Params", meta_params: "Params" ) -> bool: """We want to compare column parameters. However, we only want to compare parameters that are set. If they both have `collation`, we want to make sure they are the same. However, if only one specifies it, dont flag it for being less specific """ if ( len(meta_params.tokens) == len(inspected_params.tokens) and meta_params.tokens != inspected_params.tokens ): return False if ( len(meta_params.args) == len(inspected_params.args) and meta_params.args != inspected_params.args ): return False insp = " ".join(inspected_params.tokens).lower() meta = " ".join(meta_params.tokens).lower() for reg in self.type_arg_extract: mi = re.search(reg, insp) mm = re.search(reg, meta) if mi and mm and mi.group(1) != mm.group(1): return False return True def compare_type( self, inspector_column: "Column", metadata_column: "Column" ) -> bool: """Returns True if there ARE differences between the types of the two columns. Takes impl.type_synonyms into account between retrospected and metadata types """ inspector_params = self._tokenize_column_type(inspector_column) metadata_params = self._tokenize_column_type(metadata_column) if not self._column_types_match(inspector_params, metadata_params): return True if not self._column_args_match(inspector_params, metadata_params): return True return False def compare_server_default( self, inspector_column, metadata_column, rendered_metadata_default, rendered_inspector_default, ): return rendered_inspector_default != rendered_metadata_default def correct_for_autogen_constraints( self, conn_uniques: Set["UniqueConstraint"], conn_indexes: Set["Index"], metadata_unique_constraints: Set["UniqueConstraint"], metadata_indexes: Set["Index"], ) -> None: pass def cast_for_batch_migrate(self, existing, existing_transfer, new_type): if existing.type._type_affinity is not new_type._type_affinity: existing_transfer["expr"] = cast( existing_transfer["expr"], new_type ) def render_ddl_sql_expr( self, expr: "ClauseElement", is_server_default: bool = False, **kw ) -> str: """Render a SQL expression that is typically a server default, index expression, etc. .. 
versionadded:: 1.0.11 """ compile_kw = dict( compile_kwargs={"literal_binds": True, "include_table": False} ) return str(expr.compile(dialect=self.dialect, **compile_kw)) def _compat_autogen_column_reflect( self, inspector: "Inspector" ) -> Callable: return self.autogen_column_reflect def correct_for_autogen_foreignkeys( self, conn_fks: Set["ForeignKeyConstraint"], metadata_fks: Set["ForeignKeyConstraint"], ) -> None: pass def autogen_column_reflect(self, inspector, table, column_info): """A hook that is attached to the 'column_reflect' event for when a Table is reflected from the database during the autogenerate process. Dialects can elect to modify the information gathered here. """ def start_migrations(self) -> None: """A hook called when :meth:`.EnvironmentContext.run_migrations` is called. Implementations can set up per-migration-run state here. """ def emit_begin(self) -> None: """Emit the string ``BEGIN``, or the backend-specific equivalent, on the current connection context. This is used in offline mode and typically via :meth:`.EnvironmentContext.begin_transaction`. """ self.static_output("BEGIN" + self.command_terminator) def emit_commit(self) -> None: """Emit the string ``COMMIT``, or the backend-specific equivalent, on the current connection context. This is used in offline mode and typically via :meth:`.EnvironmentContext.begin_transaction`. """ self.static_output("COMMIT" + self.command_terminator) def render_type( self, type_obj: "TypeEngine", autogen_context: "AutogenContext" ) -> Union[str, "Literal[False]"]: return False def _compare_identity_default(self, metadata_identity, inspector_identity): # ignored contains the attributes that were not considered # because assumed to their default values in the db. diff, ignored = _compare_identity_options( sqla_compat._identity_attrs, metadata_identity, inspector_identity, sqla_compat.Identity(), ) meta_always = getattr(metadata_identity, "always", None) inspector_always = getattr(inspector_identity, "always", None) # None and False are the same in this comparison if bool(meta_always) != bool(inspector_always): diff.add("always") diff.difference_update(self.identity_attrs_ignore) # returns 3 values: return ( # different identity attributes diff, # ignored identity attributes ignored, # if the two identity should be considered different bool(diff) or bool(metadata_identity) != bool(inspector_identity), ) def _compare_identity_options( attributes, metadata_io, inspector_io, default_io ): # this can be used for identity or sequence compare. # default_io is an instance of IdentityOption with all attributes to the # default value. 
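    # for illustration, assuming SQLAlchemy's Identity construct: comparing a
    # metadata Identity(start=2) against a reflected Identity(start=1,
    # cycle=False) puts "start" into the returned diff (explicitly different
    # values), while "cycle" lands in ignored_attr because the metadata
    # simply left it at the construct's default of None.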
diff = set() ignored_attr = set() for attr in attributes: meta_value = getattr(metadata_io, attr, None) default_value = getattr(default_io, attr, None) conn_value = getattr(inspector_io, attr, None) if conn_value != meta_value: if meta_value == default_value: ignored_attr.add(attr) else: diff.add(attr) return diff, ignored_attr alembic-rel_1_7_6/alembic/ddl/mssql.py000066400000000000000000000320411417624537100200070ustar00rootroot00000000000000from typing import Any from typing import List from typing import Optional from typing import TYPE_CHECKING from typing import Union from sqlalchemy import types as sqltypes from sqlalchemy.ext.compiler import compiles from sqlalchemy.schema import Column from sqlalchemy.schema import CreateIndex from sqlalchemy.sql.base import Executable from sqlalchemy.sql.elements import ClauseElement from .base import AddColumn from .base import alter_column from .base import alter_table from .base import ColumnDefault from .base import ColumnName from .base import ColumnNullable from .base import ColumnType from .base import format_column_name from .base import format_server_default from .base import format_table_name from .base import format_type from .base import RenameTable from .impl import DefaultImpl from .. import util from ..util import sqla_compat if TYPE_CHECKING: from typing import Literal from sqlalchemy.dialects.mssql.base import MSDDLCompiler from sqlalchemy.dialects.mssql.base import MSSQLCompiler from sqlalchemy.engine.cursor import CursorResult from sqlalchemy.engine.cursor import LegacyCursorResult from sqlalchemy.sql.schema import Index from sqlalchemy.sql.schema import Table from sqlalchemy.sql.selectable import TableClause from sqlalchemy.sql.type_api import TypeEngine from .base import _ServerDefault class MSSQLImpl(DefaultImpl): __dialect__ = "mssql" transactional_ddl = True batch_separator = "GO" type_synonyms = DefaultImpl.type_synonyms + ({"VARCHAR", "NVARCHAR"},) identity_attrs_ignore = ( "minvalue", "maxvalue", "nominvalue", "nomaxvalue", "cycle", "cache", "order", "on_null", "order", ) def __init__(self, *arg, **kw) -> None: super(MSSQLImpl, self).__init__(*arg, **kw) self.batch_separator = self.context_opts.get( "mssql_batch_separator", self.batch_separator ) def _exec( self, construct: Any, *args, **kw ) -> Optional[Union["LegacyCursorResult", "CursorResult"]]: result = super(MSSQLImpl, self)._exec(construct, *args, **kw) if self.as_sql and self.batch_separator: self.static_output(self.batch_separator) return result def emit_begin(self) -> None: self.static_output("BEGIN TRANSACTION" + self.command_terminator) def emit_commit(self) -> None: super(MSSQLImpl, self).emit_commit() if self.as_sql and self.batch_separator: self.static_output(self.batch_separator) def alter_column( # type:ignore[override] self, table_name: str, column_name: str, nullable: Optional[bool] = None, server_default: Optional[ Union["_ServerDefault", "Literal[False]"] ] = False, name: Optional[str] = None, type_: Optional["TypeEngine"] = None, schema: Optional[str] = None, existing_type: Optional["TypeEngine"] = None, existing_server_default: Optional["_ServerDefault"] = None, existing_nullable: Optional[bool] = None, **kw: Any ) -> None: if nullable is not None: if existing_type is None: if type_ is not None: existing_type = type_ # the NULL/NOT NULL alter will handle # the type alteration type_ = None else: raise util.CommandError( "MS-SQL ALTER COLUMN operations " "with NULL or NOT NULL require the " "existing_type or a new type_ be passed." 
) elif type_ is not None: # the NULL/NOT NULL alter will handle # the type alteration existing_type = type_ type_ = None used_default = False if sqla_compat._server_default_is_identity( server_default, existing_server_default ) or sqla_compat._server_default_is_computed( server_default, existing_server_default ): used_default = True kw["server_default"] = server_default kw["existing_server_default"] = existing_server_default super(MSSQLImpl, self).alter_column( table_name, column_name, nullable=nullable, type_=type_, schema=schema, existing_type=existing_type, existing_nullable=existing_nullable, **kw ) if server_default is not False and used_default is False: if existing_server_default is not False or server_default is None: self._exec( _ExecDropConstraint( table_name, column_name, "sys.default_constraints", schema, ) ) if server_default is not None: super(MSSQLImpl, self).alter_column( table_name, column_name, schema=schema, server_default=server_default, ) if name is not None: super(MSSQLImpl, self).alter_column( table_name, column_name, schema=schema, name=name ) def create_index(self, index: "Index") -> None: # this likely defaults to None if not present, so get() # should normally not return the default value. being # defensive in any case mssql_include = index.kwargs.get("mssql_include", None) or () assert index.table is not None for col in mssql_include: if col not in index.table.c: index.table.append_column(Column(col, sqltypes.NullType)) self._exec(CreateIndex(index)) def bulk_insert( # type:ignore[override] self, table: Union["TableClause", "Table"], rows: List[dict], **kw: Any ) -> None: if self.as_sql: self._exec( "SET IDENTITY_INSERT %s ON" % self.dialect.identifier_preparer.format_table(table) ) super(MSSQLImpl, self).bulk_insert(table, rows, **kw) self._exec( "SET IDENTITY_INSERT %s OFF" % self.dialect.identifier_preparer.format_table(table) ) else: super(MSSQLImpl, self).bulk_insert(table, rows, **kw) def drop_column( self, table_name: str, column: "Column", schema: Optional[str] = None, **kw ) -> None: drop_default = kw.pop("mssql_drop_default", False) if drop_default: self._exec( _ExecDropConstraint( table_name, column, "sys.default_constraints", schema ) ) drop_check = kw.pop("mssql_drop_check", False) if drop_check: self._exec( _ExecDropConstraint( table_name, column, "sys.check_constraints", schema ) ) drop_fks = kw.pop("mssql_drop_foreign_key", False) if drop_fks: self._exec(_ExecDropFKConstraint(table_name, column, schema)) super(MSSQLImpl, self).drop_column( table_name, column, schema=schema, **kw ) def compare_server_default( self, inspector_column, metadata_column, rendered_metadata_default, rendered_inspector_default, ): def clean(value): if value is not None: value = value.strip() while value[0] == "(" and value[-1] == ")": value = value[1:-1] return value return clean(rendered_inspector_default) != clean( rendered_metadata_default ) def _compare_identity_default(self, metadata_identity, inspector_identity): diff, ignored, is_alter = super( MSSQLImpl, self )._compare_identity_default(metadata_identity, inspector_identity) if ( metadata_identity is None and inspector_identity is not None and not diff and inspector_identity.column is not None and inspector_identity.column.primary_key ): # mssql reflect primary keys with autoincrement as identity # columns. 
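            # (for illustration, assuming the table was originally created by
            # SQLAlchemy with its default autoincrement behavior: a metadata
            # column declared only as Column("id", Integer, primary_key=True)
            # reflects back from SQL Server with an IDENTITY(1,1) attached,
            # which the check below keeps from being reported as a change.)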
if no different attributes are present ignore them is_alter = False return diff, ignored, is_alter class _ExecDropConstraint(Executable, ClauseElement): inherit_cache = False def __init__( self, tname: str, colname: Union["Column", str], type_: str, schema: Optional[str], ) -> None: self.tname = tname self.colname = colname self.type_ = type_ self.schema = schema class _ExecDropFKConstraint(Executable, ClauseElement): inherit_cache = False def __init__( self, tname: str, colname: "Column", schema: Optional[str] ) -> None: self.tname = tname self.colname = colname self.schema = schema @compiles(_ExecDropConstraint, "mssql") def _exec_drop_col_constraint( element: "_ExecDropConstraint", compiler: "MSSQLCompiler", **kw ) -> str: schema, tname, colname, type_ = ( element.schema, element.tname, element.colname, element.type_, ) # from http://www.mssqltips.com/sqlservertip/1425/\ # working-with-default-constraints-in-sql-server/ # TODO: needs table formatting, etc. return """declare @const_name varchar(256) select @const_name = [name] from %(type)s where parent_object_id = object_id('%(schema_dot)s%(tname)s') and col_name(parent_object_id, parent_column_id) = '%(colname)s' exec('alter table %(tname_quoted)s drop constraint ' + @const_name)""" % { "type": type_, "tname": tname, "colname": colname, "tname_quoted": format_table_name(compiler, tname, schema), "schema_dot": schema + "." if schema else "", } @compiles(_ExecDropFKConstraint, "mssql") def _exec_drop_col_fk_constraint( element: "_ExecDropFKConstraint", compiler: "MSSQLCompiler", **kw ) -> str: schema, tname, colname = element.schema, element.tname, element.colname return """declare @const_name varchar(256) select @const_name = [name] from sys.foreign_keys fk join sys.foreign_key_columns fkc on fk.object_id=fkc.constraint_object_id where fkc.parent_object_id = object_id('%(schema_dot)s%(tname)s') and col_name(fkc.parent_object_id, fkc.parent_column_id) = '%(colname)s' exec('alter table %(tname_quoted)s drop constraint ' + @const_name)""" % { "tname": tname, "colname": colname, "tname_quoted": format_table_name(compiler, tname, schema), "schema_dot": schema + "." 
if schema else "", } @compiles(AddColumn, "mssql") def visit_add_column( element: "AddColumn", compiler: "MSDDLCompiler", **kw ) -> str: return "%s %s" % ( alter_table(compiler, element.table_name, element.schema), mssql_add_column(compiler, element.column, **kw), ) def mssql_add_column(compiler: "MSDDLCompiler", column: "Column", **kw) -> str: return "ADD %s" % compiler.get_column_specification(column, **kw) @compiles(ColumnNullable, "mssql") def visit_column_nullable( element: "ColumnNullable", compiler: "MSDDLCompiler", **kw ) -> str: return "%s %s %s %s" % ( alter_table(compiler, element.table_name, element.schema), alter_column(compiler, element.column_name), format_type(compiler, element.existing_type), "NULL" if element.nullable else "NOT NULL", ) @compiles(ColumnDefault, "mssql") def visit_column_default( element: "ColumnDefault", compiler: "MSDDLCompiler", **kw ) -> str: # TODO: there can also be a named constraint # with ADD CONSTRAINT here return "%s ADD DEFAULT %s FOR %s" % ( alter_table(compiler, element.table_name, element.schema), format_server_default(compiler, element.default), format_column_name(compiler, element.column_name), ) @compiles(ColumnName, "mssql") def visit_rename_column( element: "ColumnName", compiler: "MSDDLCompiler", **kw ) -> str: return "EXEC sp_rename '%s.%s', %s, 'COLUMN'" % ( format_table_name(compiler, element.table_name, element.schema), format_column_name(compiler, element.column_name), format_column_name(compiler, element.newname), ) @compiles(ColumnType, "mssql") def visit_column_type( element: "ColumnType", compiler: "MSDDLCompiler", **kw ) -> str: return "%s %s %s" % ( alter_table(compiler, element.table_name, element.schema), alter_column(compiler, element.column_name), format_type(compiler, element.type_), ) @compiles(RenameTable, "mssql") def visit_rename_table( element: "RenameTable", compiler: "MSDDLCompiler", **kw ) -> str: return "EXEC sp_rename '%s', %s" % ( format_table_name(compiler, element.table_name, element.schema), format_table_name(compiler, element.new_table_name, None), ) alembic-rel_1_7_6/alembic/ddl/mysql.py000066400000000000000000000377761417624537100200410ustar00rootroot00000000000000import re from typing import Any from typing import Optional from typing import TYPE_CHECKING from typing import Union from sqlalchemy import schema from sqlalchemy import types as sqltypes from sqlalchemy.ext.compiler import compiles from .base import alter_table from .base import AlterColumn from .base import ColumnDefault from .base import ColumnName from .base import ColumnNullable from .base import ColumnType from .base import format_column_name from .base import format_server_default from .impl import DefaultImpl from .. 
import util from ..autogenerate import compare from ..util import sqla_compat from ..util.sqla_compat import _is_mariadb from ..util.sqla_compat import _is_type_bound if TYPE_CHECKING: from typing import Literal from sqlalchemy.dialects.mysql.base import MySQLDDLCompiler from sqlalchemy.sql.ddl import DropConstraint from sqlalchemy.sql.schema import Constraint from sqlalchemy.sql.type_api import TypeEngine from .base import _ServerDefault class MySQLImpl(DefaultImpl): __dialect__ = "mysql" transactional_ddl = False type_synonyms = DefaultImpl.type_synonyms + ( {"BOOL", "TINYINT"}, {"JSON", "LONGTEXT"}, ) type_arg_extract = [r"character set ([\w\-_]+)", r"collate ([\w\-_]+)"] def alter_column( # type:ignore[override] self, table_name: str, column_name: str, nullable: Optional[bool] = None, server_default: Union["_ServerDefault", "Literal[False]"] = False, name: Optional[str] = None, type_: Optional["TypeEngine"] = None, schema: Optional[str] = None, existing_type: Optional["TypeEngine"] = None, existing_server_default: Optional["_ServerDefault"] = None, existing_nullable: Optional[bool] = None, autoincrement: Optional[bool] = None, existing_autoincrement: Optional[bool] = None, comment: Optional[Union[str, "Literal[False]"]] = False, existing_comment: Optional[str] = None, **kw: Any ) -> None: if sqla_compat._server_default_is_identity( server_default, existing_server_default ) or sqla_compat._server_default_is_computed( server_default, existing_server_default ): # modifying computed or identity columns is not supported # the default will raise super(MySQLImpl, self).alter_column( table_name, column_name, nullable=nullable, type_=type_, schema=schema, existing_type=existing_type, existing_nullable=existing_nullable, server_default=server_default, existing_server_default=existing_server_default, **kw ) if name is not None or self._is_mysql_allowed_functional_default( type_ if type_ is not None else existing_type, server_default ): self._exec( MySQLChangeColumn( table_name, column_name, schema=schema, newname=name if name is not None else column_name, nullable=nullable if nullable is not None else existing_nullable if existing_nullable is not None else True, type_=type_ if type_ is not None else existing_type, default=server_default if server_default is not False else existing_server_default, autoincrement=autoincrement if autoincrement is not None else existing_autoincrement, comment=comment if comment is not False else existing_comment, ) ) elif ( nullable is not None or type_ is not None or autoincrement is not None or comment is not False ): self._exec( MySQLModifyColumn( table_name, column_name, schema=schema, newname=name if name is not None else column_name, nullable=nullable if nullable is not None else existing_nullable if existing_nullable is not None else True, type_=type_ if type_ is not None else existing_type, default=server_default if server_default is not False else existing_server_default, autoincrement=autoincrement if autoincrement is not None else existing_autoincrement, comment=comment if comment is not False else existing_comment, ) ) elif server_default is not False: self._exec( MySQLAlterDefault( table_name, column_name, server_default, schema=schema ) ) def drop_constraint( self, const: "Constraint", ) -> None: if isinstance(const, schema.CheckConstraint) and _is_type_bound(const): return super(MySQLImpl, self).drop_constraint(const) def _is_mysql_allowed_functional_default( self, type_: Optional["TypeEngine"], server_default: Union["_ServerDefault", 
"Literal[False]"], ) -> bool: return ( type_ is not None and type_._type_affinity # type:ignore[attr-defined] is sqltypes.DateTime and server_default is not None ) def compare_server_default( self, inspector_column, metadata_column, rendered_metadata_default, rendered_inspector_default, ): # partially a workaround for SQLAlchemy issue #3023; if the # column were created without "NOT NULL", MySQL may have added # an implicit default of '0' which we need to skip # TODO: this is not really covered anymore ? if ( metadata_column.type._type_affinity is sqltypes.Integer and inspector_column.primary_key and not inspector_column.autoincrement and not rendered_metadata_default and rendered_inspector_default == "'0'" ): return False elif inspector_column.type._type_affinity is sqltypes.Integer: rendered_inspector_default = ( re.sub(r"^'|'$", "", rendered_inspector_default) if rendered_inspector_default is not None else None ) return rendered_inspector_default != rendered_metadata_default elif rendered_inspector_default and rendered_metadata_default: # adjust for "function()" vs. "FUNCTION" as can occur particularly # for the CURRENT_TIMESTAMP function on newer MariaDB versions # SQLAlchemy MySQL dialect bundles ON UPDATE into the server # default; adjust for this possibly being present. onupdate_ins = re.match( r"(.*) (on update.*?)(?:\(\))?$", rendered_inspector_default.lower(), ) onupdate_met = re.match( r"(.*) (on update.*?)(?:\(\))?$", rendered_metadata_default.lower(), ) if onupdate_ins: if not onupdate_met: return True elif onupdate_ins.group(2) != onupdate_met.group(2): return True rendered_inspector_default = onupdate_ins.group(1) rendered_metadata_default = onupdate_met.group(1) return re.sub( r"(.*?)(?:\(\))?$", r"\1", rendered_inspector_default.lower() ) != re.sub( r"(.*?)(?:\(\))?$", r"\1", rendered_metadata_default.lower() ) else: return rendered_inspector_default != rendered_metadata_default def correct_for_autogen_constraints( self, conn_unique_constraints, conn_indexes, metadata_unique_constraints, metadata_indexes, ): # TODO: if SQLA 1.0, make use of "duplicates_index" # metadata removed = set() for idx in list(conn_indexes): if idx.unique: continue # MySQL puts implicit indexes on FK columns, even if # composite and even if MyISAM, so can't check this too easily. # the name of the index may be the column name or it may # be the name of the FK constraint. for col in idx.columns: if idx.name == col.name: conn_indexes.remove(idx) removed.add(idx.name) break for fk in col.foreign_keys: if fk.name == idx.name: conn_indexes.remove(idx) removed.add(idx.name) break if idx.name in removed: break # then remove indexes from the "metadata_indexes" # that we've removed from reflected, otherwise they come out # as adds (see #202) for idx in list(metadata_indexes): if idx.name in removed: metadata_indexes.remove(idx) def correct_for_autogen_foreignkeys(self, conn_fks, metadata_fks): conn_fk_by_sig = dict( (compare._fk_constraint_sig(fk).sig, fk) for fk in conn_fks ) metadata_fk_by_sig = dict( (compare._fk_constraint_sig(fk).sig, fk) for fk in metadata_fks ) for sig in set(conn_fk_by_sig).intersection(metadata_fk_by_sig): mdfk = metadata_fk_by_sig[sig] cnfk = conn_fk_by_sig[sig] # MySQL considers RESTRICT to be the default and doesn't # report on it. 
if the model has explicit RESTRICT and # the conn FK has None, set it to RESTRICT if ( mdfk.ondelete is not None and mdfk.ondelete.lower() == "restrict" and cnfk.ondelete is None ): cnfk.ondelete = "RESTRICT" if ( mdfk.onupdate is not None and mdfk.onupdate.lower() == "restrict" and cnfk.onupdate is None ): cnfk.onupdate = "RESTRICT" class MariaDBImpl(MySQLImpl): __dialect__ = "mariadb" class MySQLAlterDefault(AlterColumn): def __init__( self, name: str, column_name: str, default: "_ServerDefault", schema: Optional[str] = None, ) -> None: super(AlterColumn, self).__init__(name, schema=schema) self.column_name = column_name self.default = default class MySQLChangeColumn(AlterColumn): def __init__( self, name: str, column_name: str, schema: Optional[str] = None, newname: Optional[str] = None, type_: Optional["TypeEngine"] = None, nullable: Optional[bool] = None, default: Optional[Union["_ServerDefault", "Literal[False]"]] = False, autoincrement: Optional[bool] = None, comment: Optional[Union[str, "Literal[False]"]] = False, ) -> None: super(AlterColumn, self).__init__(name, schema=schema) self.column_name = column_name self.nullable = nullable self.newname = newname self.default = default self.autoincrement = autoincrement self.comment = comment if type_ is None: raise util.CommandError( "All MySQL CHANGE/MODIFY COLUMN operations " "require the existing type." ) self.type_ = sqltypes.to_instance(type_) class MySQLModifyColumn(MySQLChangeColumn): pass @compiles(ColumnNullable, "mysql", "mariadb") @compiles(ColumnName, "mysql", "mariadb") @compiles(ColumnDefault, "mysql", "mariadb") @compiles(ColumnType, "mysql", "mariadb") def _mysql_doesnt_support_individual(element, compiler, **kw): raise NotImplementedError( "Individual alter column constructs not supported by MySQL" ) @compiles(MySQLAlterDefault, "mysql", "mariadb") def _mysql_alter_default( element: "MySQLAlterDefault", compiler: "MySQLDDLCompiler", **kw ) -> str: return "%s ALTER COLUMN %s %s" % ( alter_table(compiler, element.table_name, element.schema), format_column_name(compiler, element.column_name), "SET DEFAULT %s" % format_server_default(compiler, element.default) if element.default is not None else "DROP DEFAULT", ) @compiles(MySQLModifyColumn, "mysql", "mariadb") def _mysql_modify_column( element: "MySQLModifyColumn", compiler: "MySQLDDLCompiler", **kw ) -> str: return "%s MODIFY %s %s" % ( alter_table(compiler, element.table_name, element.schema), format_column_name(compiler, element.column_name), _mysql_colspec( compiler, nullable=element.nullable, server_default=element.default, type_=element.type_, autoincrement=element.autoincrement, comment=element.comment, ), ) @compiles(MySQLChangeColumn, "mysql", "mariadb") def _mysql_change_column( element: "MySQLChangeColumn", compiler: "MySQLDDLCompiler", **kw ) -> str: return "%s CHANGE %s %s %s" % ( alter_table(compiler, element.table_name, element.schema), format_column_name(compiler, element.column_name), format_column_name(compiler, element.newname), _mysql_colspec( compiler, nullable=element.nullable, server_default=element.default, type_=element.type_, autoincrement=element.autoincrement, comment=element.comment, ), ) def _mysql_colspec( compiler: "MySQLDDLCompiler", nullable: Optional[bool], server_default: Optional[Union["_ServerDefault", "Literal[False]"]], type_: "TypeEngine", autoincrement: Optional[bool], comment: Optional[Union[str, "Literal[False]"]], ) -> str: spec = "%s %s" % ( compiler.dialect.type_compiler.process(type_), "NULL" if nullable else "NOT NULL", ) if 
autoincrement: spec += " AUTO_INCREMENT" if server_default is not False and server_default is not None: spec += " DEFAULT %s" % format_server_default(compiler, server_default) if comment: spec += " COMMENT %s" % compiler.sql_compiler.render_literal_value( comment, sqltypes.String() ) return spec @compiles(schema.DropConstraint, "mysql", "mariadb") def _mysql_drop_constraint( element: "DropConstraint", compiler: "MySQLDDLCompiler", **kw ) -> str: """Redefine SQLAlchemy's drop constraint to raise errors for invalid constraint type.""" constraint = element.element if isinstance( constraint, ( schema.ForeignKeyConstraint, schema.PrimaryKeyConstraint, schema.UniqueConstraint, ), ): assert not kw return compiler.visit_drop_constraint(element) elif isinstance(constraint, schema.CheckConstraint): # note that SQLAlchemy as of 1.2 does not yet support # DROP CONSTRAINT for MySQL/MariaDB, so we implement fully # here. if _is_mariadb(compiler.dialect): return "ALTER TABLE %s DROP CONSTRAINT %s" % ( compiler.preparer.format_table(constraint.table), compiler.preparer.format_constraint(constraint), ) else: return "ALTER TABLE %s DROP CHECK %s" % ( compiler.preparer.format_table(constraint.table), compiler.preparer.format_constraint(constraint), ) else: raise NotImplementedError( "No generic 'DROP CONSTRAINT' in MySQL - " "please specify constraint type" ) alembic-rel_1_7_6/alembic/ddl/oracle.py000066400000000000000000000122051417624537100201150ustar00rootroot00000000000000from typing import Any from typing import Optional from typing import TYPE_CHECKING from typing import Union from sqlalchemy.ext.compiler import compiles from sqlalchemy.sql import sqltypes from .base import AddColumn from .base import alter_table from .base import ColumnComment from .base import ColumnDefault from .base import ColumnName from .base import ColumnNullable from .base import ColumnType from .base import format_column_name from .base import format_server_default from .base import format_table_name from .base import format_type from .base import IdentityColumnDefault from .base import RenameTable from .impl import DefaultImpl if TYPE_CHECKING: from sqlalchemy.dialects.oracle.base import OracleDDLCompiler from sqlalchemy.engine.cursor import CursorResult from sqlalchemy.engine.cursor import LegacyCursorResult from sqlalchemy.sql.schema import Column class OracleImpl(DefaultImpl): __dialect__ = "oracle" transactional_ddl = False batch_separator = "/" command_terminator = "" type_synonyms = DefaultImpl.type_synonyms + ( {"VARCHAR", "VARCHAR2"}, {"BIGINT", "INTEGER", "SMALLINT", "DECIMAL", "NUMERIC", "NUMBER"}, ) identity_attrs_ignore = () def __init__(self, *arg, **kw) -> None: super(OracleImpl, self).__init__(*arg, **kw) self.batch_separator = self.context_opts.get( "oracle_batch_separator", self.batch_separator ) def _exec( self, construct: Any, *args, **kw ) -> Optional[Union["LegacyCursorResult", "CursorResult"]]: result = super(OracleImpl, self)._exec(construct, *args, **kw) if self.as_sql and self.batch_separator: self.static_output(self.batch_separator) return result def emit_begin(self) -> None: self._exec("SET TRANSACTION READ WRITE") def emit_commit(self) -> None: self._exec("COMMIT") @compiles(AddColumn, "oracle") def visit_add_column( element: "AddColumn", compiler: "OracleDDLCompiler", **kw ) -> str: return "%s %s" % ( alter_table(compiler, element.table_name, element.schema), add_column(compiler, element.column, **kw), ) @compiles(ColumnNullable, "oracle") def visit_column_nullable( element: "ColumnNullable", 
compiler: "OracleDDLCompiler", **kw ) -> str: return "%s %s %s" % ( alter_table(compiler, element.table_name, element.schema), alter_column(compiler, element.column_name), "NULL" if element.nullable else "NOT NULL", ) @compiles(ColumnType, "oracle") def visit_column_type( element: "ColumnType", compiler: "OracleDDLCompiler", **kw ) -> str: return "%s %s %s" % ( alter_table(compiler, element.table_name, element.schema), alter_column(compiler, element.column_name), "%s" % format_type(compiler, element.type_), ) @compiles(ColumnName, "oracle") def visit_column_name( element: "ColumnName", compiler: "OracleDDLCompiler", **kw ) -> str: return "%s RENAME COLUMN %s TO %s" % ( alter_table(compiler, element.table_name, element.schema), format_column_name(compiler, element.column_name), format_column_name(compiler, element.newname), ) @compiles(ColumnDefault, "oracle") def visit_column_default( element: "ColumnDefault", compiler: "OracleDDLCompiler", **kw ) -> str: return "%s %s %s" % ( alter_table(compiler, element.table_name, element.schema), alter_column(compiler, element.column_name), "DEFAULT %s" % format_server_default(compiler, element.default) if element.default is not None else "DEFAULT NULL", ) @compiles(ColumnComment, "oracle") def visit_column_comment( element: "ColumnComment", compiler: "OracleDDLCompiler", **kw ) -> str: ddl = "COMMENT ON COLUMN {table_name}.{column_name} IS {comment}" comment = compiler.sql_compiler.render_literal_value( (element.comment if element.comment is not None else ""), sqltypes.String(), ) return ddl.format( table_name=element.table_name, column_name=element.column_name, comment=comment, ) @compiles(RenameTable, "oracle") def visit_rename_table( element: "RenameTable", compiler: "OracleDDLCompiler", **kw ) -> str: return "%s RENAME TO %s" % ( alter_table(compiler, element.table_name, element.schema), format_table_name(compiler, element.new_table_name, None), ) def alter_column(compiler: "OracleDDLCompiler", name: str) -> str: return "MODIFY %s" % format_column_name(compiler, name) def add_column(compiler: "OracleDDLCompiler", column: "Column", **kw) -> str: return "ADD %s" % compiler.get_column_specification(column, **kw) @compiles(IdentityColumnDefault, "oracle") def visit_identity_column( element: "IdentityColumnDefault", compiler: "OracleDDLCompiler", **kw ): text = "%s %s " % ( alter_table(compiler, element.table_name, element.schema), alter_column(compiler, element.column_name), ) if element.default is None: # drop identity text += "DROP IDENTITY" return text else: text += compiler.visit_identity_column(element.default) return text alembic-rel_1_7_6/alembic/ddl/postgresql.py000066400000000000000000000532641417624537100210650ustar00rootroot00000000000000import logging import re from typing import Any from typing import cast from typing import List from typing import Optional from typing import Sequence from typing import Tuple from typing import TYPE_CHECKING from typing import Union from sqlalchemy import Column from sqlalchemy import Numeric from sqlalchemy import text from sqlalchemy import types as sqltypes from sqlalchemy.dialects.postgresql import BIGINT from sqlalchemy.dialects.postgresql import ExcludeConstraint from sqlalchemy.dialects.postgresql import INTEGER from sqlalchemy.schema import CreateIndex from sqlalchemy.sql.elements import ColumnClause from sqlalchemy.sql.elements import UnaryExpression from sqlalchemy.types import NULLTYPE from .base import alter_column from .base import alter_table from .base import AlterColumn from .base import 
ColumnComment from .base import compiles from .base import format_column_name from .base import format_table_name from .base import format_type from .base import IdentityColumnDefault from .base import RenameTable from .impl import DefaultImpl from .. import util from ..autogenerate import render from ..operations import ops from ..operations import schemaobj from ..operations.base import BatchOperations from ..operations.base import Operations from ..util import sqla_compat if TYPE_CHECKING: from typing import Literal from sqlalchemy.dialects.postgresql.array import ARRAY from sqlalchemy.dialects.postgresql.base import PGDDLCompiler from sqlalchemy.dialects.postgresql.hstore import HSTORE from sqlalchemy.dialects.postgresql.json import JSON from sqlalchemy.dialects.postgresql.json import JSONB from sqlalchemy.sql.elements import BinaryExpression from sqlalchemy.sql.elements import quoted_name from sqlalchemy.sql.schema import MetaData from sqlalchemy.sql.schema import Table from sqlalchemy.sql.type_api import TypeEngine from .base import _ServerDefault from ..autogenerate.api import AutogenContext from ..autogenerate.render import _f_name from ..runtime.migration import MigrationContext log = logging.getLogger(__name__) class PostgresqlImpl(DefaultImpl): __dialect__ = "postgresql" transactional_ddl = True type_synonyms = DefaultImpl.type_synonyms + ( {"FLOAT", "DOUBLE PRECISION"}, ) identity_attrs_ignore = ("on_null", "order") def create_index(self, index): # this likely defaults to None if not present, so get() # should normally not return the default value. being # defensive in any case postgresql_include = index.kwargs.get("postgresql_include", None) or () for col in postgresql_include: if col not in index.table.c: index.table.append_column(Column(col, sqltypes.NullType)) self._exec(CreateIndex(index)) def prep_table_for_batch(self, batch_impl, table): for constraint in table.constraints: if ( constraint.name is not None and constraint.name in batch_impl.named_constraints ): self.drop_constraint(constraint) def compare_server_default( self, inspector_column, metadata_column, rendered_metadata_default, rendered_inspector_default, ): # don't do defaults for SERIAL columns if ( metadata_column.primary_key and metadata_column is metadata_column.table._autoincrement_column ): return False conn_col_default = rendered_inspector_default defaults_equal = conn_col_default == rendered_metadata_default if defaults_equal: return False if None in (conn_col_default, rendered_metadata_default): return not defaults_equal # check for unquoted string and quote for PG String types if ( not isinstance(inspector_column.type, Numeric) and metadata_column.server_default is not None and isinstance(metadata_column.server_default.arg, str) and not re.match(r"^'.*'$", rendered_metadata_default) ): rendered_metadata_default = "'%s'" % rendered_metadata_default return not self.connection.scalar( text( "SELECT %s = %s" % (conn_col_default, rendered_metadata_default) ) ) def alter_column( # type:ignore[override] self, table_name: str, column_name: str, nullable: Optional[bool] = None, server_default: Union["_ServerDefault", "Literal[False]"] = False, name: Optional[str] = None, type_: Optional["TypeEngine"] = None, schema: Optional[str] = None, autoincrement: Optional[bool] = None, existing_type: Optional["TypeEngine"] = None, existing_server_default: Optional["_ServerDefault"] = None, existing_nullable: Optional[bool] = None, existing_autoincrement: Optional[bool] = None, **kw: Any ) -> None: using = 
kw.pop("postgresql_using", None) if using is not None and type_ is None: raise util.CommandError( "postgresql_using must be used with the type_ parameter" ) if type_ is not None: self._exec( PostgresqlColumnType( table_name, column_name, type_, schema=schema, using=using, existing_type=existing_type, existing_server_default=existing_server_default, existing_nullable=existing_nullable, ) ) super(PostgresqlImpl, self).alter_column( table_name, column_name, nullable=nullable, server_default=server_default, name=name, schema=schema, autoincrement=autoincrement, existing_type=existing_type, existing_server_default=existing_server_default, existing_nullable=existing_nullable, existing_autoincrement=existing_autoincrement, **kw ) def autogen_column_reflect(self, inspector, table, column_info): if column_info.get("default") and isinstance( column_info["type"], (INTEGER, BIGINT) ): seq_match = re.match( r"nextval\('(.+?)'::regclass\)", column_info["default"] ) if seq_match: info = sqla_compat._exec_on_inspector( inspector, text( "select c.relname, a.attname " "from pg_class as c join " "pg_depend d on d.objid=c.oid and " "d.classid='pg_class'::regclass and " "d.refclassid='pg_class'::regclass " "join pg_class t on t.oid=d.refobjid " "join pg_attribute a on a.attrelid=t.oid and " "a.attnum=d.refobjsubid " "where c.relkind='S' and c.relname=:seqname" ), seqname=seq_match.group(1), ).first() if info: seqname, colname = info if colname == column_info["name"]: log.info( "Detected sequence named '%s' as " "owned by integer column '%s(%s)', " "assuming SERIAL and omitting", seqname, table.name, colname, ) # sequence, and the owner is this column, # its a SERIAL - whack it! del column_info["default"] def correct_for_autogen_constraints( self, conn_unique_constraints, conn_indexes, metadata_unique_constraints, metadata_indexes, ): conn_indexes_by_name = dict((c.name, c) for c in conn_indexes) doubled_constraints = set( index for index in conn_indexes if index.info.get("duplicates_constraint") ) for ix in doubled_constraints: conn_indexes.remove(ix) for idx in list(metadata_indexes): if idx.name in conn_indexes_by_name: continue exprs = idx.expressions for expr in exprs: while isinstance(expr, UnaryExpression): expr = expr.element if not isinstance(expr, Column): util.warn( "autogenerate skipping functional index %s; " "not supported by SQLAlchemy reflection" % idx.name ) metadata_indexes.discard(idx) def render_type( self, type_: "TypeEngine", autogen_context: "AutogenContext" ) -> Union[str, "Literal[False]"]: mod = type(type_).__module__ if not mod.startswith("sqlalchemy.dialects.postgresql"): return False if hasattr(self, "_render_%s_type" % type_.__visit_name__): meth = getattr(self, "_render_%s_type" % type_.__visit_name__) return meth(type_, autogen_context) return False def _render_HSTORE_type( self, type_: "HSTORE", autogen_context: "AutogenContext" ) -> str: return cast( str, render._render_type_w_subtype( type_, autogen_context, "text_type", r"(.+?\(.*text_type=)" ), ) def _render_ARRAY_type( self, type_: "ARRAY", autogen_context: "AutogenContext" ) -> str: return cast( str, render._render_type_w_subtype( type_, autogen_context, "item_type", r"(.+?\()" ), ) def _render_JSON_type( self, type_: "JSON", autogen_context: "AutogenContext" ) -> str: return cast( str, render._render_type_w_subtype( type_, autogen_context, "astext_type", r"(.+?\(.*astext_type=)" ), ) def _render_JSONB_type( self, type_: "JSONB", autogen_context: "AutogenContext" ) -> str: return cast( str, render._render_type_w_subtype( 
type_, autogen_context, "astext_type", r"(.+?\(.*astext_type=)" ), ) class PostgresqlColumnType(AlterColumn): def __init__( self, name: str, column_name: str, type_: "TypeEngine", **kw ) -> None: using = kw.pop("using", None) super(PostgresqlColumnType, self).__init__(name, column_name, **kw) self.type_ = sqltypes.to_instance(type_) self.using = using @compiles(RenameTable, "postgresql") def visit_rename_table( element: RenameTable, compiler: "PGDDLCompiler", **kw ) -> str: return "%s RENAME TO %s" % ( alter_table(compiler, element.table_name, element.schema), format_table_name(compiler, element.new_table_name, None), ) @compiles(PostgresqlColumnType, "postgresql") def visit_column_type( element: PostgresqlColumnType, compiler: "PGDDLCompiler", **kw ) -> str: return "%s %s %s %s" % ( alter_table(compiler, element.table_name, element.schema), alter_column(compiler, element.column_name), "TYPE %s" % format_type(compiler, element.type_), "USING %s" % element.using if element.using else "", ) @compiles(ColumnComment, "postgresql") def visit_column_comment( element: "ColumnComment", compiler: "PGDDLCompiler", **kw ) -> str: ddl = "COMMENT ON COLUMN {table_name}.{column_name} IS {comment}" comment = ( compiler.sql_compiler.render_literal_value( element.comment, sqltypes.String() ) if element.comment is not None else "NULL" ) return ddl.format( table_name=format_table_name( compiler, element.table_name, element.schema ), column_name=format_column_name(compiler, element.column_name), comment=comment, ) @compiles(IdentityColumnDefault, "postgresql") def visit_identity_column( element: "IdentityColumnDefault", compiler: "PGDDLCompiler", **kw ): text = "%s %s " % ( alter_table(compiler, element.table_name, element.schema), alter_column(compiler, element.column_name), ) if element.default is None: # drop identity text += "DROP IDENTITY" return text elif element.existing_server_default is None: # add identity options text += "ADD " text += compiler.visit_identity_column(element.default) return text else: # alter identity diff, _, _ = element.impl._compare_identity_default( element.default, element.existing_server_default ) identity = element.default for attr in sorted(diff): if attr == "always": text += "SET GENERATED %s " % ( "ALWAYS" if identity.always else "BY DEFAULT" ) else: text += "SET %s " % compiler.get_identity_options( sqla_compat.Identity(**{attr: getattr(identity, attr)}) ) return text @Operations.register_operation("create_exclude_constraint") @BatchOperations.register_operation( "create_exclude_constraint", "batch_create_exclude_constraint" ) @ops.AddConstraintOp.register_add_constraint("exclude_constraint") class CreateExcludeConstraintOp(ops.AddConstraintOp): """Represent a create exclude constraint operation.""" constraint_type = "exclude" def __init__( self, constraint_name: Optional[str], table_name: Union[str, "quoted_name"], elements: Union[ Sequence[Tuple[str, str]], Sequence[Tuple["ColumnClause", str]], ], where: Optional[Union["BinaryExpression", str]] = None, schema: Optional[str] = None, _orig_constraint: Optional["ExcludeConstraint"] = None, **kw ) -> None: self.constraint_name = constraint_name self.table_name = table_name self.elements = elements self.where = where self.schema = schema self._orig_constraint = _orig_constraint self.kw = kw @classmethod def from_constraint( # type:ignore[override] cls, constraint: "ExcludeConstraint" ) -> "CreateExcludeConstraintOp": constraint_table = sqla_compat._table_for_constraint(constraint) return cls( constraint.name, 
constraint_table.name, [ (expr, op) for expr, name, op in constraint._render_exprs # type:ignore[attr-defined] # noqa ], where=constraint.where, schema=constraint_table.schema, _orig_constraint=constraint, deferrable=constraint.deferrable, initially=constraint.initially, using=constraint.using, ) def to_constraint( self, migration_context: Optional["MigrationContext"] = None ) -> "ExcludeConstraint": if self._orig_constraint is not None: return self._orig_constraint schema_obj = schemaobj.SchemaObjects(migration_context) t = schema_obj.table(self.table_name, schema=self.schema) excl = ExcludeConstraint( *self.elements, name=self.constraint_name, where=self.where, **self.kw ) for ( expr, name, oper, ) in excl._render_exprs: # type:ignore[attr-defined] t.append_column(Column(name, NULLTYPE)) t.append_constraint(excl) return excl @classmethod def create_exclude_constraint( cls, operations: "Operations", constraint_name: str, table_name: str, *elements: Any, **kw: Any ) -> Optional["Table"]: """Issue an alter to create an EXCLUDE constraint using the current migration context. .. note:: This method is Postgresql specific, and additionally requires at least SQLAlchemy 1.0. e.g.:: from alembic import op op.create_exclude_constraint( "user_excl", "user", ("period", '&&'), ("group", '='), where=("group != 'some group'") ) Note that the expressions work the same way as that of the ``ExcludeConstraint`` object itself; if plain strings are passed, quoting rules must be applied manually. :param name: Name of the constraint. :param table_name: String name of the source table. :param elements: exclude conditions. :param where: SQL expression or SQL string with optional WHERE clause. :param deferrable: optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when issuing DDL for this constraint. :param initially: optional string. If set, emit INITIALLY when issuing DDL for this constraint. :param schema: Optional schema name to operate within. """ op = cls(constraint_name, table_name, elements, **kw) return operations.invoke(op) @classmethod def batch_create_exclude_constraint( cls, operations, constraint_name, *elements, **kw ): """Issue a "create exclude constraint" instruction using the current batch migration context. .. note:: This method is Postgresql specific, and additionally requires at least SQLAlchemy 1.0. .. seealso:: :meth:`.Operations.create_exclude_constraint` """ kw["schema"] = operations.impl.schema op = cls(constraint_name, operations.impl.table_name, elements, **kw) return operations.invoke(op) @render.renderers.dispatch_for(CreateExcludeConstraintOp) def _add_exclude_constraint( autogen_context: "AutogenContext", op: "CreateExcludeConstraintOp" ) -> str: return _exclude_constraint(op.to_constraint(), autogen_context, alter=True) @render._constraint_renderers.dispatch_for(ExcludeConstraint) def _render_inline_exclude_constraint( constraint: "ExcludeConstraint", autogen_context: "AutogenContext", namespace_metadata: "MetaData", ) -> str: rendered = render._user_defined_render( "exclude", constraint, autogen_context ) if rendered is not False: return rendered return _exclude_constraint(constraint, autogen_context, False) def _postgresql_autogenerate_prefix(autogen_context: "AutogenContext") -> str: imports = autogen_context.imports if imports is not None: imports.add("from sqlalchemy.dialects import postgresql") return "postgresql." 
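# --- illustrative note (editor's addition, not part of the original source) ---
# The ``_exclude_constraint()`` helper below is what autogenerate uses to turn
# a reflected/declared ``ExcludeConstraint`` into rendered migration code.  As
# a rough, hypothetical sketch (names and operators invented for illustration),
# an ALTER-style directive comes out looking something like::
#
#     op.create_exclude_constraint(
#         "user_excl",
#         "user",
#         (column("period"), "&&"),
#         where="group != 'some group'",
#         using="gist",
#     )
#
# while the inline form (e.g. inside a rendered ``op.create_table()``) is
# emitted as ``postgresql.ExcludeConstraint(...)`` with the same element
# tuples.  The exact rendering of each element depends on whether plain
# strings or ``column()`` constructs were used in the source constraint.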
def _exclude_constraint( constraint: "ExcludeConstraint", autogen_context: "AutogenContext", alter: bool, ) -> str: opts: List[Tuple[str, Union[quoted_name, str, _f_name, None]]] = [] has_batch = autogen_context._has_batch if constraint.deferrable: opts.append(("deferrable", str(constraint.deferrable))) if constraint.initially: opts.append(("initially", str(constraint.initially))) if constraint.using: opts.append(("using", str(constraint.using))) if not has_batch and alter and constraint.table.schema: opts.append(("schema", render._ident(constraint.table.schema))) if not alter and constraint.name: opts.append( ("name", render._render_gen_name(autogen_context, constraint.name)) ) if alter: args = [ repr(render._render_gen_name(autogen_context, constraint.name)) ] if not has_batch: args += [repr(render._ident(constraint.table.name))] args.extend( [ "(%s, %r)" % ( _render_potential_column(sqltext, autogen_context), opstring, ) for sqltext, name, opstring in constraint._render_exprs # type:ignore[attr-defined] # noqa ] ) if constraint.where is not None: args.append( "where=%s" % render._render_potential_expr( constraint.where, autogen_context ) ) args.extend(["%s=%r" % (k, v) for k, v in opts]) return "%(prefix)screate_exclude_constraint(%(args)s)" % { "prefix": render._alembic_autogenerate_prefix(autogen_context), "args": ", ".join(args), } else: args = [ "(%s, %r)" % (_render_potential_column(sqltext, autogen_context), opstring) for sqltext, name, opstring in constraint._render_exprs # type:ignore[attr-defined] # noqa ] if constraint.where is not None: args.append( "where=%s" % render._render_potential_expr( constraint.where, autogen_context ) ) args.extend(["%s=%r" % (k, v) for k, v in opts]) return "%(prefix)sExcludeConstraint(%(args)s)" % { "prefix": _postgresql_autogenerate_prefix(autogen_context), "args": ", ".join(args), } def _render_potential_column( value: Union["ColumnClause", "Column"], autogen_context: "AutogenContext" ) -> str: if isinstance(value, ColumnClause): template = "%(prefix)scolumn(%(name)r)" return template % { "prefix": render._sqlalchemy_autogenerate_prefix(autogen_context), "name": value.name, } else: return render._render_potential_expr( value, autogen_context, wrap_in_text=False ) alembic-rel_1_7_6/alembic/ddl/sqlite.py000066400000000000000000000151161417624537100201550ustar00rootroot00000000000000import re from typing import Any from typing import Dict from typing import Optional from typing import TYPE_CHECKING from typing import Union from sqlalchemy import cast from sqlalchemy import JSON from sqlalchemy import schema from sqlalchemy import sql from .impl import DefaultImpl from .. import util if TYPE_CHECKING: from sqlalchemy.engine.reflection import Inspector from sqlalchemy.sql.elements import Cast from sqlalchemy.sql.elements import ClauseElement from sqlalchemy.sql.schema import Column from sqlalchemy.sql.schema import Constraint from sqlalchemy.sql.schema import Table from sqlalchemy.sql.type_api import TypeEngine from ..operations.batch import BatchOperationsImpl class SQLiteImpl(DefaultImpl): __dialect__ = "sqlite" transactional_ddl = False """SQLite supports transactional DDL, but pysqlite does not: see: http://bugs.python.org/issue10740 """ def requires_recreate_in_batch( self, batch_op: "BatchOperationsImpl" ) -> bool: """Return True if the given :class:`.BatchOperationsImpl` would need the table to be recreated and copied in order to proceed. Normally, only returns True on SQLite when operations other than add_column are present. 
""" for op in batch_op.batch: if op[0] == "add_column": col = op[1][1] if isinstance( col.server_default, schema.DefaultClause ) and isinstance(col.server_default.arg, sql.ClauseElement): return True elif ( isinstance(col.server_default, util.sqla_compat.Computed) and col.server_default.persisted ): return True elif op[0] not in ("create_index", "drop_index"): return True else: return False def add_constraint(self, const: "Constraint"): # attempt to distinguish between an # auto-gen constraint and an explicit one if const._create_rule is None: # type:ignore[attr-defined] raise NotImplementedError( "No support for ALTER of constraints in SQLite dialect. " "Please refer to the batch mode feature which allows for " "SQLite migrations using a copy-and-move strategy." ) elif const._create_rule(self): # type:ignore[attr-defined] util.warn( "Skipping unsupported ALTER for " "creation of implicit constraint. " "Please refer to the batch mode feature which allows for " "SQLite migrations using a copy-and-move strategy." ) def drop_constraint(self, const: "Constraint"): if const._create_rule is None: # type:ignore[attr-defined] raise NotImplementedError( "No support for ALTER of constraints in SQLite dialect. " "Please refer to the batch mode feature which allows for " "SQLite migrations using a copy-and-move strategy." ) def compare_server_default( self, inspector_column: "Column", metadata_column: "Column", rendered_metadata_default: Optional[str], rendered_inspector_default: Optional[str], ) -> bool: if rendered_metadata_default is not None: rendered_metadata_default = re.sub( r"^\((.+)\)$", r"\1", rendered_metadata_default ) rendered_metadata_default = re.sub( r"^\"?'(.+)'\"?$", r"\1", rendered_metadata_default ) if rendered_inspector_default is not None: rendered_inspector_default = re.sub( r"^\"?'(.+)'\"?$", r"\1", rendered_inspector_default ) return rendered_inspector_default != rendered_metadata_default def _guess_if_default_is_unparenthesized_sql_expr( self, expr: Optional[str] ) -> bool: """Determine if a server default is a SQL expression or a constant. There are too many assertions that expect server defaults to round-trip identically without parenthesis added so we will add parens only in very specific cases. 
""" if not expr: return False elif re.match(r"^[0-9\.]$", expr): return False elif re.match(r"^'.+'$", expr): return False elif re.match(r"^\(.+\)$", expr): return False else: return True def autogen_column_reflect( self, inspector: "Inspector", table: "Table", column_info: Dict[str, Any], ) -> None: # SQLite expression defaults require parenthesis when sent # as DDL if self._guess_if_default_is_unparenthesized_sql_expr( column_info.get("default", None) ): column_info["default"] = "(%s)" % (column_info["default"],) def render_ddl_sql_expr( self, expr: "ClauseElement", is_server_default: bool = False, **kw ) -> str: # SQLite expression defaults require parenthesis when sent # as DDL str_expr = super(SQLiteImpl, self).render_ddl_sql_expr( expr, is_server_default=is_server_default, **kw ) if ( is_server_default and self._guess_if_default_is_unparenthesized_sql_expr(str_expr) ): str_expr = "(%s)" % (str_expr,) return str_expr def cast_for_batch_migrate( self, existing: "Column", existing_transfer: Dict[str, Union["TypeEngine", "Cast"]], new_type: "TypeEngine", ) -> None: if ( existing.type._type_affinity # type:ignore[attr-defined] is not new_type._type_affinity # type:ignore[attr-defined] and not isinstance(new_type, JSON) ): existing_transfer["expr"] = cast( existing_transfer["expr"], new_type ) # @compiles(AddColumn, 'sqlite') # def visit_add_column(element, compiler, **kw): # return "%s %s" % ( # alter_table(compiler, element.table_name, element.schema), # add_column(compiler, element.column, **kw) # ) # def add_column(compiler, column, **kw): # text = "ADD COLUMN %s" % compiler.get_column_specification(column, **kw) # need to modify SQLAlchemy so that the CHECK associated with a Boolean # or Enum gets placed as part of the column constraints, not the Table # see ticket 98 # for const in column.constraints: # text += compiler.process(AddConstraint(const)) # return text alembic-rel_1_7_6/alembic/environment.py000066400000000000000000000000531417624537100204470ustar00rootroot00000000000000from .runtime.environment import * # noqa alembic-rel_1_7_6/alembic/migration.py000066400000000000000000000000511417624537100200720ustar00rootroot00000000000000from .runtime.migration import * # noqa alembic-rel_1_7_6/alembic/op.py000066400000000000000000000002471417624537100165260ustar00rootroot00000000000000from .operations.base import Operations # create proxy functions for # each method on the Operations class. 
Operations.create_module_class_proxy(globals(), locals()) alembic-rel_1_7_6/alembic/op.pyi000066400000000000000000001276751417624537100167160ustar00rootroot00000000000000# ### this file stubs are generated by tools/write_pyi.py - do not edit ### # ### imports are manually managed from typing import Any from typing import Callable from typing import List from typing import Optional from typing import Sequence from typing import Type from typing import TYPE_CHECKING from typing import Union from sqlalchemy.sql.expression import TableClause from sqlalchemy.sql.expression import Update if TYPE_CHECKING: from sqlalchemy.engine import Connection from sqlalchemy.sql.elements import BinaryExpression from sqlalchemy.sql.elements import conv from sqlalchemy.sql.elements import TextClause from sqlalchemy.sql.functions import Function from sqlalchemy.sql.schema import Column from sqlalchemy.sql.schema import Computed from sqlalchemy.sql.schema import Identity from sqlalchemy.sql.schema import Table from sqlalchemy.sql.type_api import TypeEngine from sqlalchemy.util import immutabledict from .operations.ops import MigrateOperation from .util.sqla_compat import _literal_bindparam ### end imports ### def add_column( table_name: str, column: "Column", schema: Optional[str] = None ) -> Optional["Table"]: """Issue an "add column" instruction using the current migration context. e.g.:: from alembic import op from sqlalchemy import Column, String op.add_column('organization', Column('name', String()) ) The provided :class:`~sqlalchemy.schema.Column` object can also specify a :class:`~sqlalchemy.schema.ForeignKey`, referencing a remote table name. Alembic will automatically generate a stub "referenced" table and emit a second ALTER statement in order to add the constraint separately:: from alembic import op from sqlalchemy import Column, INTEGER, ForeignKey op.add_column('organization', Column('account_id', INTEGER, ForeignKey('accounts.id')) ) Note that this statement uses the :class:`~sqlalchemy.schema.Column` construct as is from the SQLAlchemy library. In particular, default values to be created on the database side are specified using the ``server_default`` parameter, and not ``default`` which only specifies Python-side defaults:: from alembic import op from sqlalchemy import Column, TIMESTAMP, func # specify "DEFAULT NOW" along with the column add op.add_column('account', Column('timestamp', TIMESTAMP, server_default=func.now()) ) :param table_name: String name of the parent table. :param column: a :class:`sqlalchemy.schema.Column` object representing the new column. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. """ def alter_column( table_name: str, column_name: str, nullable: Optional[bool] = None, comment: Union[str, bool, None] = False, server_default: Any = False, new_column_name: Optional[str] = None, type_: Union["TypeEngine", Type["TypeEngine"], None] = None, existing_type: Union["TypeEngine", Type["TypeEngine"], None] = None, existing_server_default: Union[ str, bool, "Identity", "Computed", None ] = False, existing_nullable: Optional[bool] = None, existing_comment: Optional[str] = None, schema: Optional[str] = None, **kw ) -> Optional["Table"]: """Issue an "alter column" instruction using the current migration context. Generally, only that aspect of the column which is being changed, i.e. name, type, nullability, default, needs to be specified. 
Multiple changes can also be specified at once and the backend should "do the right thing", emitting each change either separately or together as the backend allows. MySQL has special requirements here, since MySQL cannot ALTER a column without a full specification. When producing MySQL-compatible migration files, it is recommended that the ``existing_type``, ``existing_server_default``, and ``existing_nullable`` parameters be present, if not being altered. Type changes which are against the SQLAlchemy "schema" types :class:`~sqlalchemy.types.Boolean` and :class:`~sqlalchemy.types.Enum` may also add or drop constraints which accompany those types on backends that don't support them natively. The ``existing_type`` argument is used in this case to identify and remove a previous constraint that was bound to the type object. :param table_name: string name of the target table. :param column_name: string name of the target column, as it exists before the operation begins. :param nullable: Optional; specify ``True`` or ``False`` to alter the column's nullability. :param server_default: Optional; specify a string SQL expression, :func:`~sqlalchemy.sql.expression.text`, or :class:`~sqlalchemy.schema.DefaultClause` to indicate an alteration to the column's default value. Set to ``None`` to have the default removed. :param comment: optional string text of a new comment to add to the column. .. versionadded:: 1.0.6 :param new_column_name: Optional; specify a string name here to indicate the new name within a column rename operation. :param type\_: Optional; a :class:`~sqlalchemy.types.TypeEngine` type object to specify a change to the column's type. For SQLAlchemy types that also indicate a constraint (i.e. :class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`), the constraint is also generated. :param autoincrement: set the ``AUTO_INCREMENT`` flag of the column; currently understood by the MySQL dialect. :param existing_type: Optional; a :class:`~sqlalchemy.types.TypeEngine` type object to specify the previous type. This is required for all MySQL column alter operations that don't otherwise specify a new type, as well as for when nullability is being changed on a SQL Server column. It is also used if the type is a so-called SQLlchemy "schema" type which may define a constraint (i.e. :class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`), so that the constraint can be dropped. :param existing_server_default: Optional; The existing default value of the column. Required on MySQL if an existing default is not being changed; else MySQL removes the default. :param existing_nullable: Optional; the existing nullability of the column. Required on MySQL if the existing nullability is not being changed; else MySQL sets this to NULL. :param existing_autoincrement: Optional; the existing autoincrement of the column. Used for MySQL's system of altering a column that specifies ``AUTO_INCREMENT``. :param existing_comment: string text of the existing comment on the column to be maintained. Required on MySQL if the existing comment on the column is not being changed. .. versionadded:: 1.0.6 :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. :param postgresql_using: String argument which will indicate a SQL expression to render within the Postgresql-specific USING clause within ALTER COLUMN. 
This string is taken directly as raw SQL which must explicitly include any necessary quoting or escaping of tokens within the expression. """ def batch_alter_table( table_name, schema=None, recreate="auto", partial_reordering=None, copy_from=None, table_args=(), table_kwargs=immutabledict({}), reflect_args=(), reflect_kwargs=immutabledict({}), naming_convention=None, ): """Invoke a series of per-table migrations in batch. Batch mode allows a series of operations specific to a table to be syntactically grouped together, and allows for alternate modes of table migration, in particular the "recreate" style of migration required by SQLite. "recreate" style is as follows: 1. A new table is created with the new specification, based on the migration directives within the batch, using a temporary name. 2. the data copied from the existing table to the new table. 3. the existing table is dropped. 4. the new table is renamed to the existing table name. The directive by default will only use "recreate" style on the SQLite backend, and only if directives are present which require this form, e.g. anything other than ``add_column()``. The batch operation on other backends will proceed using standard ALTER TABLE operations. The method is used as a context manager, which returns an instance of :class:`.BatchOperations`; this object is the same as :class:`.Operations` except that table names and schema names are omitted. E.g.:: with op.batch_alter_table("some_table") as batch_op: batch_op.add_column(Column('foo', Integer)) batch_op.drop_column('bar') The operations within the context manager are invoked at once when the context is ended. When run against SQLite, if the migrations include operations not supported by SQLite's ALTER TABLE, the entire table will be copied to a new one with the new specification, moving all data across as well. The copy operation by default uses reflection to retrieve the current structure of the table, and therefore :meth:`.batch_alter_table` in this mode requires that the migration is run in "online" mode. The ``copy_from`` parameter may be passed which refers to an existing :class:`.Table` object, which will bypass this reflection step. .. note:: The table copy operation will currently not copy CHECK constraints, and may not copy UNIQUE constraints that are unnamed, as is possible on SQLite. See the section :ref:`sqlite_batch_constraints` for workarounds. :param table_name: name of table :param schema: optional schema name. :param recreate: under what circumstances the table should be recreated. At its default of ``"auto"``, the SQLite dialect will recreate the table if any operations other than ``add_column()``, ``create_index()``, or ``drop_index()`` are present. Other options include ``"always"`` and ``"never"``. :param copy_from: optional :class:`~sqlalchemy.schema.Table` object that will act as the structure of the table being copied. If omitted, table reflection is used to retrieve the structure of the table. .. seealso:: :ref:`batch_offline_mode` :paramref:`~.Operations.batch_alter_table.reflect_args` :paramref:`~.Operations.batch_alter_table.reflect_kwargs` :param reflect_args: a sequence of additional positional arguments that will be applied to the table structure being reflected / copied; this may be used to pass column and constraint overrides to the table that will be reflected, in lieu of passing the whole :class:`~sqlalchemy.schema.Table` using :paramref:`~.Operations.batch_alter_table.copy_from`. 
:param reflect_kwargs: a dictionary of additional keyword arguments that will be applied to the table structure being copied; this may be used to pass additional table and reflection options to the table that will be reflected, in lieu of passing the whole :class:`~sqlalchemy.schema.Table` using :paramref:`~.Operations.batch_alter_table.copy_from`. :param table_args: a sequence of additional positional arguments that will be applied to the new :class:`~sqlalchemy.schema.Table` when created, in addition to those copied from the source table. This may be used to provide additional constraints such as CHECK constraints that may not be reflected. :param table_kwargs: a dictionary of additional keyword arguments that will be applied to the new :class:`~sqlalchemy.schema.Table` when created, in addition to those copied from the source table. This may be used to provide for additional table options that may not be reflected. :param naming_convention: a naming convention dictionary of the form described at :ref:`autogen_naming_conventions` which will be applied to the :class:`~sqlalchemy.schema.MetaData` during the reflection process. This is typically required if one wants to drop SQLite constraints, as these constraints will not have names when reflected on this backend. Requires SQLAlchemy **0.9.4** or greater. .. seealso:: :ref:`dropping_sqlite_foreign_keys` :param partial_reordering: a list of tuples, each suggesting a desired ordering of two or more columns in the newly created table. Requires that :paramref:`.batch_alter_table.recreate` is set to ``"always"``. Examples, given a table with columns "a", "b", "c", and "d": Specify the order of all columns:: with op.batch_alter_table( "some_table", recreate="always", partial_reordering=[("c", "d", "a", "b")] ) as batch_op: pass Ensure "d" appears before "c", and "b", appears before "a":: with op.batch_alter_table( "some_table", recreate="always", partial_reordering=[("d", "c"), ("b", "a")] ) as batch_op: pass The ordering of columns not included in the partial_reordering set is undefined. Therefore it is best to specify the complete ordering of all columns for best results. .. versionadded:: 1.4.0 .. note:: batch mode requires SQLAlchemy 0.8 or above. .. seealso:: :ref:`batch_migrations` """ def bulk_insert( table: Union["Table", "TableClause"], rows: List[dict], multiinsert: bool = True, ) -> None: """Issue a "bulk insert" operation using the current migration context. This provides a means of representing an INSERT of multiple rows which works equally well in the context of executing on a live connection as well as that of generating a SQL script. In the case of a SQL script, the values are rendered inline into the statement. e.g.:: from alembic import op from datetime import date from sqlalchemy.sql import table, column from sqlalchemy import String, Integer, Date # Create an ad-hoc table to use for the insert statement. accounts_table = table('account', column('id', Integer), column('name', String), column('create_date', Date) ) op.bulk_insert(accounts_table, [ {'id':1, 'name':'John Smith', 'create_date':date(2010, 10, 5)}, {'id':2, 'name':'Ed Williams', 'create_date':date(2007, 5, 27)}, {'id':3, 'name':'Wendy Jones', 'create_date':date(2008, 8, 15)}, ] ) When using --sql mode, some datatypes may not render inline automatically, such as dates and other special types. 
When this issue is present, :meth:`.Operations.inline_literal` may be used:: op.bulk_insert(accounts_table, [ {'id':1, 'name':'John Smith', 'create_date':op.inline_literal("2010-10-05")}, {'id':2, 'name':'Ed Williams', 'create_date':op.inline_literal("2007-05-27")}, {'id':3, 'name':'Wendy Jones', 'create_date':op.inline_literal("2008-08-15")}, ], multiinsert=False ) When using :meth:`.Operations.inline_literal` in conjunction with :meth:`.Operations.bulk_insert`, in order for the statement to work in "online" (e.g. non --sql) mode, the :paramref:`~.Operations.bulk_insert.multiinsert` flag should be set to ``False``, which will have the effect of individual INSERT statements being emitted to the database, each with a distinct VALUES clause, so that the "inline" values can still be rendered, rather than attempting to pass the values as bound parameters. :param table: a table object which represents the target of the INSERT. :param rows: a list of dictionaries indicating rows. :param multiinsert: when at its default of True and --sql mode is not enabled, the INSERT statement will be executed using "executemany()" style, where all elements in the list of dictionaries are passed as bound parameters in a single list. Setting this to False results in individual INSERT statements being emitted per parameter set, and is needed in those cases where non-literal values are present in the parameter sets. """ def create_check_constraint( constraint_name: Optional[str], table_name: str, condition: Union[str, "BinaryExpression"], schema: Optional[str] = None, **kw ) -> Optional["Table"]: """Issue a "create check constraint" instruction using the current migration context. e.g.:: from alembic import op from sqlalchemy.sql import column, func op.create_check_constraint( "ck_user_name_len", "user", func.len(column('name')) > 5 ) CHECK constraints are usually against a SQL expression, so ad-hoc table metadata is usually needed. The function will convert the given arguments into a :class:`sqlalchemy.schema.CheckConstraint` bound to an anonymous table in order to emit the CREATE statement. :param name: Name of the check constraint. The name is necessary so that an ALTER statement can be emitted. For setups that use an automated naming scheme such as that described at :ref:`sqla:constraint_naming_conventions`, ``name`` here can be ``None``, as the event listener will apply the name to the constraint object when it is associated with the table. :param table_name: String name of the source table. :param condition: SQL expression that's the condition of the constraint. Can be a string or SQLAlchemy expression language structure. :param deferrable: optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when issuing DDL for this constraint. :param initially: optional string. If set, emit INITIALLY when issuing DDL for this constraint. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. """ def create_exclude_constraint( constraint_name: str, table_name: str, *elements: Any, **kw: Any ) -> Optional["Table"]: """Issue an alter to create an EXCLUDE constraint using the current migration context. .. note:: This method is Postgresql specific, and additionally requires at least SQLAlchemy 1.0. 
e.g.:: from alembic import op op.create_exclude_constraint( "user_excl", "user", ("period", '&&'), ("group", '='), where=("group != 'some group'") ) Note that the expressions work the same way as that of the ``ExcludeConstraint`` object itself; if plain strings are passed, quoting rules must be applied manually. :param name: Name of the constraint. :param table_name: String name of the source table. :param elements: exclude conditions. :param where: SQL expression or SQL string with optional WHERE clause. :param deferrable: optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when issuing DDL for this constraint. :param initially: optional string. If set, emit INITIALLY when issuing DDL for this constraint. :param schema: Optional schema name to operate within. """ def create_foreign_key( constraint_name: Optional[str], source_table: str, referent_table: str, local_cols: List[str], remote_cols: List[str], onupdate: Optional[str] = None, ondelete: Optional[str] = None, deferrable: Optional[bool] = None, initially: Optional[str] = None, match: Optional[str] = None, source_schema: Optional[str] = None, referent_schema: Optional[str] = None, **dialect_kw ) -> Optional["Table"]: """Issue a "create foreign key" instruction using the current migration context. e.g.:: from alembic import op op.create_foreign_key( "fk_user_address", "address", "user", ["user_id"], ["id"]) This internally generates a :class:`~sqlalchemy.schema.Table` object containing the necessary columns, then generates a new :class:`~sqlalchemy.schema.ForeignKeyConstraint` object which it then associates with the :class:`~sqlalchemy.schema.Table`. Any event listeners associated with this action will be fired off normally. The :class:`~sqlalchemy.schema.AddConstraint` construct is ultimately used to generate the ALTER statement. :param constraint_name: Name of the foreign key constraint. The name is necessary so that an ALTER statement can be emitted. For setups that use an automated naming scheme such as that described at :ref:`sqla:constraint_naming_conventions`, ``name`` here can be ``None``, as the event listener will apply the name to the constraint object when it is associated with the table. :param source_table: String name of the source table. :param referent_table: String name of the destination table. :param local_cols: a list of string column names in the source table. :param remote_cols: a list of string column names in the remote table. :param onupdate: Optional string. If set, emit ON UPDATE when issuing DDL for this constraint. Typical values include CASCADE, DELETE and RESTRICT. :param ondelete: Optional string. If set, emit ON DELETE when issuing DDL for this constraint. Typical values include CASCADE, DELETE and RESTRICT. :param deferrable: optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when issuing DDL for this constraint. :param source_schema: Optional schema name of the source table. :param referent_schema: Optional schema name of the destination table. """ def create_index( index_name: str, table_name: str, columns: Sequence[Union[str, "TextClause", "Function"]], schema: Optional[str] = None, unique: bool = False, **kw ) -> Optional["Table"]: """Issue a "create index" instruction using the current migration context. 
e.g.:: from alembic import op op.create_index('ik_test', 't1', ['foo', 'bar']) Functional indexes can be produced by using the :func:`sqlalchemy.sql.expression.text` construct:: from alembic import op from sqlalchemy import text op.create_index('ik_test', 't1', [text('lower(foo)')]) :param index_name: name of the index. :param table_name: name of the owning table. :param columns: a list consisting of string column names and/or :func:`~sqlalchemy.sql.expression.text` constructs. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. :param unique: If True, create a unique index. :param quote: Force quoting of this column's name on or off, corresponding to ``True`` or ``False``. When left at its default of ``None``, the column identifier will be quoted according to whether the name is case sensitive (identifiers with at least one upper case character are treated as case sensitive), or if it's a reserved word. This flag is only needed to force quoting of a reserved word which is not known by the SQLAlchemy dialect. :param \**kw: Additional keyword arguments not mentioned above are dialect specific, and passed in the form ``_``. See the documentation regarding an individual dialect at :ref:`dialect_toplevel` for detail on documented arguments. """ def create_primary_key( constraint_name: Optional[str], table_name: str, columns: List[str], schema: Optional[str] = None, ) -> Optional["Table"]: """Issue a "create primary key" instruction using the current migration context. e.g.:: from alembic import op op.create_primary_key( "pk_my_table", "my_table", ["id", "version"] ) This internally generates a :class:`~sqlalchemy.schema.Table` object containing the necessary columns, then generates a new :class:`~sqlalchemy.schema.PrimaryKeyConstraint` object which it then associates with the :class:`~sqlalchemy.schema.Table`. Any event listeners associated with this action will be fired off normally. The :class:`~sqlalchemy.schema.AddConstraint` construct is ultimately used to generate the ALTER statement. :param constraint_name: Name of the primary key constraint. The name is necessary so that an ALTER statement can be emitted. For setups that use an automated naming scheme such as that described at :ref:`sqla:constraint_naming_conventions` ``name`` here can be ``None``, as the event listener will apply the name to the constraint object when it is associated with the table. :param table_name: String name of the target table. :param columns: a list of string column names to be applied to the primary key constraint. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. """ def create_table(table_name: str, *columns, **kw) -> Optional["Table"]: """Issue a "create table" instruction using the current migration context. 
This directive receives an argument list similar to that of the traditional :class:`sqlalchemy.schema.Table` construct, but without the metadata:: from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column from alembic import op op.create_table( 'account', Column('id', INTEGER, primary_key=True), Column('name', VARCHAR(50), nullable=False), Column('description', NVARCHAR(200)), Column('timestamp', TIMESTAMP, server_default=func.now()) ) Note that :meth:`.create_table` accepts :class:`~sqlalchemy.schema.Column` constructs directly from the SQLAlchemy library. In particular, default values to be created on the database side are specified using the ``server_default`` parameter, and not ``default`` which only specifies Python-side defaults:: from alembic import op from sqlalchemy import Column, TIMESTAMP, func # specify "DEFAULT NOW" along with the "timestamp" column op.create_table('account', Column('id', INTEGER, primary_key=True), Column('timestamp', TIMESTAMP, server_default=func.now()) ) The function also returns a newly created :class:`~sqlalchemy.schema.Table` object, corresponding to the table specification given, which is suitable for immediate SQL operations, in particular :meth:`.Operations.bulk_insert`:: from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column from alembic import op account_table = op.create_table( 'account', Column('id', INTEGER, primary_key=True), Column('name', VARCHAR(50), nullable=False), Column('description', NVARCHAR(200)), Column('timestamp', TIMESTAMP, server_default=func.now()) ) op.bulk_insert( account_table, [ {"name": "A1", "description": "account 1"}, {"name": "A2", "description": "account 2"}, ] ) :param table_name: Name of the table :param \*columns: collection of :class:`~sqlalchemy.schema.Column` objects within the table, as well as optional :class:`~sqlalchemy.schema.Constraint` objects and :class:`~.sqlalchemy.schema.Index` objects. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. :param \**kw: Other keyword arguments are passed to the underlying :class:`sqlalchemy.schema.Table` object created for the command. :return: the :class:`~sqlalchemy.schema.Table` object corresponding to the parameters given. """ def create_table_comment( table_name: str, comment: Optional[str], existing_comment: None = None, schema: Optional[str] = None, ) -> Optional["Table"]: """Emit a COMMENT ON operation to set the comment for a table. .. versionadded:: 1.0.6 :param table_name: string name of the target table. :param comment: string value of the comment being registered against the specified table. :param existing_comment: String value of a comment already registered on the specified table, used within autogenerate so that the operation is reversible, but not required for direct use. .. seealso:: :meth:`.Operations.drop_table_comment` :paramref:`.Operations.alter_column.comment` """ def create_unique_constraint( constraint_name: Optional[str], table_name: str, columns: Sequence[str], schema: Optional[str] = None, **kw ) -> Any: """Issue a "create unique constraint" instruction using the current migration context. 
e.g.:: from alembic import op op.create_unique_constraint("uq_user_name", "user", ["name"]) This internally generates a :class:`~sqlalchemy.schema.Table` object containing the necessary columns, then generates a new :class:`~sqlalchemy.schema.UniqueConstraint` object which it then associates with the :class:`~sqlalchemy.schema.Table`. Any event listeners associated with this action will be fired off normally. The :class:`~sqlalchemy.schema.AddConstraint` construct is ultimately used to generate the ALTER statement. :param name: Name of the unique constraint. The name is necessary so that an ALTER statement can be emitted. For setups that use an automated naming scheme such as that described at :ref:`sqla:constraint_naming_conventions`, ``name`` here can be ``None``, as the event listener will apply the name to the constraint object when it is associated with the table. :param table_name: String name of the source table. :param columns: a list of string column names in the source table. :param deferrable: optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when issuing DDL for this constraint. :param initially: optional string. If set, emit INITIALLY when issuing DDL for this constraint. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. """ def drop_column( table_name: str, column_name: str, schema: Optional[str] = None, **kw ) -> Optional["Table"]: """Issue a "drop column" instruction using the current migration context. e.g.:: drop_column('organization', 'account_id') :param table_name: name of table :param column_name: name of column :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. :param mssql_drop_check: Optional boolean. When ``True``, on Microsoft SQL Server only, first drop the CHECK constraint on the column using a SQL-script-compatible block that selects into a @variable from sys.check_constraints, then exec's a separate DROP CONSTRAINT for that constraint. :param mssql_drop_default: Optional boolean. When ``True``, on Microsoft SQL Server only, first drop the DEFAULT constraint on the column using a SQL-script-compatible block that selects into a @variable from sys.default_constraints, then exec's a separate DROP CONSTRAINT for that default. :param mssql_drop_foreign_key: Optional boolean. When ``True``, on Microsoft SQL Server only, first drop a single FOREIGN KEY constraint on the column using a SQL-script-compatible block that selects into a @variable from sys.foreign_keys/sys.foreign_key_columns, then exec's a separate DROP CONSTRAINT for that default. Only works if the column has exactly one FK constraint which refers to it, at the moment. """ def drop_constraint( constraint_name: str, table_name: str, type_: Optional[str] = None, schema: Optional[str] = None, ) -> Optional["Table"]: """Drop a constraint of the given name, typically via DROP CONSTRAINT. :param constraint_name: name of the constraint. :param table_name: table name. :param type\_: optional, required on MySQL. can be 'foreignkey', 'primary', 'unique', or 'check'. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. 
""" def drop_index( index_name: str, table_name: Optional[str] = None, schema: Optional[str] = None, **kw ) -> Optional["Table"]: """Issue a "drop index" instruction using the current migration context. e.g.:: drop_index("accounts") :param index_name: name of the index. :param table_name: name of the owning table. Some backends such as Microsoft SQL Server require this. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. :param \**kw: Additional keyword arguments not mentioned above are dialect specific, and passed in the form ``_``. See the documentation regarding an individual dialect at :ref:`dialect_toplevel` for detail on documented arguments. """ def drop_table( table_name: str, schema: Optional[str] = None, **kw: Any ) -> None: """Issue a "drop table" instruction using the current migration context. e.g.:: drop_table("accounts") :param table_name: Name of the table :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. :param \**kw: Other keyword arguments are passed to the underlying :class:`sqlalchemy.schema.Table` object created for the command. """ def drop_table_comment( table_name: str, existing_comment: Optional[str] = None, schema: Optional[str] = None, ) -> Optional["Table"]: """Issue a "drop table comment" operation to remove an existing comment set on a table. .. versionadded:: 1.0.6 :param table_name: string name of the target table. :param existing_comment: An optional string value of a comment already registered on the specified table. .. seealso:: :meth:`.Operations.create_table_comment` :paramref:`.Operations.alter_column.comment` """ def execute( sqltext: Union[str, "TextClause", "Update"], execution_options: None = None ) -> Optional["Table"]: """Execute the given SQL using the current migration context. The given SQL can be a plain string, e.g.:: op.execute("INSERT INTO table (foo) VALUES ('some value')") Or it can be any kind of Core SQL Expression construct, such as below where we use an update construct:: from sqlalchemy.sql import table, column from sqlalchemy import String from alembic import op account = table('account', column('name', String) ) op.execute( account.update().\\ where(account.c.name==op.inline_literal('account 1')).\\ values({'name':op.inline_literal('account 2')}) ) Above, we made use of the SQLAlchemy :func:`sqlalchemy.sql.expression.table` and :func:`sqlalchemy.sql.expression.column` constructs to make a brief, ad-hoc table construct just for our UPDATE statement. A full :class:`~sqlalchemy.schema.Table` construct of course works perfectly fine as well, though note it's a recommended practice to at least ensure the definition of a table is self-contained within the migration script, rather than imported from a module that may break compatibility with older migrations. In a SQL script context, the statement is emitted directly to the output stream. There is *no* return result, however, as this function is oriented towards generating a change script that can run in "offline" mode. Additionally, parameterized statements are discouraged here, as they *will not work* in offline mode. Above, we use :meth:`.inline_literal` where parameters are to be used. 
For full interaction with a connected database where parameters can also be used normally, use the "bind" available from the context:: from alembic import op connection = op.get_bind() connection.execute( account.update().where(account.c.name=='account 1'). values({"name": "account 2"}) ) Additionally, when passing the statement as a plain string, it is first coerceed into a :func:`sqlalchemy.sql.expression.text` construct before being passed along. In the less likely case that the literal SQL string contains a colon, it must be escaped with a backslash, as:: op.execute("INSERT INTO table (foo) VALUES ('\:colon_value')") :param sqltext: Any legal SQLAlchemy expression, including: * a string * a :func:`sqlalchemy.sql.expression.text` construct. * a :func:`sqlalchemy.sql.expression.insert` construct. * a :func:`sqlalchemy.sql.expression.update`, :func:`sqlalchemy.sql.expression.insert`, or :func:`sqlalchemy.sql.expression.delete` construct. * Pretty much anything that's "executable" as described in :ref:`sqlexpression_toplevel`. .. note:: when passing a plain string, the statement is coerced into a :func:`sqlalchemy.sql.expression.text` construct. This construct considers symbols with colons, e.g. ``:foo`` to be bound parameters. To avoid this, ensure that colon symbols are escaped, e.g. ``\:foo``. :param execution_options: Optional dictionary of execution options, will be passed to :meth:`sqlalchemy.engine.Connection.execution_options`. """ def f(name: str) -> "conv": """Indicate a string name that has already had a naming convention applied to it. This feature combines with the SQLAlchemy ``naming_convention`` feature to disambiguate constraint names that have already had naming conventions applied to them, versus those that have not. This is necessary in the case that the ``"%(constraint_name)s"`` token is used within a naming convention, so that it can be identified that this particular name should remain fixed. If the :meth:`.Operations.f` is used on a constraint, the naming convention will not take effect:: op.add_column('t', 'x', Boolean(name=op.f('ck_bool_t_x'))) Above, the CHECK constraint generated will have the name ``ck_bool_t_x`` regardless of whether or not a naming convention is in use. Alternatively, if a naming convention is in use, and 'f' is not used, names will be converted along conventions. If the ``target_metadata`` contains the naming convention ``{"ck": "ck_bool_%(table_name)s_%(constraint_name)s"}``, then the output of the following: op.add_column('t', 'x', Boolean(name='x')) will be:: CONSTRAINT ck_bool_t_x CHECK (x in (1, 0))) The function is rendered in the output of autogenerate when a particular constraint name is already converted. """ def get_bind() -> "Connection": """Return the current 'bind'. Under normal circumstances, this is the :class:`~sqlalchemy.engine.Connection` currently being used to emit SQL to the database. In a SQL script context, this value is ``None``. [TODO: verify this] """ def get_context(): """Return the :class:`.MigrationContext` object that's currently in use. """ def implementation_for(op_cls: Any) -> Callable: """Register an implementation for a given :class:`.MigrateOperation`. This is part of the operation extensibility API. .. seealso:: :ref:`operation_plugins` - example of use """ def inline_literal( value: Union[str, int], type_: None = None ) -> "_literal_bindparam": """Produce an 'inline literal' expression, suitable for using in an INSERT, UPDATE, or DELETE statement. 
When using Alembic in "offline" mode, CRUD operations aren't compatible with SQLAlchemy's default behavior surrounding literal values, which is that they are converted into bound values and passed separately into the ``execute()`` method of the DBAPI cursor. An offline SQL script needs to have these rendered inline. While it should always be noted that inline literal values are an **enormous** security hole in an application that handles untrusted input, a schema migration is not run in this context, so literals are safe to render inline, with the caveat that advanced types like dates may not be supported directly by SQLAlchemy. See :meth:`.execute` for an example usage of :meth:`.inline_literal`. The environment can also be configured to attempt to render "literal" values inline automatically, for those simple types that are supported by the dialect; see :paramref:`.EnvironmentContext.configure.literal_binds` for this more recently added feature. :param value: The value to render. Strings, integers, and simple numerics should be supported. Other types like boolean, dates, etc. may or may not be supported yet by various backends. :param type\_: optional - a :class:`sqlalchemy.types.TypeEngine` subclass stating the type of this value. In SQLAlchemy expressions, this is usually derived automatically from the Python type of the value itself, as well as based on the context in which the value is used. .. seealso:: :paramref:`.EnvironmentContext.configure.literal_binds` """ def invoke(operation: "MigrateOperation") -> Any: """Given a :class:`.MigrateOperation`, invoke it in terms of this :class:`.Operations` instance. """ def register_operation( name: str, sourcename: Optional[str] = None ) -> Callable: """Register a new operation for this class. This method is normally used to add new operations to the :class:`.Operations` class, and possibly the :class:`.BatchOperations` class as well. All Alembic migration operations are implemented via this system, however the system is also available as a public API to facilitate adding custom operations. .. seealso:: :ref:`operation_plugins` """ def rename_table( old_table_name: str, new_table_name: str, schema: Optional[str] = None ) -> Optional["Table"]: """Emit an ALTER TABLE to rename a table. :param old_table_name: old name. :param new_table_name: new name. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. """ alembic-rel_1_7_6/alembic/operations/000077500000000000000000000000001417624537100177165ustar00rootroot00000000000000alembic-rel_1_7_6/alembic/operations/__init__.py000066400000000000000000000002701417624537100220260ustar00rootroot00000000000000from . import toimpl from .base import BatchOperations from .base import Operations from .ops import MigrateOperation __all__ = ["Operations", "BatchOperations", "MigrateOperation"] alembic-rel_1_7_6/alembic/operations/base.py000066400000000000000000000440671417624537100212150ustar00rootroot00000000000000from contextlib import contextmanager import re import textwrap from typing import Any from typing import Callable from typing import Iterator from typing import List # noqa from typing import Optional from typing import Sequence # noqa from typing import Type # noqa from typing import TYPE_CHECKING from typing import Union from sqlalchemy.sql.elements import conv from . import batch from . import schemaobj from .. 
import util from ..util import sqla_compat from ..util.compat import formatannotation_fwdref from ..util.compat import inspect_formatargspec from ..util.compat import inspect_getfullargspec NoneType = type(None) if TYPE_CHECKING: from sqlalchemy import Table # noqa from sqlalchemy.engine import Connection from .batch import BatchOperationsImpl from .ops import MigrateOperation from ..runtime.migration import MigrationContext from ..util.sqla_compat import _literal_bindparam __all__ = ("Operations", "BatchOperations") class Operations(util.ModuleClsProxy): """Define high level migration operations. Each operation corresponds to some schema migration operation, executed against a particular :class:`.MigrationContext` which in turn represents connectivity to a database, or a file output stream. While :class:`.Operations` is normally configured as part of the :meth:`.EnvironmentContext.run_migrations` method called from an ``env.py`` script, a standalone :class:`.Operations` instance can be made for use cases external to regular Alembic migrations by passing in a :class:`.MigrationContext`:: from alembic.migration import MigrationContext from alembic.operations import Operations conn = myengine.connect() ctx = MigrationContext.configure(conn) op = Operations(ctx) op.alter_column("t", "c", nullable=True) Note that as of 0.8, most of the methods on this class are produced dynamically using the :meth:`.Operations.register_operation` method. """ _to_impl = util.Dispatcher() def __init__( self, migration_context: "MigrationContext", impl: Optional["BatchOperationsImpl"] = None, ) -> None: """Construct a new :class:`.Operations` :param migration_context: a :class:`.MigrationContext` instance. """ self.migration_context = migration_context if impl is None: self.impl = migration_context.impl else: self.impl = impl self.schema_obj = schemaobj.SchemaObjects(migration_context) @classmethod def register_operation( cls, name: str, sourcename: Optional[str] = None ) -> Callable: """Register a new operation for this class. This method is normally used to add new operations to the :class:`.Operations` class, and possibly the :class:`.BatchOperations` class as well. All Alembic migration operations are implemented via this system, however the system is also available as a public API to facilitate adding custom operations. .. 
seealso:: :ref:`operation_plugins` """ def register(op_cls): if sourcename is None: fn = getattr(op_cls, name) source_name = fn.__name__ else: fn = getattr(op_cls, sourcename) source_name = fn.__name__ spec = inspect_getfullargspec(fn) name_args = spec[0] assert name_args[0:2] == ["cls", "operations"] name_args[0:2] = ["self"] args = inspect_formatargspec( *spec, formatannotation=formatannotation_fwdref ) num_defaults = len(spec[3]) if spec[3] else 0 if num_defaults: defaulted_vals = name_args[0 - num_defaults :] else: defaulted_vals = () apply_kw = inspect_formatargspec( name_args, spec[1], spec[2], defaulted_vals, formatvalue=lambda x: "=" + x, formatannotation=formatannotation_fwdref, ) args = re.sub( r'[_]?ForwardRef\(([\'"].+?[\'"])\)', lambda m: m.group(1), args, ) func_text = textwrap.dedent( """\ def %(name)s%(args)s: %(doc)r return op_cls.%(source_name)s%(apply_kw)s """ % { "name": name, "source_name": source_name, "args": args, "apply_kw": apply_kw, "doc": fn.__doc__, } ) globals_ = dict(globals()) globals_.update({"op_cls": op_cls}) lcl = {} exec(func_text, globals_, lcl) setattr(cls, name, lcl[name]) fn.__func__.__doc__ = ( "This method is proxied on " "the :class:`.%s` class, via the :meth:`.%s.%s` method." % (cls.__name__, cls.__name__, name) ) if hasattr(fn, "_legacy_translations"): lcl[name]._legacy_translations = fn._legacy_translations return op_cls return register @classmethod def implementation_for(cls, op_cls: Any) -> Callable: """Register an implementation for a given :class:`.MigrateOperation`. This is part of the operation extensibility API. .. seealso:: :ref:`operation_plugins` - example of use """ def decorate(fn): cls._to_impl.dispatch_for(op_cls)(fn) return fn return decorate @classmethod @contextmanager def context( cls, migration_context: "MigrationContext" ) -> Iterator["Operations"]: op = Operations(migration_context) op._install_proxy() yield op op._remove_proxy() @contextmanager def batch_alter_table( self, table_name, schema=None, recreate="auto", partial_reordering=None, copy_from=None, table_args=(), table_kwargs=util.immutabledict(), reflect_args=(), reflect_kwargs=util.immutabledict(), naming_convention=None, ): """Invoke a series of per-table migrations in batch. Batch mode allows a series of operations specific to a table to be syntactically grouped together, and allows for alternate modes of table migration, in particular the "recreate" style of migration required by SQLite. "recreate" style is as follows: 1. A new table is created with the new specification, based on the migration directives within the batch, using a temporary name. 2. the data copied from the existing table to the new table. 3. the existing table is dropped. 4. the new table is renamed to the existing table name. The directive by default will only use "recreate" style on the SQLite backend, and only if directives are present which require this form, e.g. anything other than ``add_column()``. The batch operation on other backends will proceed using standard ALTER TABLE operations. The method is used as a context manager, which returns an instance of :class:`.BatchOperations`; this object is the same as :class:`.Operations` except that table names and schema names are omitted. E.g.:: with op.batch_alter_table("some_table") as batch_op: batch_op.add_column(Column('foo', Integer)) batch_op.drop_column('bar') The operations within the context manager are invoked at once when the context is ended. 
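For example (a minimal sketch; the table, column, and index names here are illustrative only), several directives may be grouped so that they are all emitted when the block exits::

    with op.batch_alter_table("account") as batch_op:
        batch_op.alter_column("name", nullable=False)
        batch_op.create_index("ix_account_name", ["name"])
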
When run against SQLite, if the migrations include operations not supported by SQLite's ALTER TABLE, the entire table will be copied to a new one with the new specification, moving all data across as well. The copy operation by default uses reflection to retrieve the current structure of the table, and therefore :meth:`.batch_alter_table` in this mode requires that the migration is run in "online" mode. The ``copy_from`` parameter may be passed which refers to an existing :class:`.Table` object, which will bypass this reflection step. .. note:: The table copy operation will currently not copy CHECK constraints, and may not copy UNIQUE constraints that are unnamed, as is possible on SQLite. See the section :ref:`sqlite_batch_constraints` for workarounds. :param table_name: name of table :param schema: optional schema name. :param recreate: under what circumstances the table should be recreated. At its default of ``"auto"``, the SQLite dialect will recreate the table if any operations other than ``add_column()``, ``create_index()``, or ``drop_index()`` are present. Other options include ``"always"`` and ``"never"``. :param copy_from: optional :class:`~sqlalchemy.schema.Table` object that will act as the structure of the table being copied. If omitted, table reflection is used to retrieve the structure of the table. .. seealso:: :ref:`batch_offline_mode` :paramref:`~.Operations.batch_alter_table.reflect_args` :paramref:`~.Operations.batch_alter_table.reflect_kwargs` :param reflect_args: a sequence of additional positional arguments that will be applied to the table structure being reflected / copied; this may be used to pass column and constraint overrides to the table that will be reflected, in lieu of passing the whole :class:`~sqlalchemy.schema.Table` using :paramref:`~.Operations.batch_alter_table.copy_from`. :param reflect_kwargs: a dictionary of additional keyword arguments that will be applied to the table structure being copied; this may be used to pass additional table and reflection options to the table that will be reflected, in lieu of passing the whole :class:`~sqlalchemy.schema.Table` using :paramref:`~.Operations.batch_alter_table.copy_from`. :param table_args: a sequence of additional positional arguments that will be applied to the new :class:`~sqlalchemy.schema.Table` when created, in addition to those copied from the source table. This may be used to provide additional constraints such as CHECK constraints that may not be reflected. :param table_kwargs: a dictionary of additional keyword arguments that will be applied to the new :class:`~sqlalchemy.schema.Table` when created, in addition to those copied from the source table. This may be used to provide for additional table options that may not be reflected. :param naming_convention: a naming convention dictionary of the form described at :ref:`autogen_naming_conventions` which will be applied to the :class:`~sqlalchemy.schema.MetaData` during the reflection process. This is typically required if one wants to drop SQLite constraints, as these constraints will not have names when reflected on this backend. Requires SQLAlchemy **0.9.4** or greater. .. seealso:: :ref:`dropping_sqlite_foreign_keys` :param partial_reordering: a list of tuples, each suggesting a desired ordering of two or more columns in the newly created table. Requires that :paramref:`.batch_alter_table.recreate` is set to ``"always"``. 
Examples, given a table with columns "a", "b", "c", and "d": Specify the order of all columns:: with op.batch_alter_table( "some_table", recreate="always", partial_reordering=[("c", "d", "a", "b")] ) as batch_op: pass Ensure "d" appears before "c", and "b", appears before "a":: with op.batch_alter_table( "some_table", recreate="always", partial_reordering=[("d", "c"), ("b", "a")] ) as batch_op: pass The ordering of columns not included in the partial_reordering set is undefined. Therefore it is best to specify the complete ordering of all columns for best results. .. versionadded:: 1.4.0 .. note:: batch mode requires SQLAlchemy 0.8 or above. .. seealso:: :ref:`batch_migrations` """ impl = batch.BatchOperationsImpl( self, table_name, schema, recreate, copy_from, table_args, table_kwargs, reflect_args, reflect_kwargs, naming_convention, partial_reordering, ) batch_op = BatchOperations(self.migration_context, impl=impl) yield batch_op impl.flush() def get_context(self): """Return the :class:`.MigrationContext` object that's currently in use. """ return self.migration_context def invoke(self, operation: "MigrateOperation") -> Any: """Given a :class:`.MigrateOperation`, invoke it in terms of this :class:`.Operations` instance. """ fn = self._to_impl.dispatch( operation, self.migration_context.impl.__dialect__ ) return fn(self, operation) def f(self, name: str) -> "conv": """Indicate a string name that has already had a naming convention applied to it. This feature combines with the SQLAlchemy ``naming_convention`` feature to disambiguate constraint names that have already had naming conventions applied to them, versus those that have not. This is necessary in the case that the ``"%(constraint_name)s"`` token is used within a naming convention, so that it can be identified that this particular name should remain fixed. If the :meth:`.Operations.f` is used on a constraint, the naming convention will not take effect:: op.add_column('t', 'x', Boolean(name=op.f('ck_bool_t_x'))) Above, the CHECK constraint generated will have the name ``ck_bool_t_x`` regardless of whether or not a naming convention is in use. Alternatively, if a naming convention is in use, and 'f' is not used, names will be converted along conventions. If the ``target_metadata`` contains the naming convention ``{"ck": "ck_bool_%(table_name)s_%(constraint_name)s"}``, then the output of the following: op.add_column('t', 'x', Boolean(name='x')) will be:: CONSTRAINT ck_bool_t_x CHECK (x in (1, 0))) The function is rendered in the output of autogenerate when a particular constraint name is already converted. """ return conv(name) def inline_literal( self, value: Union[str, int], type_: None = None ) -> "_literal_bindparam": r"""Produce an 'inline literal' expression, suitable for using in an INSERT, UPDATE, or DELETE statement. When using Alembic in "offline" mode, CRUD operations aren't compatible with SQLAlchemy's default behavior surrounding literal values, which is that they are converted into bound values and passed separately into the ``execute()`` method of the DBAPI cursor. An offline SQL script needs to have these rendered inline. While it should always be noted that inline literal values are an **enormous** security hole in an application that handles untrusted input, a schema migration is not run in this context, so literals are safe to render inline, with the caveat that advanced types like dates may not be supported directly by SQLAlchemy. See :meth:`.execute` for an example usage of :meth:`.inline_literal`. 
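As a brief sketch of such usage (assuming ``account`` is a table construct already defined in the migration script; the names and values are illustrative only)::

    op.execute(
        account.update()
        .where(account.c.name == op.inline_literal("account 1"))
        .values({"name": op.inline_literal("account 2")})
    )
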
The environment can also be configured to attempt to render "literal" values inline automatically, for those simple types that are supported by the dialect; see :paramref:`.EnvironmentContext.configure.literal_binds` for this more recently added feature. :param value: The value to render. Strings, integers, and simple numerics should be supported. Other types like boolean, dates, etc. may or may not be supported yet by various backends. :param type\_: optional - a :class:`sqlalchemy.types.TypeEngine` subclass stating the type of this value. In SQLAlchemy expressions, this is usually derived automatically from the Python type of the value itself, as well as based on the context in which the value is used. .. seealso:: :paramref:`.EnvironmentContext.configure.literal_binds` """ return sqla_compat._literal_bindparam(None, value, type_=type_) def get_bind(self) -> "Connection": """Return the current 'bind'. Under normal circumstances, this is the :class:`~sqlalchemy.engine.Connection` currently being used to emit SQL to the database. In a SQL script context, this value is ``None``. [TODO: verify this] """ return self.migration_context.impl.bind class BatchOperations(Operations): """Modifies the interface :class:`.Operations` for batch mode. This basically omits the ``table_name`` and ``schema`` parameters from associated methods, as these are a given when running under batch mode. .. seealso:: :meth:`.Operations.batch_alter_table` Note that as of 0.8, most of the methods on this class are produced dynamically using the :meth:`.Operations.register_operation` method. """ def _noop(self, operation): raise NotImplementedError( "The %s method does not apply to a batch table alter operation." % operation ) alembic-rel_1_7_6/alembic/operations/batch.py000066400000000000000000000603461417624537100213620ustar00rootroot00000000000000from typing import Any from typing import cast from typing import Dict from typing import List from typing import Optional from typing import Tuple from typing import TYPE_CHECKING from typing import Union from sqlalchemy import CheckConstraint from sqlalchemy import Column from sqlalchemy import ForeignKeyConstraint from sqlalchemy import Index from sqlalchemy import MetaData from sqlalchemy import PrimaryKeyConstraint from sqlalchemy import schema as sql_schema from sqlalchemy import Table from sqlalchemy import types as sqltypes from sqlalchemy.events import SchemaEventTarget from sqlalchemy.util import OrderedDict from sqlalchemy.util import topological from ..util import exc from ..util.sqla_compat import _columns_for_constraint from ..util.sqla_compat import _copy from ..util.sqla_compat import _ensure_scope_for_ddl from ..util.sqla_compat import _fk_is_self_referential from ..util.sqla_compat import _insert_inline from ..util.sqla_compat import _is_type_bound from ..util.sqla_compat import _remove_column_from_collection from ..util.sqla_compat import _resolve_for_variant from ..util.sqla_compat import _select if TYPE_CHECKING: from typing import Literal from sqlalchemy.engine import Dialect from sqlalchemy.sql.elements import ColumnClause from sqlalchemy.sql.elements import quoted_name from sqlalchemy.sql.functions import Function from sqlalchemy.sql.schema import Constraint from sqlalchemy.sql.type_api import TypeEngine from ..ddl.impl import DefaultImpl class BatchOperationsImpl: def __init__( self, operations, table_name, schema, recreate, copy_from, table_args, table_kwargs, reflect_args, reflect_kwargs, naming_convention, partial_reordering, ): self.operations = 
operations self.table_name = table_name self.schema = schema if recreate not in ("auto", "always", "never"): raise ValueError( "recreate may be one of 'auto', 'always', or 'never'." ) self.recreate = recreate self.copy_from = copy_from self.table_args = table_args self.table_kwargs = dict(table_kwargs) self.reflect_args = reflect_args self.reflect_kwargs = dict(reflect_kwargs) self.reflect_kwargs.setdefault( "listeners", list(self.reflect_kwargs.get("listeners", ())) ) self.reflect_kwargs["listeners"].append( ("column_reflect", operations.impl.autogen_column_reflect) ) self.naming_convention = naming_convention self.partial_reordering = partial_reordering self.batch = [] @property def dialect(self) -> "Dialect": return self.operations.impl.dialect @property def impl(self) -> "DefaultImpl": return self.operations.impl def _should_recreate(self) -> bool: if self.recreate == "auto": return self.operations.impl.requires_recreate_in_batch(self) elif self.recreate == "always": return True else: return False def flush(self) -> None: should_recreate = self._should_recreate() with _ensure_scope_for_ddl(self.impl.connection): if not should_recreate: for opname, arg, kw in self.batch: fn = getattr(self.operations.impl, opname) fn(*arg, **kw) else: if self.naming_convention: m1 = MetaData(naming_convention=self.naming_convention) else: m1 = MetaData() if self.copy_from is not None: existing_table = self.copy_from reflected = False else: existing_table = Table( self.table_name, m1, schema=self.schema, autoload_with=self.operations.get_bind(), *self.reflect_args, **self.reflect_kwargs ) reflected = True batch_impl = ApplyBatchImpl( self.impl, existing_table, self.table_args, self.table_kwargs, reflected, partial_reordering=self.partial_reordering, ) for opname, arg, kw in self.batch: fn = getattr(batch_impl, opname) fn(*arg, **kw) batch_impl._create(self.impl) def alter_column(self, *arg, **kw) -> None: self.batch.append(("alter_column", arg, kw)) def add_column(self, *arg, **kw) -> None: if ( "insert_before" in kw or "insert_after" in kw ) and not self._should_recreate(): raise exc.CommandError( "Can't specify insert_before or insert_after when using " "ALTER; please specify recreate='always'" ) self.batch.append(("add_column", arg, kw)) def drop_column(self, *arg, **kw) -> None: self.batch.append(("drop_column", arg, kw)) def add_constraint(self, const: "Constraint") -> None: self.batch.append(("add_constraint", (const,), {})) def drop_constraint(self, const: "Constraint") -> None: self.batch.append(("drop_constraint", (const,), {})) def rename_table(self, *arg, **kw): self.batch.append(("rename_table", arg, kw)) def create_index(self, idx: "Index") -> None: self.batch.append(("create_index", (idx,), {})) def drop_index(self, idx: "Index") -> None: self.batch.append(("drop_index", (idx,), {})) def create_table_comment(self, table): self.batch.append(("create_table_comment", (table,), {})) def drop_table_comment(self, table): self.batch.append(("drop_table_comment", (table,), {})) def create_table(self, table): raise NotImplementedError("Can't create table in batch mode") def drop_table(self, table): raise NotImplementedError("Can't drop table in batch mode") def create_column_comment(self, column): self.batch.append(("create_column_comment", (column,), {})) class ApplyBatchImpl: def __init__( self, impl: "DefaultImpl", table: "Table", table_args: tuple, table_kwargs: Dict[str, Any], reflected: bool, partial_reordering: tuple = (), ) -> None: self.impl = impl self.table = table # this is a Table object 
self.table_args = table_args self.table_kwargs = table_kwargs self.temp_table_name = self._calc_temp_name(table.name) self.new_table: Optional[Table] = None self.partial_reordering = partial_reordering # tuple of tuples self.add_col_ordering: Tuple[ Tuple[str, str], ... ] = () # tuple of tuples self.column_transfers = OrderedDict( (c.name, {"expr": c}) for c in self.table.c ) self.existing_ordering = list(self.column_transfers) self.reflected = reflected self._grab_table_elements() @classmethod def _calc_temp_name(cls, tablename: "quoted_name") -> str: return ("_alembic_tmp_%s" % tablename)[0:50] def _grab_table_elements(self) -> None: schema = self.table.schema self.columns: Dict[str, "Column"] = OrderedDict() for c in self.table.c: c_copy = _copy(c, schema=schema) c_copy.unique = c_copy.index = False # ensure that the type object was copied, # as we may need to modify it in-place if isinstance(c.type, SchemaEventTarget): assert c_copy.type is not c.type self.columns[c.name] = c_copy self.named_constraints: Dict[str, "Constraint"] = {} self.unnamed_constraints = [] self.col_named_constraints = {} self.indexes: Dict[str, "Index"] = {} self.new_indexes: Dict[str, "Index"] = {} for const in self.table.constraints: if _is_type_bound(const): continue elif ( self.reflected and isinstance(const, CheckConstraint) and not const.name ): # TODO: we are skipping unnamed reflected CheckConstraint # because # we have no way to determine _is_type_bound() for these. pass elif const.name: self.named_constraints[const.name] = const else: self.unnamed_constraints.append(const) if not self.reflected: for col in self.table.c: for const in col.constraints: if const.name: self.col_named_constraints[const.name] = (col, const) for idx in self.table.indexes: self.indexes[idx.name] = idx for k in self.table.kwargs: self.table_kwargs.setdefault(k, self.table.kwargs[k]) def _adjust_self_columns_for_partial_reordering(self) -> None: pairs = set() col_by_idx = list(self.columns) if self.partial_reordering: for tuple_ in self.partial_reordering: for index, elem in enumerate(tuple_): if index > 0: pairs.add((tuple_[index - 1], elem)) else: for index, elem in enumerate(self.existing_ordering): if index > 0: pairs.add((col_by_idx[index - 1], elem)) pairs.update(self.add_col_ordering) # this can happen if some columns were dropped and not removed # from existing_ordering. 
this should be prevented already, but # conservatively making sure this didn't happen pairs_list = [p for p in pairs if p[0] != p[1]] sorted_ = list( topological.sort(pairs_list, col_by_idx, deterministic_order=True) ) self.columns = OrderedDict((k, self.columns[k]) for k in sorted_) self.column_transfers = OrderedDict( (k, self.column_transfers[k]) for k in sorted_ ) def _transfer_elements_to_new_table(self) -> None: assert self.new_table is None, "Can only create new table once" m = MetaData() schema = self.table.schema if self.partial_reordering or self.add_col_ordering: self._adjust_self_columns_for_partial_reordering() self.new_table = new_table = Table( self.temp_table_name, m, *(list(self.columns.values()) + list(self.table_args)), schema=schema, **self.table_kwargs ) for const in ( list(self.named_constraints.values()) + self.unnamed_constraints ): const_columns = set( [c.key for c in _columns_for_constraint(const)] ) if not const_columns.issubset(self.column_transfers): continue const_copy: "Constraint" if isinstance(const, ForeignKeyConstraint): if _fk_is_self_referential(const): # for self-referential constraint, refer to the # *original* table name, and not _alembic_batch_temp. # This is consistent with how we're handling # FK constraints from other tables; we assume SQLite # no foreign keys just keeps the names unchanged, so # when we rename back, they match again. const_copy = _copy( const, schema=schema, target_table=self.table ) else: # "target_table" for ForeignKeyConstraint.copy() is # only used if the FK is detected as being # self-referential, which we are handling above. const_copy = _copy(const, schema=schema) else: const_copy = _copy( const, schema=schema, target_table=new_table ) if isinstance(const, ForeignKeyConstraint): self._setup_referent(m, const) new_table.append_constraint(const_copy) def _gather_indexes_from_both_tables(self) -> List["Index"]: assert self.new_table is not None idx: List[Index] = [] idx.extend(self.indexes.values()) for index in self.new_indexes.values(): idx.append( Index( index.name, unique=index.unique, *[self.new_table.c[col] for col in index.columns.keys()], **index.kwargs ) ) return idx def _setup_referent( self, metadata: "MetaData", constraint: "ForeignKeyConstraint" ) -> None: spec = constraint.elements[ 0 ]._get_colspec() # type:ignore[attr-defined] parts = spec.split(".") tname = parts[-2] if len(parts) == 3: referent_schema = parts[0] else: referent_schema = None if tname != self.temp_table_name: key = sql_schema._get_table_key(tname, referent_schema) def colspec(elem: Any): return elem._get_colspec() if key in metadata.tables: t = metadata.tables[key] for elem in constraint.elements: colname = colspec(elem).split(".")[-1] if colname not in t.c: t.append_column(Column(colname, sqltypes.NULLTYPE)) else: Table( tname, metadata, *[ Column(n, sqltypes.NULLTYPE) for n in [ colspec(elem).split(".")[-1] for elem in constraint.elements ] ], schema=referent_schema ) def _create(self, op_impl: "DefaultImpl") -> None: self._transfer_elements_to_new_table() op_impl.prep_table_for_batch(self, self.table) assert self.new_table is not None op_impl.create_table(self.new_table) try: op_impl._exec( _insert_inline(self.new_table).from_select( list( k for k, transfer in self.column_transfers.items() if "expr" in transfer ), _select( *[ transfer["expr"] for transfer in self.column_transfers.values() if "expr" in transfer ] ), ) ) op_impl.drop_table(self.table) except: op_impl.drop_table(self.new_table) raise else: op_impl.rename_table( 
self.temp_table_name, self.table.name, schema=self.table.schema ) self.new_table.name = self.table.name try: for idx in self._gather_indexes_from_both_tables(): op_impl.create_index(idx) finally: self.new_table.name = self.temp_table_name def alter_column( self, table_name: str, column_name: str, nullable: Optional[bool] = None, server_default: Optional[Union["Function", str, bool]] = False, name: Optional[str] = None, type_: Optional["TypeEngine"] = None, autoincrement: None = None, comment: Union[str, "Literal[False]"] = False, **kw ) -> None: existing = self.columns[column_name] existing_transfer: Dict[str, Any] = self.column_transfers[column_name] if name is not None and name != column_name: # note that we don't change '.key' - we keep referring # to the renamed column by its old key in _create(). neat! existing.name = name existing_transfer["name"] = name existing_type = kw.get("existing_type", None) if existing_type: resolved_existing_type = _resolve_for_variant( kw["existing_type"], self.impl.dialect ) # pop named constraints for Boolean/Enum for rename if ( isinstance(resolved_existing_type, SchemaEventTarget) and resolved_existing_type.name # type:ignore[attr-defined] # noqa E501 ): self.named_constraints.pop( resolved_existing_type.name, None, # type:ignore[attr-defined] ) if type_ is not None: type_ = sqltypes.to_instance(type_) # old type is being discarded so turn off eventing # rules. Alternatively we can # erase the events set up by this type, but this is simpler. # we also ignore the drop_constraint that will come here from # Operations.implementation_for(alter_column) if isinstance(existing.type, SchemaEventTarget): existing.type._create_events = ( # type:ignore[attr-defined] existing.type.create_constraint # type:ignore[attr-defined] # noqa ) = False self.impl.cast_for_batch_migrate( existing, existing_transfer, type_ ) existing.type = type_ # we *dont* however set events for the new type, because # alter_column is invoked from # Operations.implementation_for(alter_column) which already # will emit an add_constraint() if nullable is not None: existing.nullable = nullable if server_default is not False: if server_default is None: existing.server_default = None else: sql_schema.DefaultClause( server_default )._set_parent( # type:ignore[attr-defined] existing ) if autoincrement is not None: existing.autoincrement = bool(autoincrement) if comment is not False: existing.comment = comment def _setup_dependencies_for_add_column( self, colname: str, insert_before: Optional[str], insert_after: Optional[str], ) -> None: index_cols = self.existing_ordering col_indexes = {name: i for i, name in enumerate(index_cols)} if not self.partial_reordering: if insert_after: if not insert_before: if insert_after in col_indexes: # insert after an existing column idx = col_indexes[insert_after] + 1 if idx < len(index_cols): insert_before = index_cols[idx] else: # insert after a column that is also new insert_before = dict(self.add_col_ordering)[ insert_after ] if insert_before: if not insert_after: if insert_before in col_indexes: # insert before an existing column idx = col_indexes[insert_before] - 1 if idx >= 0: insert_after = index_cols[idx] else: # insert before a column that is also new insert_after = dict( (b, a) for a, b in self.add_col_ordering )[insert_before] if insert_before: self.add_col_ordering += ((colname, insert_before),) if insert_after: self.add_col_ordering += ((insert_after, colname),) if ( not self.partial_reordering and not insert_before and not insert_after and 
col_indexes ): self.add_col_ordering += ((index_cols[-1], colname),) def add_column( self, table_name: str, column: "Column", insert_before: Optional[str] = None, insert_after: Optional[str] = None, **kw ) -> None: self._setup_dependencies_for_add_column( column.name, insert_before, insert_after ) # we copy the column because operations.add_column() # gives us a Column that is part of a Table already. self.columns[column.name] = _copy(column, schema=self.table.schema) self.column_transfers[column.name] = {} def drop_column( self, table_name: str, column: Union["ColumnClause", "Column"], **kw ) -> None: if column.name in self.table.primary_key.columns: _remove_column_from_collection( self.table.primary_key.columns, column ) del self.columns[column.name] del self.column_transfers[column.name] self.existing_ordering.remove(column.name) # pop named constraints for Boolean/Enum for rename if ( "existing_type" in kw and isinstance(kw["existing_type"], SchemaEventTarget) and kw["existing_type"].name # type:ignore[attr-defined] ): self.named_constraints.pop( kw["existing_type"].name, None # type:ignore[attr-defined] ) def create_column_comment(self, column): """the batch table creation function will issue create_column_comment on the real "impl" as part of the create table process. That is, the Column object will have the comment on it already, so when it is received by add_column() it will be a normal part of the CREATE TABLE and doesn't need an extra step here. """ def create_table_comment(self, table): """the batch table creation function will issue create_table_comment on the real "impl" as part of the create table process. """ def drop_table_comment(self, table): """the batch table creation function will issue drop_table_comment on the real "impl" as part of the create table process. 
""" def add_constraint(self, const: "Constraint") -> None: if not const.name: raise ValueError("Constraint must have a name") if isinstance(const, sql_schema.PrimaryKeyConstraint): if self.table.primary_key in self.unnamed_constraints: self.unnamed_constraints.remove(self.table.primary_key) self.named_constraints[const.name] = const def drop_constraint(self, const: "Constraint") -> None: if not const.name: raise ValueError("Constraint must have a name") try: if const.name in self.col_named_constraints: col, const = self.col_named_constraints.pop(const.name) for col_const in list(self.columns[col.name].constraints): if col_const.name == const.name: self.columns[col.name].constraints.remove(col_const) else: const = self.named_constraints.pop(cast(str, const.name)) except KeyError: if _is_type_bound(const): # type-bound constraints are only included in the new # table via their type object in any case, so ignore the # drop_constraint() that comes here via the # Operations.implementation_for(alter_column) return raise ValueError("No such constraint: '%s'" % const.name) else: if isinstance(const, PrimaryKeyConstraint): for col in const.columns: self.columns[col.name].primary_key = False def create_index(self, idx: "Index") -> None: self.new_indexes[idx.name] = idx def drop_index(self, idx: "Index") -> None: try: del self.indexes[idx.name] except KeyError: raise ValueError("No such index: '%s'" % idx.name) def rename_table(self, *arg, **kw): raise NotImplementedError("TODO") alembic-rel_1_7_6/alembic/operations/ops.py000066400000000000000000002567521417624537100211120ustar00rootroot00000000000000from abc import abstractmethod import re from typing import Any from typing import Callable from typing import cast from typing import FrozenSet from typing import Iterator from typing import List from typing import MutableMapping from typing import Optional from typing import Sequence from typing import Set from typing import Tuple from typing import Type from typing import TYPE_CHECKING from typing import Union from sqlalchemy.types import NULLTYPE from . import schemaobj from .base import BatchOperations from .base import Operations from .. import util from ..util import sqla_compat if TYPE_CHECKING: from sqlalchemy.sql.dml import Insert from sqlalchemy.sql.dml import Update from sqlalchemy.sql.elements import BinaryExpression from sqlalchemy.sql.elements import ColumnElement from sqlalchemy.sql.elements import conv from sqlalchemy.sql.elements import quoted_name from sqlalchemy.sql.elements import TextClause from sqlalchemy.sql.functions import Function from sqlalchemy.sql.schema import CheckConstraint from sqlalchemy.sql.schema import Column from sqlalchemy.sql.schema import Computed from sqlalchemy.sql.schema import Constraint from sqlalchemy.sql.schema import ForeignKeyConstraint from sqlalchemy.sql.schema import Identity from sqlalchemy.sql.schema import Index from sqlalchemy.sql.schema import MetaData from sqlalchemy.sql.schema import PrimaryKeyConstraint from sqlalchemy.sql.schema import SchemaItem from sqlalchemy.sql.schema import Table from sqlalchemy.sql.schema import UniqueConstraint from sqlalchemy.sql.selectable import TableClause from sqlalchemy.sql.type_api import TypeEngine from ..autogenerate.rewriter import Rewriter from ..runtime.migration import MigrationContext class MigrateOperation: """base class for migration command and organization objects. This system is part of the operation extensibility API. .. 
seealso:: :ref:`operation_objects` :ref:`operation_plugins` :ref:`customizing_revision` """ @util.memoized_property def info(self): """A dictionary that may be used to store arbitrary information along with this :class:`.MigrateOperation` object. """ return {} _mutations: FrozenSet["Rewriter"] = frozenset() def reverse(self) -> "MigrateOperation": raise NotImplementedError def to_diff_tuple(self) -> Tuple[Any, ...]: raise NotImplementedError class AddConstraintOp(MigrateOperation): """Represent an add constraint operation.""" add_constraint_ops = util.Dispatcher() @property def constraint_type(self): raise NotImplementedError() @classmethod def register_add_constraint(cls, type_: str) -> Callable: def go(klass): cls.add_constraint_ops.dispatch_for(type_)(klass.from_constraint) return klass return go @classmethod def from_constraint(cls, constraint: "Constraint") -> "AddConstraintOp": return cls.add_constraint_ops.dispatch(constraint.__visit_name__)( constraint ) @abstractmethod def to_constraint( self, migration_context: Optional["MigrationContext"] = None ) -> "Constraint": pass def reverse(self) -> "DropConstraintOp": return DropConstraintOp.from_constraint(self.to_constraint()) def to_diff_tuple(self) -> Tuple[str, "Constraint"]: return ("add_constraint", self.to_constraint()) @Operations.register_operation("drop_constraint") @BatchOperations.register_operation("drop_constraint", "batch_drop_constraint") class DropConstraintOp(MigrateOperation): """Represent a drop constraint operation.""" def __init__( self, constraint_name: Optional[str], table_name: str, type_: Optional[str] = None, schema: Optional[str] = None, _reverse: Optional["AddConstraintOp"] = None, ) -> None: self.constraint_name = constraint_name self.table_name = table_name self.constraint_type = type_ self.schema = schema self._reverse = _reverse def reverse(self) -> "AddConstraintOp": return AddConstraintOp.from_constraint(self.to_constraint()) def to_diff_tuple( self, ) -> Tuple[str, "SchemaItem"]: if self.constraint_type == "foreignkey": return ("remove_fk", self.to_constraint()) else: return ("remove_constraint", self.to_constraint()) @classmethod def from_constraint( cls, constraint: "Constraint", ) -> "DropConstraintOp": types = { "unique_constraint": "unique", "foreign_key_constraint": "foreignkey", "primary_key_constraint": "primary", "check_constraint": "check", "column_check_constraint": "check", "table_or_column_check_constraint": "check", } constraint_table = sqla_compat._table_for_constraint(constraint) return cls( constraint.name, constraint_table.name, schema=constraint_table.schema, type_=types[constraint.__visit_name__], _reverse=AddConstraintOp.from_constraint(constraint), ) def to_constraint( self, ) -> "Constraint": if self._reverse is not None: constraint = self._reverse.to_constraint() constraint.name = self.constraint_name constraint_table = sqla_compat._table_for_constraint(constraint) constraint_table.name = self.table_name constraint_table.schema = self.schema return constraint else: raise ValueError( "constraint cannot be produced; " "original constraint is not present" ) @classmethod def drop_constraint( cls, operations: "Operations", constraint_name: str, table_name: str, type_: Optional[str] = None, schema: Optional[str] = None, ) -> Optional["Table"]: r"""Drop a constraint of the given name, typically via DROP CONSTRAINT. :param constraint_name: name of the constraint. :param table_name: table name. :param type\_: optional, required on MySQL. 
can be 'foreignkey', 'primary', 'unique', or 'check'. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. """ op = cls(constraint_name, table_name, type_=type_, schema=schema) return operations.invoke(op) @classmethod def batch_drop_constraint( cls, operations: "BatchOperations", constraint_name: str, type_: Optional[str] = None, ) -> None: """Issue a "drop constraint" instruction using the current batch migration context. The batch form of this call omits the ``table_name`` and ``schema`` arguments from the call. .. seealso:: :meth:`.Operations.drop_constraint` """ op = cls( constraint_name, operations.impl.table_name, type_=type_, schema=operations.impl.schema, ) return operations.invoke(op) @Operations.register_operation("create_primary_key") @BatchOperations.register_operation( "create_primary_key", "batch_create_primary_key" ) @AddConstraintOp.register_add_constraint("primary_key_constraint") class CreatePrimaryKeyOp(AddConstraintOp): """Represent a create primary key operation.""" constraint_type = "primarykey" def __init__( self, constraint_name: Optional[str], table_name: str, columns: Sequence[str], schema: Optional[str] = None, **kw ) -> None: self.constraint_name = constraint_name self.table_name = table_name self.columns = columns self.schema = schema self.kw = kw @classmethod def from_constraint(cls, constraint: "Constraint") -> "CreatePrimaryKeyOp": constraint_table = sqla_compat._table_for_constraint(constraint) pk_constraint = cast("PrimaryKeyConstraint", constraint) return cls( pk_constraint.name, constraint_table.name, pk_constraint.columns.keys(), schema=constraint_table.schema, **pk_constraint.dialect_kwargs, ) def to_constraint( self, migration_context: Optional["MigrationContext"] = None ) -> "PrimaryKeyConstraint": schema_obj = schemaobj.SchemaObjects(migration_context) return schema_obj.primary_key_constraint( self.constraint_name, self.table_name, self.columns, schema=self.schema, **self.kw, ) @classmethod def create_primary_key( cls, operations: "Operations", constraint_name: Optional[str], table_name: str, columns: List[str], schema: Optional[str] = None, ) -> Optional["Table"]: """Issue a "create primary key" instruction using the current migration context. e.g.:: from alembic import op op.create_primary_key( "pk_my_table", "my_table", ["id", "version"] ) This internally generates a :class:`~sqlalchemy.schema.Table` object containing the necessary columns, then generates a new :class:`~sqlalchemy.schema.PrimaryKeyConstraint` object which it then associates with the :class:`~sqlalchemy.schema.Table`. Any event listeners associated with this action will be fired off normally. The :class:`~sqlalchemy.schema.AddConstraint` construct is ultimately used to generate the ALTER statement. :param constraint_name: Name of the primary key constraint. The name is necessary so that an ALTER statement can be emitted. For setups that use an automated naming scheme such as that described at :ref:`sqla:constraint_naming_conventions` ``name`` here can be ``None``, as the event listener will apply the name to the constraint object when it is associated with the table. :param table_name: String name of the target table. :param columns: a list of string column names to be applied to the primary key constraint. :param schema: Optional schema name to operate within. 
To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. """ op = cls(constraint_name, table_name, columns, schema) return operations.invoke(op) @classmethod def batch_create_primary_key( cls, operations: "BatchOperations", constraint_name: str, columns: List[str], ) -> None: """Issue a "create primary key" instruction using the current batch migration context. The batch form of this call omits the ``table_name`` and ``schema`` arguments from the call. .. seealso:: :meth:`.Operations.create_primary_key` """ op = cls( constraint_name, operations.impl.table_name, columns, schema=operations.impl.schema, ) return operations.invoke(op) @Operations.register_operation("create_unique_constraint") @BatchOperations.register_operation( "create_unique_constraint", "batch_create_unique_constraint" ) @AddConstraintOp.register_add_constraint("unique_constraint") class CreateUniqueConstraintOp(AddConstraintOp): """Represent a create unique constraint operation.""" constraint_type = "unique" def __init__( self, constraint_name: Optional[str], table_name: str, columns: Sequence[str], schema: Optional[str] = None, **kw ) -> None: self.constraint_name = constraint_name self.table_name = table_name self.columns = columns self.schema = schema self.kw = kw @classmethod def from_constraint( cls, constraint: "Constraint" ) -> "CreateUniqueConstraintOp": constraint_table = sqla_compat._table_for_constraint(constraint) uq_constraint = cast("UniqueConstraint", constraint) kw: dict = {} if uq_constraint.deferrable: kw["deferrable"] = uq_constraint.deferrable if uq_constraint.initially: kw["initially"] = uq_constraint.initially kw.update(uq_constraint.dialect_kwargs) return cls( uq_constraint.name, constraint_table.name, [c.name for c in uq_constraint.columns], schema=constraint_table.schema, **kw, ) def to_constraint( self, migration_context: Optional["MigrationContext"] = None ) -> "UniqueConstraint": schema_obj = schemaobj.SchemaObjects(migration_context) return schema_obj.unique_constraint( self.constraint_name, self.table_name, self.columns, schema=self.schema, **self.kw, ) @classmethod def create_unique_constraint( cls, operations: "Operations", constraint_name: Optional[str], table_name: str, columns: Sequence[str], schema: Optional[str] = None, **kw ) -> Any: """Issue a "create unique constraint" instruction using the current migration context. e.g.:: from alembic import op op.create_unique_constraint("uq_user_name", "user", ["name"]) This internally generates a :class:`~sqlalchemy.schema.Table` object containing the necessary columns, then generates a new :class:`~sqlalchemy.schema.UniqueConstraint` object which it then associates with the :class:`~sqlalchemy.schema.Table`. Any event listeners associated with this action will be fired off normally. The :class:`~sqlalchemy.schema.AddConstraint` construct is ultimately used to generate the ALTER statement. :param name: Name of the unique constraint. The name is necessary so that an ALTER statement can be emitted. For setups that use an automated naming scheme such as that described at :ref:`sqla:constraint_naming_conventions`, ``name`` here can be ``None``, as the event listener will apply the name to the constraint object when it is associated with the table. :param table_name: String name of the source table. :param columns: a list of string column names in the source table. :param deferrable: optional bool. 
If set, emit DEFERRABLE or NOT DEFERRABLE when issuing DDL for this constraint. :param initially: optional string. If set, emit INITIALLY when issuing DDL for this constraint. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. """ op = cls(constraint_name, table_name, columns, schema=schema, **kw) return operations.invoke(op) @classmethod def batch_create_unique_constraint( cls, operations: "BatchOperations", constraint_name: str, columns: Sequence[str], **kw ) -> Any: """Issue a "create unique constraint" instruction using the current batch migration context. The batch form of this call omits the ``source`` and ``schema`` arguments from the call. .. seealso:: :meth:`.Operations.create_unique_constraint` """ kw["schema"] = operations.impl.schema op = cls(constraint_name, operations.impl.table_name, columns, **kw) return operations.invoke(op) @Operations.register_operation("create_foreign_key") @BatchOperations.register_operation( "create_foreign_key", "batch_create_foreign_key" ) @AddConstraintOp.register_add_constraint("foreign_key_constraint") class CreateForeignKeyOp(AddConstraintOp): """Represent a create foreign key constraint operation.""" constraint_type = "foreignkey" def __init__( self, constraint_name: Optional[str], source_table: str, referent_table: str, local_cols: List[str], remote_cols: List[str], **kw ) -> None: self.constraint_name = constraint_name self.source_table = source_table self.referent_table = referent_table self.local_cols = local_cols self.remote_cols = remote_cols self.kw = kw def to_diff_tuple(self) -> Tuple[str, "ForeignKeyConstraint"]: return ("add_fk", self.to_constraint()) @classmethod def from_constraint(cls, constraint: "Constraint") -> "CreateForeignKeyOp": fk_constraint = cast("ForeignKeyConstraint", constraint) kw: dict = {} if fk_constraint.onupdate: kw["onupdate"] = fk_constraint.onupdate if fk_constraint.ondelete: kw["ondelete"] = fk_constraint.ondelete if fk_constraint.initially: kw["initially"] = fk_constraint.initially if fk_constraint.deferrable: kw["deferrable"] = fk_constraint.deferrable if fk_constraint.use_alter: kw["use_alter"] = fk_constraint.use_alter ( source_schema, source_table, source_columns, target_schema, target_table, target_columns, onupdate, ondelete, deferrable, initially, ) = sqla_compat._fk_spec(fk_constraint) kw["source_schema"] = source_schema kw["referent_schema"] = target_schema kw.update(fk_constraint.dialect_kwargs) return cls( fk_constraint.name, source_table, target_table, source_columns, target_columns, **kw, ) def to_constraint( self, migration_context: Optional["MigrationContext"] = None ) -> "ForeignKeyConstraint": schema_obj = schemaobj.SchemaObjects(migration_context) return schema_obj.foreign_key_constraint( self.constraint_name, self.source_table, self.referent_table, self.local_cols, self.remote_cols, **self.kw, ) @classmethod def create_foreign_key( cls, operations: "Operations", constraint_name: Optional[str], source_table: str, referent_table: str, local_cols: List[str], remote_cols: List[str], onupdate: Optional[str] = None, ondelete: Optional[str] = None, deferrable: Optional[bool] = None, initially: Optional[str] = None, match: Optional[str] = None, source_schema: Optional[str] = None, referent_schema: Optional[str] = None, **dialect_kw ) -> Optional["Table"]: """Issue a "create foreign key" instruction using the current migration context. 
e.g.:: from alembic import op op.create_foreign_key( "fk_user_address", "address", "user", ["user_id"], ["id"]) This internally generates a :class:`~sqlalchemy.schema.Table` object containing the necessary columns, then generates a new :class:`~sqlalchemy.schema.ForeignKeyConstraint` object which it then associates with the :class:`~sqlalchemy.schema.Table`. Any event listeners associated with this action will be fired off normally. The :class:`~sqlalchemy.schema.AddConstraint` construct is ultimately used to generate the ALTER statement. :param constraint_name: Name of the foreign key constraint. The name is necessary so that an ALTER statement can be emitted. For setups that use an automated naming scheme such as that described at :ref:`sqla:constraint_naming_conventions`, ``name`` here can be ``None``, as the event listener will apply the name to the constraint object when it is associated with the table. :param source_table: String name of the source table. :param referent_table: String name of the destination table. :param local_cols: a list of string column names in the source table. :param remote_cols: a list of string column names in the remote table. :param onupdate: Optional string. If set, emit ON UPDATE when issuing DDL for this constraint. Typical values include CASCADE, DELETE and RESTRICT. :param ondelete: Optional string. If set, emit ON DELETE when issuing DDL for this constraint. Typical values include CASCADE, DELETE and RESTRICT. :param deferrable: optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when issuing DDL for this constraint. :param source_schema: Optional schema name of the source table. :param referent_schema: Optional schema name of the destination table. """ op = cls( constraint_name, source_table, referent_table, local_cols, remote_cols, onupdate=onupdate, ondelete=ondelete, deferrable=deferrable, source_schema=source_schema, referent_schema=referent_schema, initially=initially, match=match, **dialect_kw, ) return operations.invoke(op) @classmethod def batch_create_foreign_key( cls, operations: "BatchOperations", constraint_name: str, referent_table: str, local_cols: List[str], remote_cols: List[str], referent_schema: Optional[str] = None, onupdate: None = None, ondelete: None = None, deferrable: None = None, initially: None = None, match: None = None, **dialect_kw ) -> None: """Issue a "create foreign key" instruction using the current batch migration context. The batch form of this call omits the ``source`` and ``source_schema`` arguments from the call. e.g.:: with batch_alter_table("address") as batch_op: batch_op.create_foreign_key( "fk_user_address", "user", ["user_id"], ["id"]) .. 
seealso:: :meth:`.Operations.create_foreign_key` """ op = cls( constraint_name, operations.impl.table_name, referent_table, local_cols, remote_cols, onupdate=onupdate, ondelete=ondelete, deferrable=deferrable, source_schema=operations.impl.schema, referent_schema=referent_schema, initially=initially, match=match, **dialect_kw, ) return operations.invoke(op) @Operations.register_operation("create_check_constraint") @BatchOperations.register_operation( "create_check_constraint", "batch_create_check_constraint" ) @AddConstraintOp.register_add_constraint("check_constraint") @AddConstraintOp.register_add_constraint("table_or_column_check_constraint") @AddConstraintOp.register_add_constraint("column_check_constraint") class CreateCheckConstraintOp(AddConstraintOp): """Represent a create check constraint operation.""" constraint_type = "check" def __init__( self, constraint_name: Optional[str], table_name: str, condition: Union[str, "TextClause", "ColumnElement[Any]"], schema: Optional[str] = None, **kw ) -> None: self.constraint_name = constraint_name self.table_name = table_name self.condition = condition self.schema = schema self.kw = kw @classmethod def from_constraint( cls, constraint: "Constraint" ) -> "CreateCheckConstraintOp": constraint_table = sqla_compat._table_for_constraint(constraint) ck_constraint = cast("CheckConstraint", constraint) return cls( ck_constraint.name, constraint_table.name, cast( "Union[TextClause, ColumnElement[Any]]", ck_constraint.sqltext ), schema=constraint_table.schema, **ck_constraint.dialect_kwargs, ) def to_constraint( self, migration_context: Optional["MigrationContext"] = None ) -> "CheckConstraint": schema_obj = schemaobj.SchemaObjects(migration_context) return schema_obj.check_constraint( self.constraint_name, self.table_name, self.condition, schema=self.schema, **self.kw, ) @classmethod def create_check_constraint( cls, operations: "Operations", constraint_name: Optional[str], table_name: str, condition: Union[str, "BinaryExpression"], schema: Optional[str] = None, **kw ) -> Optional["Table"]: """Issue a "create check constraint" instruction using the current migration context. e.g.:: from alembic import op from sqlalchemy.sql import column, func op.create_check_constraint( "ck_user_name_len", "user", func.len(column('name')) > 5 ) CHECK constraints are usually against a SQL expression, so ad-hoc table metadata is usually needed. The function will convert the given arguments into a :class:`sqlalchemy.schema.CheckConstraint` bound to an anonymous table in order to emit the CREATE statement. :param name: Name of the check constraint. The name is necessary so that an ALTER statement can be emitted. For setups that use an automated naming scheme such as that described at :ref:`sqla:constraint_naming_conventions`, ``name`` here can be ``None``, as the event listener will apply the name to the constraint object when it is associated with the table. :param table_name: String name of the source table. :param condition: SQL expression that's the condition of the constraint. Can be a string or SQLAlchemy expression language structure. :param deferrable: optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when issuing DDL for this constraint. :param initially: optional string. If set, emit INITIALLY when issuing DDL for this constraint. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. 
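A plain string condition may also be used; as a short sketch (the constraint, table, and column names here are illustrative only)::

    op.create_check_constraint(
        "ck_account_positive_balance",
        "account",
        "balance >= 0",
    )
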
""" op = cls(constraint_name, table_name, condition, schema=schema, **kw) return operations.invoke(op) @classmethod def batch_create_check_constraint( cls, operations: "BatchOperations", constraint_name: str, condition: "TextClause", **kw ) -> Optional["Table"]: """Issue a "create check constraint" instruction using the current batch migration context. The batch form of this call omits the ``source`` and ``schema`` arguments from the call. .. seealso:: :meth:`.Operations.create_check_constraint` """ op = cls( constraint_name, operations.impl.table_name, condition, schema=operations.impl.schema, **kw, ) return operations.invoke(op) @Operations.register_operation("create_index") @BatchOperations.register_operation("create_index", "batch_create_index") class CreateIndexOp(MigrateOperation): """Represent a create index operation.""" def __init__( self, index_name: str, table_name: str, columns: Sequence[Union[str, "TextClause", "ColumnElement[Any]"]], schema: Optional[str] = None, unique: bool = False, **kw ) -> None: self.index_name = index_name self.table_name = table_name self.columns = columns self.schema = schema self.unique = unique self.kw = kw def reverse(self) -> "DropIndexOp": return DropIndexOp.from_index(self.to_index()) def to_diff_tuple(self) -> Tuple[str, "Index"]: return ("add_index", self.to_index()) @classmethod def from_index(cls, index: "Index") -> "CreateIndexOp": assert index.table is not None return cls( index.name, index.table.name, sqla_compat._get_index_expressions(index), schema=index.table.schema, unique=index.unique, **index.kwargs, ) def to_index( self, migration_context: Optional["MigrationContext"] = None ) -> "Index": schema_obj = schemaobj.SchemaObjects(migration_context) idx = schema_obj.index( self.index_name, self.table_name, self.columns, schema=self.schema, unique=self.unique, **self.kw, ) return idx @classmethod def create_index( cls, operations: Operations, index_name: str, table_name: str, columns: Sequence[Union[str, "TextClause", "Function"]], schema: Optional[str] = None, unique: bool = False, **kw ) -> Optional["Table"]: r"""Issue a "create index" instruction using the current migration context. e.g.:: from alembic import op op.create_index('ik_test', 't1', ['foo', 'bar']) Functional indexes can be produced by using the :func:`sqlalchemy.sql.expression.text` construct:: from alembic import op from sqlalchemy import text op.create_index('ik_test', 't1', [text('lower(foo)')]) :param index_name: name of the index. :param table_name: name of the owning table. :param columns: a list consisting of string column names and/or :func:`~sqlalchemy.sql.expression.text` constructs. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. :param unique: If True, create a unique index. :param quote: Force quoting of this column's name on or off, corresponding to ``True`` or ``False``. When left at its default of ``None``, the column identifier will be quoted according to whether the name is case sensitive (identifiers with at least one upper case character are treated as case sensitive), or if it's a reserved word. This flag is only needed to force quoting of a reserved word which is not known by the SQLAlchemy dialect. :param \**kw: Additional keyword arguments not mentioned above are dialect specific, and passed in the form ``_``. 
See the documentation regarding an individual dialect at :ref:`dialect_toplevel` for detail on documented arguments. """ op = cls( index_name, table_name, columns, schema=schema, unique=unique, **kw ) return operations.invoke(op) @classmethod def batch_create_index( cls, operations: "BatchOperations", index_name: str, columns: List[str], **kw ) -> Optional["Table"]: """Issue a "create index" instruction using the current batch migration context. .. seealso:: :meth:`.Operations.create_index` """ op = cls( index_name, operations.impl.table_name, columns, schema=operations.impl.schema, **kw, ) return operations.invoke(op) @Operations.register_operation("drop_index") @BatchOperations.register_operation("drop_index", "batch_drop_index") class DropIndexOp(MigrateOperation): """Represent a drop index operation.""" def __init__( self, index_name: Union["quoted_name", str, "conv"], table_name: Optional[str] = None, schema: Optional[str] = None, _reverse: Optional["CreateIndexOp"] = None, **kw ) -> None: self.index_name = index_name self.table_name = table_name self.schema = schema self._reverse = _reverse self.kw = kw def to_diff_tuple(self) -> Tuple[str, "Index"]: return ("remove_index", self.to_index()) def reverse(self) -> "CreateIndexOp": return CreateIndexOp.from_index(self.to_index()) @classmethod def from_index(cls, index: "Index") -> "DropIndexOp": assert index.table is not None return cls( index.name, index.table.name, schema=index.table.schema, _reverse=CreateIndexOp.from_index(index), **index.kwargs, ) def to_index( self, migration_context: Optional["MigrationContext"] = None ) -> "Index": schema_obj = schemaobj.SchemaObjects(migration_context) # need a dummy column name here since SQLAlchemy # 0.7.6 and further raises on Index with no columns return schema_obj.index( self.index_name, self.table_name, self._reverse.columns if self._reverse else ["x"], schema=self.schema, **self.kw, ) @classmethod def drop_index( cls, operations: "Operations", index_name: str, table_name: Optional[str] = None, schema: Optional[str] = None, **kw ) -> Optional["Table"]: r"""Issue a "drop index" instruction using the current migration context. e.g.:: drop_index("accounts") :param index_name: name of the index. :param table_name: name of the owning table. Some backends such as Microsoft SQL Server require this. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. :param \**kw: Additional keyword arguments not mentioned above are dialect specific, and passed in the form ``<dialectname>_<argname>``. See the documentation regarding an individual dialect at :ref:`dialect_toplevel` for detail on documented arguments. """ op = cls(index_name, table_name=table_name, schema=schema, **kw) return operations.invoke(op) @classmethod def batch_drop_index( cls, operations: BatchOperations, index_name: str, **kw ) -> Optional["Table"]: """Issue a "drop index" instruction using the current batch migration context. .. 
seealso:: :meth:`.Operations.drop_index` """ op = cls( index_name, table_name=operations.impl.table_name, schema=operations.impl.schema, **kw, ) return operations.invoke(op) @Operations.register_operation("create_table") class CreateTableOp(MigrateOperation): """Represent a create table operation.""" def __init__( self, table_name: str, columns: Sequence[Union["Column", "Constraint"]], schema: Optional[str] = None, _namespace_metadata: Optional["MetaData"] = None, _constraints_included: bool = False, **kw ) -> None: self.table_name = table_name self.columns = columns self.schema = schema self.info = kw.pop("info", {}) self.comment = kw.pop("comment", None) self.prefixes = kw.pop("prefixes", None) self.kw = kw self._namespace_metadata = _namespace_metadata self._constraints_included = _constraints_included def reverse(self) -> "DropTableOp": return DropTableOp.from_table( self.to_table(), _namespace_metadata=self._namespace_metadata ) def to_diff_tuple(self) -> Tuple[str, "Table"]: return ("add_table", self.to_table()) @classmethod def from_table( cls, table: "Table", _namespace_metadata: Optional["MetaData"] = None ) -> "CreateTableOp": if _namespace_metadata is None: _namespace_metadata = table.metadata return cls( table.name, list(table.c) + list(table.constraints), # type:ignore[arg-type] schema=table.schema, _namespace_metadata=_namespace_metadata, # given a Table() object, this Table will contain full Index() # and UniqueConstraint objects already constructed in response to # each unique=True / index=True flag on a Column. Carry this # state along so that when we re-convert back into a Table, we # skip unique=True/index=True so that these constraints are # not doubled up. see #844 #848 _constraints_included=True, comment=table.comment, info=dict(table.info), prefixes=list(table._prefixes), **table.kwargs, ) def to_table( self, migration_context: Optional["MigrationContext"] = None ) -> "Table": schema_obj = schemaobj.SchemaObjects(migration_context) return schema_obj.table( self.table_name, *self.columns, schema=self.schema, prefixes=list(self.prefixes) if self.prefixes else [], comment=self.comment, info=self.info.copy() if self.info else {}, _constraints_included=self._constraints_included, **self.kw, ) @classmethod def create_table( cls, operations: "Operations", table_name: str, *columns, **kw ) -> Optional["Table"]: r"""Issue a "create table" instruction using the current migration context. This directive receives an argument list similar to that of the traditional :class:`sqlalchemy.schema.Table` construct, but without the metadata:: from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column from alembic import op op.create_table( 'account', Column('id', INTEGER, primary_key=True), Column('name', VARCHAR(50), nullable=False), Column('description', NVARCHAR(200)), Column('timestamp', TIMESTAMP, server_default=func.now()) ) Note that :meth:`.create_table` accepts :class:`~sqlalchemy.schema.Column` constructs directly from the SQLAlchemy library. 
In particular, default values to be created on the database side are specified using the ``server_default`` parameter, and not ``default`` which only specifies Python-side defaults:: from alembic import op from sqlalchemy import Column, TIMESTAMP, func # specify "DEFAULT NOW" along with the "timestamp" column op.create_table('account', Column('id', INTEGER, primary_key=True), Column('timestamp', TIMESTAMP, server_default=func.now()) ) The function also returns a newly created :class:`~sqlalchemy.schema.Table` object, corresponding to the table specification given, which is suitable for immediate SQL operations, in particular :meth:`.Operations.bulk_insert`:: from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column from alembic import op account_table = op.create_table( 'account', Column('id', INTEGER, primary_key=True), Column('name', VARCHAR(50), nullable=False), Column('description', NVARCHAR(200)), Column('timestamp', TIMESTAMP, server_default=func.now()) ) op.bulk_insert( account_table, [ {"name": "A1", "description": "account 1"}, {"name": "A2", "description": "account 2"}, ] ) :param table_name: Name of the table :param \*columns: collection of :class:`~sqlalchemy.schema.Column` objects within the table, as well as optional :class:`~sqlalchemy.schema.Constraint` objects and :class:`~.sqlalchemy.schema.Index` objects. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. :param \**kw: Other keyword arguments are passed to the underlying :class:`sqlalchemy.schema.Table` object created for the command. :return: the :class:`~sqlalchemy.schema.Table` object corresponding to the parameters given. """ op = cls(table_name, columns, **kw) return operations.invoke(op) @Operations.register_operation("drop_table") class DropTableOp(MigrateOperation): """Represent a drop table operation.""" def __init__( self, table_name: str, schema: Optional[str] = None, table_kw: Optional[MutableMapping[Any, Any]] = None, _reverse: Optional["CreateTableOp"] = None, ) -> None: self.table_name = table_name self.schema = schema self.table_kw = table_kw or {} self.comment = self.table_kw.pop("comment", None) self.info = self.table_kw.pop("info", None) self.prefixes = self.table_kw.pop("prefixes", None) self._reverse = _reverse def to_diff_tuple(self) -> Tuple[str, "Table"]: return ("remove_table", self.to_table()) def reverse(self) -> "CreateTableOp": return CreateTableOp.from_table(self.to_table()) @classmethod def from_table( cls, table: "Table", _namespace_metadata: Optional["MetaData"] = None ) -> "DropTableOp": return cls( table.name, schema=table.schema, table_kw={ "comment": table.comment, "info": dict(table.info), "prefixes": list(table._prefixes), **table.kwargs, }, _reverse=CreateTableOp.from_table( table, _namespace_metadata=_namespace_metadata ), ) def to_table( self, migration_context: Optional["MigrationContext"] = None ) -> "Table": if self._reverse: cols_and_constraints = self._reverse.columns else: cols_and_constraints = [] schema_obj = schemaobj.SchemaObjects(migration_context) t = schema_obj.table( self.table_name, *cols_and_constraints, comment=self.comment, info=self.info.copy() if self.info else {}, prefixes=list(self.prefixes) if self.prefixes else [], schema=self.schema, _constraints_included=self._reverse._constraints_included if self._reverse else False, **self.table_kw, ) return t @classmethod def drop_table( cls, operations: "Operations", 
table_name: str, schema: Optional[str] = None, **kw: Any ) -> None: r"""Issue a "drop table" instruction using the current migration context. e.g.:: drop_table("accounts") :param table_name: Name of the table :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. :param \**kw: Other keyword arguments are passed to the underlying :class:`sqlalchemy.schema.Table` object created for the command. """ op = cls(table_name, schema=schema, table_kw=kw) operations.invoke(op) class AlterTableOp(MigrateOperation): """Represent an alter table operation.""" def __init__( self, table_name: str, schema: Optional[str] = None, ) -> None: self.table_name = table_name self.schema = schema @Operations.register_operation("rename_table") class RenameTableOp(AlterTableOp): """Represent a rename table operation.""" def __init__( self, old_table_name: str, new_table_name: str, schema: Optional[str] = None, ) -> None: super(RenameTableOp, self).__init__(old_table_name, schema=schema) self.new_table_name = new_table_name @classmethod def rename_table( cls, operations: "Operations", old_table_name: str, new_table_name: str, schema: Optional[str] = None, ) -> Optional["Table"]: """Emit an ALTER TABLE to rename a table. :param old_table_name: old name. :param new_table_name: new name. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. """ op = cls(old_table_name, new_table_name, schema=schema) return operations.invoke(op) @Operations.register_operation("create_table_comment") @BatchOperations.register_operation( "create_table_comment", "batch_create_table_comment" ) class CreateTableCommentOp(AlterTableOp): """Represent a COMMENT ON `table` operation.""" def __init__( self, table_name: str, comment: Optional[str], schema: Optional[str] = None, existing_comment: Optional[str] = None, ) -> None: self.table_name = table_name self.comment = comment self.existing_comment = existing_comment self.schema = schema @classmethod def create_table_comment( cls, operations: "Operations", table_name: str, comment: Optional[str], existing_comment: None = None, schema: Optional[str] = None, ) -> Optional["Table"]: """Emit a COMMENT ON operation to set the comment for a table. .. versionadded:: 1.0.6 :param table_name: string name of the target table. :param comment: string value of the comment being registered against the specified table. :param existing_comment: String value of a comment already registered on the specified table, used within autogenerate so that the operation is reversible, but not required for direct use. .. seealso:: :meth:`.Operations.drop_table_comment` :paramref:`.Operations.alter_column.comment` """ op = cls( table_name, comment, existing_comment=existing_comment, schema=schema, ) return operations.invoke(op) @classmethod def batch_create_table_comment( cls, operations, comment, existing_comment=None, ): """Emit a COMMENT ON operation to set the comment for a table using the current batch migration context. .. versionadded:: 1.6.0 :param comment: string value of the comment being registered against the specified table. :param existing_comment: String value of a comment already registered on the specified table, used within autogenerate so that the operation is reversible, but not required for direct use. 
""" op = cls( operations.impl.table_name, comment, existing_comment=existing_comment, schema=operations.impl.schema, ) return operations.invoke(op) def reverse(self): """Reverses the COMMENT ON operation against a table.""" if self.existing_comment is None: return DropTableCommentOp( self.table_name, existing_comment=self.comment, schema=self.schema, ) else: return CreateTableCommentOp( self.table_name, self.existing_comment, existing_comment=self.comment, schema=self.schema, ) def to_table(self, migration_context=None): schema_obj = schemaobj.SchemaObjects(migration_context) return schema_obj.table( self.table_name, schema=self.schema, comment=self.comment ) def to_diff_tuple(self): return ("add_table_comment", self.to_table(), self.existing_comment) @Operations.register_operation("drop_table_comment") @BatchOperations.register_operation( "drop_table_comment", "batch_drop_table_comment" ) class DropTableCommentOp(AlterTableOp): """Represent an operation to remove the comment from a table.""" def __init__( self, table_name: str, schema: Optional[str] = None, existing_comment: Optional[str] = None, ) -> None: self.table_name = table_name self.existing_comment = existing_comment self.schema = schema @classmethod def drop_table_comment( cls, operations: "Operations", table_name: str, existing_comment: Optional[str] = None, schema: Optional[str] = None, ) -> Optional["Table"]: """Issue a "drop table comment" operation to remove an existing comment set on a table. .. versionadded:: 1.0.6 :param table_name: string name of the target table. :param existing_comment: An optional string value of a comment already registered on the specified table. .. seealso:: :meth:`.Operations.create_table_comment` :paramref:`.Operations.alter_column.comment` """ op = cls(table_name, existing_comment=existing_comment, schema=schema) return operations.invoke(op) @classmethod def batch_drop_table_comment(cls, operations, existing_comment=None): """Issue a "drop table comment" operation to remove an existing comment set on a table using the current batch operations context. .. versionadded:: 1.6.0 :param existing_comment: An optional string value of a comment already registered on the specified table. 
""" op = cls( operations.impl.table_name, existing_comment=existing_comment, schema=operations.impl.schema, ) return operations.invoke(op) def reverse(self): """Reverses the COMMENT ON operation against a table.""" return CreateTableCommentOp( self.table_name, self.existing_comment, schema=self.schema ) def to_table(self, migration_context=None): schema_obj = schemaobj.SchemaObjects(migration_context) return schema_obj.table(self.table_name, schema=self.schema) def to_diff_tuple(self): return ("remove_table_comment", self.to_table()) @Operations.register_operation("alter_column") @BatchOperations.register_operation("alter_column", "batch_alter_column") class AlterColumnOp(AlterTableOp): """Represent an alter column operation.""" def __init__( self, table_name: str, column_name: str, schema: Optional[str] = None, existing_type: Optional[Any] = None, existing_server_default: Any = False, existing_nullable: Optional[bool] = None, existing_comment: Optional[str] = None, modify_nullable: Optional[bool] = None, modify_comment: Optional[Union[str, bool]] = False, modify_server_default: Any = False, modify_name: Optional[str] = None, modify_type: Optional[Any] = None, **kw ) -> None: super(AlterColumnOp, self).__init__(table_name, schema=schema) self.column_name = column_name self.existing_type = existing_type self.existing_server_default = existing_server_default self.existing_nullable = existing_nullable self.existing_comment = existing_comment self.modify_nullable = modify_nullable self.modify_comment = modify_comment self.modify_server_default = modify_server_default self.modify_name = modify_name self.modify_type = modify_type self.kw = kw def to_diff_tuple(self) -> Any: col_diff = [] schema, tname, cname = self.schema, self.table_name, self.column_name if self.modify_type is not None: col_diff.append( ( "modify_type", schema, tname, cname, { "existing_nullable": self.existing_nullable, "existing_server_default": ( self.existing_server_default ), "existing_comment": self.existing_comment, }, self.existing_type, self.modify_type, ) ) if self.modify_nullable is not None: col_diff.append( ( "modify_nullable", schema, tname, cname, { "existing_type": self.existing_type, "existing_server_default": ( self.existing_server_default ), "existing_comment": self.existing_comment, }, self.existing_nullable, self.modify_nullable, ) ) if self.modify_server_default is not False: col_diff.append( ( "modify_default", schema, tname, cname, { "existing_nullable": self.existing_nullable, "existing_type": self.existing_type, "existing_comment": self.existing_comment, }, self.existing_server_default, self.modify_server_default, ) ) if self.modify_comment is not False: col_diff.append( ( "modify_comment", schema, tname, cname, { "existing_nullable": self.existing_nullable, "existing_type": self.existing_type, "existing_server_default": ( self.existing_server_default ), }, self.existing_comment, self.modify_comment, ) ) return col_diff def has_changes(self) -> bool: hc1 = ( self.modify_nullable is not None or self.modify_server_default is not False or self.modify_type is not None or self.modify_comment is not False ) if hc1: return True for kw in self.kw: if kw.startswith("modify_"): return True else: return False def reverse(self) -> "AlterColumnOp": kw = self.kw.copy() kw["existing_type"] = self.existing_type kw["existing_nullable"] = self.existing_nullable kw["existing_server_default"] = self.existing_server_default kw["existing_comment"] = self.existing_comment if self.modify_type is not None: kw["modify_type"] = 
self.modify_type if self.modify_nullable is not None: kw["modify_nullable"] = self.modify_nullable if self.modify_server_default is not False: kw["modify_server_default"] = self.modify_server_default if self.modify_comment is not False: kw["modify_comment"] = self.modify_comment # TODO: make this a little simpler all_keys = set( m.group(1) for m in [re.match(r"^(?:existing_|modify_)(.+)$", k) for k in kw] if m ) for k in all_keys: if "modify_%s" % k in kw: swap = kw["existing_%s" % k] kw["existing_%s" % k] = kw["modify_%s" % k] kw["modify_%s" % k] = swap return self.__class__( self.table_name, self.column_name, schema=self.schema, **kw ) @classmethod def alter_column( cls, operations: Operations, table_name: str, column_name: str, nullable: Optional[bool] = None, comment: Optional[Union[str, bool]] = False, server_default: Any = False, new_column_name: Optional[str] = None, type_: Optional[Union["TypeEngine", Type["TypeEngine"]]] = None, existing_type: Optional[ Union["TypeEngine", Type["TypeEngine"]] ] = None, existing_server_default: Optional[ Union[str, bool, "Identity", "Computed"] ] = False, existing_nullable: Optional[bool] = None, existing_comment: Optional[str] = None, schema: Optional[str] = None, **kw ) -> Optional["Table"]: r"""Issue an "alter column" instruction using the current migration context. Generally, only that aspect of the column which is being changed, i.e. name, type, nullability, default, needs to be specified. Multiple changes can also be specified at once and the backend should "do the right thing", emitting each change either separately or together as the backend allows. MySQL has special requirements here, since MySQL cannot ALTER a column without a full specification. When producing MySQL-compatible migration files, it is recommended that the ``existing_type``, ``existing_server_default``, and ``existing_nullable`` parameters be present, if not being altered. Type changes which are against the SQLAlchemy "schema" types :class:`~sqlalchemy.types.Boolean` and :class:`~sqlalchemy.types.Enum` may also add or drop constraints which accompany those types on backends that don't support them natively. The ``existing_type`` argument is used in this case to identify and remove a previous constraint that was bound to the type object. :param table_name: string name of the target table. :param column_name: string name of the target column, as it exists before the operation begins. :param nullable: Optional; specify ``True`` or ``False`` to alter the column's nullability. :param server_default: Optional; specify a string SQL expression, :func:`~sqlalchemy.sql.expression.text`, or :class:`~sqlalchemy.schema.DefaultClause` to indicate an alteration to the column's default value. Set to ``None`` to have the default removed. :param comment: optional string text of a new comment to add to the column. .. versionadded:: 1.0.6 :param new_column_name: Optional; specify a string name here to indicate the new name within a column rename operation. :param type\_: Optional; a :class:`~sqlalchemy.types.TypeEngine` type object to specify a change to the column's type. For SQLAlchemy types that also indicate a constraint (i.e. :class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`), the constraint is also generated. :param autoincrement: set the ``AUTO_INCREMENT`` flag of the column; currently understood by the MySQL dialect. :param existing_type: Optional; a :class:`~sqlalchemy.types.TypeEngine` type object to specify the previous type. 
This is required for all MySQL column alter operations that don't otherwise specify a new type, as well as for when nullability is being changed on a SQL Server column. It is also used if the type is a so-called SQLAlchemy "schema" type which may define a constraint (i.e. :class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`), so that the constraint can be dropped. :param existing_server_default: Optional; The existing default value of the column. Required on MySQL if an existing default is not being changed; else MySQL removes the default. :param existing_nullable: Optional; the existing nullability of the column. Required on MySQL if the existing nullability is not being changed; else MySQL sets this to NULL. :param existing_autoincrement: Optional; the existing autoincrement of the column. Used for MySQL's system of altering a column that specifies ``AUTO_INCREMENT``. :param existing_comment: string text of the existing comment on the column to be maintained. Required on MySQL if the existing comment on the column is not being changed. .. versionadded:: 1.0.6 :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. :param postgresql_using: String argument which will indicate a SQL expression to render within the Postgresql-specific USING clause within ALTER COLUMN. This string is taken directly as raw SQL which must explicitly include any necessary quoting or escaping of tokens within the expression. """ alt = cls( table_name, column_name, schema=schema, existing_type=existing_type, existing_server_default=existing_server_default, existing_nullable=existing_nullable, existing_comment=existing_comment, modify_name=new_column_name, modify_type=type_, modify_server_default=server_default, modify_nullable=nullable, modify_comment=comment, **kw, ) return operations.invoke(alt) @classmethod def batch_alter_column( cls, operations: BatchOperations, column_name: str, nullable: Optional[bool] = None, comment: bool = False, server_default: Union["Function", bool] = False, new_column_name: Optional[str] = None, type_: Optional[Union["TypeEngine", Type["TypeEngine"]]] = None, existing_type: Optional[ Union["TypeEngine", Type["TypeEngine"]] ] = None, existing_server_default: bool = False, existing_nullable: None = None, existing_comment: None = None, insert_before: None = None, insert_after: None = None, **kw ) -> Optional["Table"]: """Issue an "alter column" instruction using the current batch migration context. Parameters are the same as that of :meth:`.Operations.alter_column`, as well as the following option(s): :param insert_before: String name of an existing column which this column should be placed before, when creating the new table. .. versionadded:: 1.4.0 :param insert_after: String name of an existing column which this column should be placed after, when creating the new table. If both :paramref:`.BatchOperations.alter_column.insert_before` and :paramref:`.BatchOperations.alter_column.insert_after` are omitted, the column is inserted after the last existing column in the table. .. versionadded:: 1.4.0 .. 
seealso:: :meth:`.Operations.alter_column` """ alt = cls( operations.impl.table_name, column_name, schema=operations.impl.schema, existing_type=existing_type, existing_server_default=existing_server_default, existing_nullable=existing_nullable, existing_comment=existing_comment, modify_name=new_column_name, modify_type=type_, modify_server_default=server_default, modify_nullable=nullable, modify_comment=comment, **kw, ) return operations.invoke(alt) @Operations.register_operation("add_column") @BatchOperations.register_operation("add_column", "batch_add_column") class AddColumnOp(AlterTableOp): """Represent an add column operation.""" def __init__( self, table_name: str, column: "Column", schema: Optional[str] = None, **kw ) -> None: super(AddColumnOp, self).__init__(table_name, schema=schema) self.column = column self.kw = kw def reverse(self) -> "DropColumnOp": return DropColumnOp.from_column_and_tablename( self.schema, self.table_name, self.column ) def to_diff_tuple( self, ) -> Tuple[str, Optional[str], str, "Column"]: return ("add_column", self.schema, self.table_name, self.column) def to_column(self) -> "Column": return self.column @classmethod def from_column(cls, col: "Column") -> "AddColumnOp": return cls(col.table.name, col, schema=col.table.schema) @classmethod def from_column_and_tablename( cls, schema: Optional[str], tname: str, col: "Column", ) -> "AddColumnOp": return cls(tname, col, schema=schema) @classmethod def add_column( cls, operations: "Operations", table_name: str, column: "Column", schema: Optional[str] = None, ) -> Optional["Table"]: """Issue an "add column" instruction using the current migration context. e.g.:: from alembic import op from sqlalchemy import Column, String op.add_column('organization', Column('name', String()) ) The provided :class:`~sqlalchemy.schema.Column` object can also specify a :class:`~sqlalchemy.schema.ForeignKey`, referencing a remote table name. Alembic will automatically generate a stub "referenced" table and emit a second ALTER statement in order to add the constraint separately:: from alembic import op from sqlalchemy import Column, INTEGER, ForeignKey op.add_column('organization', Column('account_id', INTEGER, ForeignKey('accounts.id')) ) Note that this statement uses the :class:`~sqlalchemy.schema.Column` construct as is from the SQLAlchemy library. In particular, default values to be created on the database side are specified using the ``server_default`` parameter, and not ``default`` which only specifies Python-side defaults:: from alembic import op from sqlalchemy import Column, TIMESTAMP, func # specify "DEFAULT NOW" along with the column add op.add_column('account', Column('timestamp', TIMESTAMP, server_default=func.now()) ) :param table_name: String name of the parent table. :param column: a :class:`sqlalchemy.schema.Column` object representing the new column. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. """ op = cls(table_name, column, schema=schema) return operations.invoke(op) @classmethod def batch_add_column( cls, operations: "BatchOperations", column: "Column", insert_before: Optional[str] = None, insert_after: Optional[str] = None, ) -> Optional["Table"]: """Issue an "add column" instruction using the current batch migration context. .. 
seealso:: :meth:`.Operations.add_column` """ kw = {} if insert_before: kw["insert_before"] = insert_before if insert_after: kw["insert_after"] = insert_after op = cls( operations.impl.table_name, column, schema=operations.impl.schema, **kw, ) return operations.invoke(op) @Operations.register_operation("drop_column") @BatchOperations.register_operation("drop_column", "batch_drop_column") class DropColumnOp(AlterTableOp): """Represent a drop column operation.""" def __init__( self, table_name: str, column_name: str, schema: Optional[str] = None, _reverse: Optional["AddColumnOp"] = None, **kw ) -> None: super(DropColumnOp, self).__init__(table_name, schema=schema) self.column_name = column_name self.kw = kw self._reverse = _reverse def to_diff_tuple( self, ) -> Tuple[str, Optional[str], str, "Column"]: return ( "remove_column", self.schema, self.table_name, self.to_column(), ) def reverse(self) -> "AddColumnOp": if self._reverse is None: raise ValueError( "operation is not reversible; " "original column is not present" ) return AddColumnOp.from_column_and_tablename( self.schema, self.table_name, self._reverse.column ) @classmethod def from_column_and_tablename( cls, schema: Optional[str], tname: str, col: "Column", ) -> "DropColumnOp": return cls( tname, col.name, schema=schema, _reverse=AddColumnOp.from_column_and_tablename(schema, tname, col), ) def to_column( self, migration_context: Optional["MigrationContext"] = None ) -> "Column": if self._reverse is not None: return self._reverse.column schema_obj = schemaobj.SchemaObjects(migration_context) return schema_obj.column(self.column_name, NULLTYPE) @classmethod def drop_column( cls, operations: "Operations", table_name: str, column_name: str, schema: Optional[str] = None, **kw ) -> Optional["Table"]: """Issue a "drop column" instruction using the current migration context. e.g.:: drop_column('organization', 'account_id') :param table_name: name of table :param column_name: name of column :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. :param mssql_drop_check: Optional boolean. When ``True``, on Microsoft SQL Server only, first drop the CHECK constraint on the column using a SQL-script-compatible block that selects into a @variable from sys.check_constraints, then exec's a separate DROP CONSTRAINT for that constraint. :param mssql_drop_default: Optional boolean. When ``True``, on Microsoft SQL Server only, first drop the DEFAULT constraint on the column using a SQL-script-compatible block that selects into a @variable from sys.default_constraints, then exec's a separate DROP CONSTRAINT for that default. :param mssql_drop_foreign_key: Optional boolean. When ``True``, on Microsoft SQL Server only, first drop a single FOREIGN KEY constraint on the column using a SQL-script-compatible block that selects into a @variable from sys.foreign_keys/sys.foreign_key_columns, then exec's a separate DROP CONSTRAINT for that default. Only works if the column has exactly one FK constraint which refers to it, at the moment. """ op = cls(table_name, column_name, schema=schema, **kw) return operations.invoke(op) @classmethod def batch_drop_column( cls, operations: "BatchOperations", column_name: str, **kw ) -> Optional["Table"]: """Issue a "drop column" instruction using the current batch migration context. .. 
seealso:: :meth:`.Operations.drop_column` """ op = cls( operations.impl.table_name, column_name, schema=operations.impl.schema, **kw, ) return operations.invoke(op) @Operations.register_operation("bulk_insert") class BulkInsertOp(MigrateOperation): """Represent a bulk insert operation.""" def __init__( self, table: Union["Table", "TableClause"], rows: List[dict], multiinsert: bool = True, ) -> None: self.table = table self.rows = rows self.multiinsert = multiinsert @classmethod def bulk_insert( cls, operations: Operations, table: Union["Table", "TableClause"], rows: List[dict], multiinsert: bool = True, ) -> None: """Issue a "bulk insert" operation using the current migration context. This provides a means of representing an INSERT of multiple rows which works equally well in the context of executing on a live connection as well as that of generating a SQL script. In the case of a SQL script, the values are rendered inline into the statement. e.g.:: from alembic import op from datetime import date from sqlalchemy.sql import table, column from sqlalchemy import String, Integer, Date # Create an ad-hoc table to use for the insert statement. accounts_table = table('account', column('id', Integer), column('name', String), column('create_date', Date) ) op.bulk_insert(accounts_table, [ {'id':1, 'name':'John Smith', 'create_date':date(2010, 10, 5)}, {'id':2, 'name':'Ed Williams', 'create_date':date(2007, 5, 27)}, {'id':3, 'name':'Wendy Jones', 'create_date':date(2008, 8, 15)}, ] ) When using --sql mode, some datatypes may not render inline automatically, such as dates and other special types. When this issue is present, :meth:`.Operations.inline_literal` may be used:: op.bulk_insert(accounts_table, [ {'id':1, 'name':'John Smith', 'create_date':op.inline_literal("2010-10-05")}, {'id':2, 'name':'Ed Williams', 'create_date':op.inline_literal("2007-05-27")}, {'id':3, 'name':'Wendy Jones', 'create_date':op.inline_literal("2008-08-15")}, ], multiinsert=False ) When using :meth:`.Operations.inline_literal` in conjunction with :meth:`.Operations.bulk_insert`, in order for the statement to work in "online" (e.g. non --sql) mode, the :paramref:`~.Operations.bulk_insert.multiinsert` flag should be set to ``False``, which will have the effect of individual INSERT statements being emitted to the database, each with a distinct VALUES clause, so that the "inline" values can still be rendered, rather than attempting to pass the values as bound parameters. :param table: a table object which represents the target of the INSERT. :param rows: a list of dictionaries indicating rows. :param multiinsert: when at its default of True and --sql mode is not enabled, the INSERT statement will be executed using "executemany()" style, where all elements in the list of dictionaries are passed as bound parameters in a single list. Setting this to False results in individual INSERT statements being emitted per parameter set, and is needed in those cases where non-literal values are present in the parameter sets. 
""" op = cls(table, rows, multiinsert=multiinsert) operations.invoke(op) @Operations.register_operation("execute") class ExecuteSQLOp(MigrateOperation): """Represent an execute SQL operation.""" def __init__( self, sqltext: Union["Update", str, "Insert", "TextClause"], execution_options: None = None, ) -> None: self.sqltext = sqltext self.execution_options = execution_options @classmethod def execute( cls, operations: Operations, sqltext: Union[str, "TextClause", "Update"], execution_options: None = None, ) -> Optional["Table"]: r"""Execute the given SQL using the current migration context. The given SQL can be a plain string, e.g.:: op.execute("INSERT INTO table (foo) VALUES ('some value')") Or it can be any kind of Core SQL Expression construct, such as below where we use an update construct:: from sqlalchemy.sql import table, column from sqlalchemy import String from alembic import op account = table('account', column('name', String) ) op.execute( account.update().\\ where(account.c.name==op.inline_literal('account 1')).\\ values({'name':op.inline_literal('account 2')}) ) Above, we made use of the SQLAlchemy :func:`sqlalchemy.sql.expression.table` and :func:`sqlalchemy.sql.expression.column` constructs to make a brief, ad-hoc table construct just for our UPDATE statement. A full :class:`~sqlalchemy.schema.Table` construct of course works perfectly fine as well, though note it's a recommended practice to at least ensure the definition of a table is self-contained within the migration script, rather than imported from a module that may break compatibility with older migrations. In a SQL script context, the statement is emitted directly to the output stream. There is *no* return result, however, as this function is oriented towards generating a change script that can run in "offline" mode. Additionally, parameterized statements are discouraged here, as they *will not work* in offline mode. Above, we use :meth:`.inline_literal` where parameters are to be used. For full interaction with a connected database where parameters can also be used normally, use the "bind" available from the context:: from alembic import op connection = op.get_bind() connection.execute( account.update().where(account.c.name=='account 1'). values({"name": "account 2"}) ) Additionally, when passing the statement as a plain string, it is first coerceed into a :func:`sqlalchemy.sql.expression.text` construct before being passed along. In the less likely case that the literal SQL string contains a colon, it must be escaped with a backslash, as:: op.execute("INSERT INTO table (foo) VALUES ('\:colon_value')") :param sqltext: Any legal SQLAlchemy expression, including: * a string * a :func:`sqlalchemy.sql.expression.text` construct. * a :func:`sqlalchemy.sql.expression.insert` construct. * a :func:`sqlalchemy.sql.expression.update`, :func:`sqlalchemy.sql.expression.insert`, or :func:`sqlalchemy.sql.expression.delete` construct. * Pretty much anything that's "executable" as described in :ref:`sqlexpression_toplevel`. .. note:: when passing a plain string, the statement is coerced into a :func:`sqlalchemy.sql.expression.text` construct. This construct considers symbols with colons, e.g. ``:foo`` to be bound parameters. To avoid this, ensure that colon symbols are escaped, e.g. ``\:foo``. :param execution_options: Optional dictionary of execution options, will be passed to :meth:`sqlalchemy.engine.Connection.execution_options`. 
""" op = cls(sqltext, execution_options=execution_options) return operations.invoke(op) class OpContainer(MigrateOperation): """Represent a sequence of operations operation.""" def __init__(self, ops: Sequence[MigrateOperation] = ()) -> None: self.ops = list(ops) def is_empty(self) -> bool: return not self.ops def as_diffs(self) -> Any: return list(OpContainer._ops_as_diffs(self)) @classmethod def _ops_as_diffs( cls, migrations: "OpContainer" ) -> Iterator[Tuple[Any, ...]]: for op in migrations.ops: if hasattr(op, "ops"): for sub_op in cls._ops_as_diffs(cast("OpContainer", op)): yield sub_op else: yield op.to_diff_tuple() class ModifyTableOps(OpContainer): """Contains a sequence of operations that all apply to a single Table.""" def __init__( self, table_name: str, ops: Sequence[MigrateOperation], schema: Optional[str] = None, ) -> None: super(ModifyTableOps, self).__init__(ops) self.table_name = table_name self.schema = schema def reverse(self) -> "ModifyTableOps": return ModifyTableOps( self.table_name, ops=list(reversed([op.reverse() for op in self.ops])), schema=self.schema, ) class UpgradeOps(OpContainer): """contains a sequence of operations that would apply to the 'upgrade' stream of a script. .. seealso:: :ref:`customizing_revision` """ def __init__( self, ops: Sequence[MigrateOperation] = (), upgrade_token: str = "upgrades", ) -> None: super(UpgradeOps, self).__init__(ops=ops) self.upgrade_token = upgrade_token def reverse_into(self, downgrade_ops: "DowngradeOps") -> "DowngradeOps": downgrade_ops.ops[:] = list( # type:ignore[index] reversed([op.reverse() for op in self.ops]) ) return downgrade_ops def reverse(self) -> "DowngradeOps": return self.reverse_into(DowngradeOps(ops=[])) class DowngradeOps(OpContainer): """contains a sequence of operations that would apply to the 'downgrade' stream of a script. .. seealso:: :ref:`customizing_revision` """ def __init__( self, ops: Sequence[MigrateOperation] = (), downgrade_token: str = "downgrades", ) -> None: super(DowngradeOps, self).__init__(ops=ops) self.downgrade_token = downgrade_token def reverse(self): return UpgradeOps( ops=list(reversed([op.reverse() for op in self.ops])) ) class MigrationScript(MigrateOperation): """represents a migration script. E.g. when autogenerate encounters this object, this corresponds to the production of an actual script file. A normal :class:`.MigrationScript` object would contain a single :class:`.UpgradeOps` and a single :class:`.DowngradeOps` directive. These are accessible via the ``.upgrade_ops`` and ``.downgrade_ops`` attributes. In the case of an autogenerate operation that runs multiple times, such as the multiple database example in the "multidb" template, the ``.upgrade_ops`` and ``.downgrade_ops`` attributes are disabled, and instead these objects should be accessed via the ``.upgrade_ops_list`` and ``.downgrade_ops_list`` list-based attributes. These latter attributes are always available at the very least as single-element lists. .. 
seealso:: :ref:`customizing_revision` """ _needs_render: Optional[bool] def __init__( self, rev_id: Optional[str], upgrade_ops: "UpgradeOps", downgrade_ops: "DowngradeOps", message: Optional[str] = None, imports: Set[str] = set(), head: Optional[str] = None, splice: Optional[bool] = None, branch_label: Optional[str] = None, version_path: Optional[str] = None, depends_on: Optional[Union[str, Sequence[str]]] = None, ) -> None: self.rev_id = rev_id self.message = message self.imports = imports self.head = head self.splice = splice self.branch_label = branch_label self.version_path = version_path self.depends_on = depends_on self.upgrade_ops = upgrade_ops self.downgrade_ops = downgrade_ops @property def upgrade_ops(self): """An instance of :class:`.UpgradeOps`. .. seealso:: :attr:`.MigrationScript.upgrade_ops_list` """ if len(self._upgrade_ops) > 1: raise ValueError( "This MigrationScript instance has a multiple-entry " "list for UpgradeOps; please use the " "upgrade_ops_list attribute." ) elif not self._upgrade_ops: return None else: return self._upgrade_ops[0] @upgrade_ops.setter def upgrade_ops(self, upgrade_ops): self._upgrade_ops = util.to_list(upgrade_ops) for elem in self._upgrade_ops: assert isinstance(elem, UpgradeOps) @property def downgrade_ops(self): """An instance of :class:`.DowngradeOps`. .. seealso:: :attr:`.MigrationScript.downgrade_ops_list` """ if len(self._downgrade_ops) > 1: raise ValueError( "This MigrationScript instance has a multiple-entry " "list for DowngradeOps; please use the " "downgrade_ops_list attribute." ) elif not self._downgrade_ops: return None else: return self._downgrade_ops[0] @downgrade_ops.setter def downgrade_ops(self, downgrade_ops): self._downgrade_ops = util.to_list(downgrade_ops) for elem in self._downgrade_ops: assert isinstance(elem, DowngradeOps) @property def upgrade_ops_list(self) -> List["UpgradeOps"]: """A list of :class:`.UpgradeOps` instances. This is used in place of the :attr:`.MigrationScript.upgrade_ops` attribute when dealing with a revision operation that does multiple autogenerate passes. """ return self._upgrade_ops @property def downgrade_ops_list(self) -> List["DowngradeOps"]: """A list of :class:`.DowngradeOps` instances. This is used in place of the :attr:`.MigrationScript.downgrade_ops` attribute when dealing with a revision operation that does multiple autogenerate passes. """ return self._downgrade_ops alembic-rel_1_7_6/alembic/operations/schemaobj.py000066400000000000000000000214061417624537100222260ustar00rootroot00000000000000from typing import Any from typing import Dict from typing import List from typing import Optional from typing import Sequence from typing import Tuple from typing import TYPE_CHECKING from typing import Union from sqlalchemy import schema as sa_schema from sqlalchemy.sql.schema import Column from sqlalchemy.sql.schema import Constraint from sqlalchemy.sql.schema import Index from sqlalchemy.types import Integer from sqlalchemy.types import NULLTYPE from .. 
import util from ..util import sqla_compat if TYPE_CHECKING: from sqlalchemy.sql.elements import ColumnElement from sqlalchemy.sql.elements import TextClause from sqlalchemy.sql.schema import CheckConstraint from sqlalchemy.sql.schema import ForeignKey from sqlalchemy.sql.schema import ForeignKeyConstraint from sqlalchemy.sql.schema import MetaData from sqlalchemy.sql.schema import PrimaryKeyConstraint from sqlalchemy.sql.schema import Table from sqlalchemy.sql.schema import UniqueConstraint from sqlalchemy.sql.type_api import TypeEngine from ..runtime.migration import MigrationContext class SchemaObjects: def __init__( self, migration_context: Optional["MigrationContext"] = None ) -> None: self.migration_context = migration_context def primary_key_constraint( self, name: Optional[str], table_name: str, cols: Sequence[str], schema: Optional[str] = None, **dialect_kw ) -> "PrimaryKeyConstraint": m = self.metadata() columns = [sa_schema.Column(n, NULLTYPE) for n in cols] t = sa_schema.Table(table_name, m, *columns, schema=schema) p = sa_schema.PrimaryKeyConstraint( *[t.c[n] for n in cols], name=name, **dialect_kw ) return p def foreign_key_constraint( self, name: Optional[str], source: str, referent: str, local_cols: List[str], remote_cols: List[str], onupdate: Optional[str] = None, ondelete: Optional[str] = None, deferrable: Optional[bool] = None, source_schema: Optional[str] = None, referent_schema: Optional[str] = None, initially: Optional[str] = None, match: Optional[str] = None, **dialect_kw ) -> "ForeignKeyConstraint": m = self.metadata() if source == referent and source_schema == referent_schema: t1_cols = local_cols + remote_cols else: t1_cols = local_cols sa_schema.Table( referent, m, *[sa_schema.Column(n, NULLTYPE) for n in remote_cols], schema=referent_schema ) t1 = sa_schema.Table( source, m, *[sa_schema.Column(n, NULLTYPE) for n in t1_cols], schema=source_schema ) tname = ( "%s.%s" % (referent_schema, referent) if referent_schema else referent ) dialect_kw["match"] = match f = sa_schema.ForeignKeyConstraint( local_cols, ["%s.%s" % (tname, n) for n in remote_cols], name=name, onupdate=onupdate, ondelete=ondelete, deferrable=deferrable, initially=initially, **dialect_kw ) t1.append_constraint(f) return f def unique_constraint( self, name: Optional[str], source: str, local_cols: Sequence[str], schema: Optional[str] = None, **kw ) -> "UniqueConstraint": t = sa_schema.Table( source, self.metadata(), *[sa_schema.Column(n, NULLTYPE) for n in local_cols], schema=schema ) kw["name"] = name uq = sa_schema.UniqueConstraint(*[t.c[n] for n in local_cols], **kw) # TODO: need event tests to ensure the event # is fired off here t.append_constraint(uq) return uq def check_constraint( self, name: Optional[str], source: str, condition: Union[str, "TextClause", "ColumnElement[Any]"], schema: Optional[str] = None, **kw ) -> Union["CheckConstraint"]: t = sa_schema.Table( source, self.metadata(), sa_schema.Column("x", Integer), schema=schema, ) ck = sa_schema.CheckConstraint(condition, name=name, **kw) t.append_constraint(ck) return ck def generic_constraint( self, name: Optional[str], table_name: str, type_: Optional[str], schema: Optional[str] = None, **kw ) -> Any: t = self.table(table_name, schema=schema) types: Dict[Optional[str], Any] = { "foreignkey": lambda name: sa_schema.ForeignKeyConstraint( [], [], name=name ), "primary": sa_schema.PrimaryKeyConstraint, "unique": sa_schema.UniqueConstraint, "check": lambda name: sa_schema.CheckConstraint("", name=name), None: sa_schema.Constraint, } try: 
const = types[type_] except KeyError as ke: raise TypeError( "'type' can be one of %s" % ", ".join(sorted(repr(x) for x in types)) ) from ke else: const = const(name=name) t.append_constraint(const) return const def metadata(self) -> "MetaData": kw = {} if ( self.migration_context is not None and "target_metadata" in self.migration_context.opts ): mt = self.migration_context.opts["target_metadata"] if hasattr(mt, "naming_convention"): kw["naming_convention"] = mt.naming_convention return sa_schema.MetaData(**kw) def table(self, name: str, *columns, **kw) -> "Table": m = self.metadata() cols = [ sqla_compat._copy(c) if c.table is not None else c for c in columns if isinstance(c, Column) ] # these flags have already added their UniqueConstraint / # Index objects to the table, so flip them off here. # SQLAlchemy tometadata() avoids this instead by preserving the # flags and skipping the constraints that have _type_bound on them, # but for a migration we'd rather list out the constraints # explicitly. _constraints_included = kw.pop("_constraints_included", False) if _constraints_included: for c in cols: c.unique = c.index = False t = sa_schema.Table(name, m, *cols, **kw) constraints = [ sqla_compat._copy(elem, target_table=t) if getattr(elem, "parent", None) is not None else elem for elem in columns if isinstance(elem, (Constraint, Index)) ] for const in constraints: t.append_constraint(const) for f in t.foreign_keys: self._ensure_table_for_fk(m, f) return t def column(self, name: str, type_: "TypeEngine", **kw) -> "Column": return sa_schema.Column(name, type_, **kw) def index( self, name: str, tablename: Optional[str], columns: Sequence[Union[str, "TextClause", "ColumnElement[Any]"]], schema: Optional[str] = None, **kw ) -> "Index": t = sa_schema.Table( tablename or "no_table", self.metadata(), schema=schema, ) kw["_table"] = t idx = sa_schema.Index( name, *[util.sqla_compat._textual_index_column(t, n) for n in columns], **kw ) return idx def _parse_table_key(self, table_key: str) -> Tuple[Optional[str], str]: if "." in table_key: tokens = table_key.split(".") sname: Optional[str] = ".".join(tokens[0:-1]) tname = tokens[-1] else: tname = table_key sname = None return (sname, tname) def _ensure_table_for_fk( self, metadata: "MetaData", fk: "ForeignKey" ) -> None: """create a placeholder Table object for the referent of a ForeignKey. """ if isinstance(fk._colspec, str): # type:ignore[attr-defined] table_key, cname = fk._colspec.rsplit( # type:ignore[attr-defined] ".", 1 ) sname, tname = self._parse_table_key(table_key) if table_key not in metadata.tables: rel_t = sa_schema.Table(tname, metadata, schema=sname) else: rel_t = metadata.tables[table_key] if cname not in rel_t.c: rel_t.append_column(sa_schema.Column(cname, NULLTYPE)) alembic-rel_1_7_6/alembic/operations/toimpl.py000066400000000000000000000145651417624537100216070ustar00rootroot00000000000000from typing import TYPE_CHECKING from sqlalchemy import schema as sa_schema from . 
import ops from .base import Operations from ..util.sqla_compat import _copy if TYPE_CHECKING: from sqlalchemy.sql.schema import Table @Operations.implementation_for(ops.AlterColumnOp) def alter_column( operations: "Operations", operation: "ops.AlterColumnOp" ) -> None: compiler = operations.impl.dialect.statement_compiler( operations.impl.dialect, None ) existing_type = operation.existing_type existing_nullable = operation.existing_nullable existing_server_default = operation.existing_server_default type_ = operation.modify_type column_name = operation.column_name table_name = operation.table_name schema = operation.schema server_default = operation.modify_server_default new_column_name = operation.modify_name nullable = operation.modify_nullable comment = operation.modify_comment existing_comment = operation.existing_comment def _count_constraint(constraint): return not isinstance(constraint, sa_schema.PrimaryKeyConstraint) and ( not constraint._create_rule or constraint._create_rule(compiler) ) if existing_type and type_: t = operations.schema_obj.table( table_name, sa_schema.Column(column_name, existing_type), schema=schema, ) for constraint in t.constraints: if _count_constraint(constraint): operations.impl.drop_constraint(constraint) operations.impl.alter_column( table_name, column_name, nullable=nullable, server_default=server_default, name=new_column_name, type_=type_, schema=schema, existing_type=existing_type, existing_server_default=existing_server_default, existing_nullable=existing_nullable, comment=comment, existing_comment=existing_comment, **operation.kw ) if type_: t = operations.schema_obj.table( table_name, operations.schema_obj.column(column_name, type_), schema=schema, ) for constraint in t.constraints: if _count_constraint(constraint): operations.impl.add_constraint(constraint) @Operations.implementation_for(ops.DropTableOp) def drop_table(operations: "Operations", operation: "ops.DropTableOp") -> None: operations.impl.drop_table( operation.to_table(operations.migration_context) ) @Operations.implementation_for(ops.DropColumnOp) def drop_column( operations: "Operations", operation: "ops.DropColumnOp" ) -> None: column = operation.to_column(operations.migration_context) operations.impl.drop_column( operation.table_name, column, schema=operation.schema, **operation.kw ) @Operations.implementation_for(ops.CreateIndexOp) def create_index( operations: "Operations", operation: "ops.CreateIndexOp" ) -> None: idx = operation.to_index(operations.migration_context) operations.impl.create_index(idx) @Operations.implementation_for(ops.DropIndexOp) def drop_index(operations: "Operations", operation: "ops.DropIndexOp") -> None: operations.impl.drop_index( operation.to_index(operations.migration_context) ) @Operations.implementation_for(ops.CreateTableOp) def create_table( operations: "Operations", operation: "ops.CreateTableOp" ) -> "Table": table = operation.to_table(operations.migration_context) operations.impl.create_table(table) return table @Operations.implementation_for(ops.RenameTableOp) def rename_table( operations: "Operations", operation: "ops.RenameTableOp" ) -> None: operations.impl.rename_table( operation.table_name, operation.new_table_name, schema=operation.schema ) @Operations.implementation_for(ops.CreateTableCommentOp) def create_table_comment( operations: "Operations", operation: "ops.CreateTableCommentOp" ) -> None: table = operation.to_table(operations.migration_context) operations.impl.create_table_comment(table) 
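# Illustrative sketch (not part of the library source): how the table-comment
# operations implemented immediately above and below would typically be invoked
# from a migration script. The "account" table name and the comment text are
# assumptions made only for this example:
#
#     def upgrade():
#         op.create_table_comment("account", "Stores user accounts")
#
#     def downgrade():
#         op.drop_table_comment(
#             "account", existing_comment="Stores user accounts"
#         )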
@Operations.implementation_for(ops.DropTableCommentOp) def drop_table_comment( operations: "Operations", operation: "ops.DropTableCommentOp" ) -> None: table = operation.to_table(operations.migration_context) operations.impl.drop_table_comment(table) @Operations.implementation_for(ops.AddColumnOp) def add_column(operations: "Operations", operation: "ops.AddColumnOp") -> None: table_name = operation.table_name column = operation.column schema = operation.schema kw = operation.kw if column.table is not None: column = _copy(column) t = operations.schema_obj.table(table_name, column, schema=schema) operations.impl.add_column(table_name, column, schema=schema, **kw) for constraint in t.constraints: if not isinstance(constraint, sa_schema.PrimaryKeyConstraint): operations.impl.add_constraint(constraint) for index in t.indexes: operations.impl.create_index(index) with_comment = ( operations.impl.dialect.supports_comments and not operations.impl.dialect.inline_comments ) comment = column.comment if comment and with_comment: operations.impl.create_column_comment(column) @Operations.implementation_for(ops.AddConstraintOp) def create_constraint( operations: "Operations", operation: "ops.AddConstraintOp" ) -> None: operations.impl.add_constraint( operation.to_constraint(operations.migration_context) ) @Operations.implementation_for(ops.DropConstraintOp) def drop_constraint( operations: "Operations", operation: "ops.DropConstraintOp" ) -> None: operations.impl.drop_constraint( operations.schema_obj.generic_constraint( operation.constraint_name, operation.table_name, operation.constraint_type, schema=operation.schema, ) ) @Operations.implementation_for(ops.BulkInsertOp) def bulk_insert( operations: "Operations", operation: "ops.BulkInsertOp" ) -> None: operations.impl.bulk_insert( operation.table, operation.rows, multiinsert=operation.multiinsert ) @Operations.implementation_for(ops.ExecuteSQLOp) def execute_sql( operations: "Operations", operation: "ops.ExecuteSQLOp" ) -> None: operations.migration_context.impl.execute( operation.sqltext, execution_options=operation.execution_options ) alembic-rel_1_7_6/alembic/py.typed000066400000000000000000000000001417624537100172200ustar00rootroot00000000000000alembic-rel_1_7_6/alembic/runtime/000077500000000000000000000000001417624537100172165ustar00rootroot00000000000000alembic-rel_1_7_6/alembic/runtime/__init__.py000066400000000000000000000000001417624537100213150ustar00rootroot00000000000000alembic-rel_1_7_6/alembic/runtime/environment.py000066400000000000000000001131731417624537100221420ustar00rootroot00000000000000from typing import Callable from typing import ContextManager from typing import Dict from typing import List from typing import Optional from typing import overload from typing import TextIO from typing import Tuple from typing import TYPE_CHECKING from typing import Union from .migration import MigrationContext from .. import util from ..operations import Operations if TYPE_CHECKING: from typing import Literal from sqlalchemy.engine.base import Connection from sqlalchemy.sql.schema import MetaData from .migration import _ProxyTransaction from ..config import Config from ..script.base import ScriptDirectory _RevNumber = Optional[Union[str, Tuple[str, ...]]] class EnvironmentContext(util.ModuleClsProxy): """A configurational facade made available in an ``env.py`` script. 
The :class:`.EnvironmentContext` acts as a *facade* to the more nuts-and-bolts objects of :class:`.MigrationContext` as well as certain aspects of :class:`.Config`, within the context of the ``env.py`` script that is invoked by most Alembic commands. :class:`.EnvironmentContext` is normally instantiated when a command in :mod:`alembic.command` is run. It then makes itself available in the ``alembic.context`` module for the scope of the command. From within an ``env.py`` script, the current :class:`.EnvironmentContext` is available by importing this module. :class:`.EnvironmentContext` also supports programmatic usage. At this level, it acts as a Python context manager, that is, is intended to be used using the ``with:`` statement. A typical use of :class:`.EnvironmentContext`:: from alembic.config import Config from alembic.script import ScriptDirectory config = Config() config.set_main_option("script_location", "myapp:migrations") script = ScriptDirectory.from_config(config) def my_function(rev, context): '''do something with revision "rev", which will be the current database revision, and "context", which is the MigrationContext that the env.py will create''' with EnvironmentContext( config, script, fn = my_function, as_sql = False, starting_rev = 'base', destination_rev = 'head', tag = "sometag" ): script.run_env() The above script will invoke the ``env.py`` script within the migration environment. If and when ``env.py`` calls :meth:`.MigrationContext.run_migrations`, the ``my_function()`` function above will be called by the :class:`.MigrationContext`, given the context itself as well as the current revision in the database. .. note:: For most API usages other than full blown invocation of migration scripts, the :class:`.MigrationContext` and :class:`.ScriptDirectory` objects can be created and used directly. The :class:`.EnvironmentContext` object is *only* needed when you need to actually invoke the ``env.py`` module present in the migration environment. """ _migration_context: Optional["MigrationContext"] = None config: "Config" = None # type:ignore[assignment] """An instance of :class:`.Config` representing the configuration file contents as well as other variables set programmatically within it.""" script: "ScriptDirectory" = None # type:ignore[assignment] """An instance of :class:`.ScriptDirectory` which provides programmatic access to version files within the ``versions/`` directory. """ def __init__( self, config: "Config", script: "ScriptDirectory", **kw ) -> None: r"""Construct a new :class:`.EnvironmentContext`. :param config: a :class:`.Config` instance. :param script: a :class:`.ScriptDirectory` instance. :param \**kw: keyword options that will be ultimately passed along to the :class:`.MigrationContext` when :meth:`.EnvironmentContext.configure` is called. """ self.config = config self.script = script self.context_opts = kw def __enter__(self) -> "EnvironmentContext": """Establish a context which provides a :class:`.EnvironmentContext` object to env.py scripts. The :class:`.EnvironmentContext` will be made available as ``from alembic import context``. """ self._install_proxy() return self def __exit__(self, *arg, **kw) -> None: self._remove_proxy() def is_offline_mode(self) -> bool: """Return True if the current migrations environment is running in "offline mode". This is ``True`` or ``False`` depending on the ``--sql`` flag passed. This function does not require that the :class:`.MigrationContext` has been configured. 
""" return self.context_opts.get("as_sql", False) def is_transactional_ddl(self): """Return True if the context is configured to expect a transactional DDL capable backend. This defaults to the type of database in use, and can be overridden by the ``transactional_ddl`` argument to :meth:`.configure` This function requires that a :class:`.MigrationContext` has first been made available via :meth:`.configure`. """ return self.get_context().impl.transactional_ddl def requires_connection(self) -> bool: return not self.is_offline_mode() def get_head_revision(self) -> _RevNumber: """Return the hex identifier of the 'head' script revision. If the script directory has multiple heads, this method raises a :class:`.CommandError`; :meth:`.EnvironmentContext.get_head_revisions` should be preferred. This function does not require that the :class:`.MigrationContext` has been configured. .. seealso:: :meth:`.EnvironmentContext.get_head_revisions` """ return self.script.as_revision_number("head") def get_head_revisions(self) -> _RevNumber: """Return the hex identifier of the 'heads' script revision(s). This returns a tuple containing the version number of all heads in the script directory. This function does not require that the :class:`.MigrationContext` has been configured. """ return self.script.as_revision_number("heads") def get_starting_revision_argument(self) -> _RevNumber: """Return the 'starting revision' argument, if the revision was passed using ``start:end``. This is only meaningful in "offline" mode. Returns ``None`` if no value is available or was configured. This function does not require that the :class:`.MigrationContext` has been configured. """ if self._migration_context is not None: return self.script.as_revision_number( self.get_context()._start_from_rev ) elif "starting_rev" in self.context_opts: return self.script.as_revision_number( self.context_opts["starting_rev"] ) else: # this should raise only in the case that a command # is being run where the "starting rev" is never applicable; # this is to catch scripts which rely upon this in # non-sql mode or similar raise util.CommandError( "No starting revision argument is available." ) def get_revision_argument(self) -> _RevNumber: """Get the 'destination' revision argument. This is typically the argument passed to the ``upgrade`` or ``downgrade`` command. If it was specified as ``head``, the actual version number is returned; if specified as ``base``, ``None`` is returned. This function does not require that the :class:`.MigrationContext` has been configured. """ return self.script.as_revision_number( self.context_opts["destination_rev"] ) def get_tag_argument(self) -> Optional[str]: """Return the value passed for the ``--tag`` argument, if any. The ``--tag`` argument is not used directly by Alembic, but is available for custom ``env.py`` configurations that wish to use it; particularly for offline generation scripts that wish to generate tagged filenames. This function does not require that the :class:`.MigrationContext` has been configured. .. seealso:: :meth:`.EnvironmentContext.get_x_argument` - a newer and more open ended system of extending ``env.py`` scripts via the command line. """ return self.context_opts.get("tag", None) @overload def get_x_argument( # type:ignore[misc] self, as_dictionary: "Literal[False]" = ... ) -> List[str]: ... @overload def get_x_argument( # type:ignore[misc] self, as_dictionary: "Literal[True]" = ... ) -> Dict[str, str]: ... 
def get_x_argument(self, as_dictionary: bool = False): """Return the value(s) passed for the ``-x`` argument, if any. The ``-x`` argument is an open ended flag that allows any user-defined value or values to be passed on the command line, then available here for consumption by a custom ``env.py`` script. The return value is a list, returned directly from the ``argparse`` structure. If ``as_dictionary=True`` is passed, the ``x`` arguments are parsed using ``key=value`` format into a dictionary that is then returned. For example, to support passing a database URL on the command line, the standard ``env.py`` script can be modified like this:: cmd_line_url = context.get_x_argument( as_dictionary=True).get('dbname') if cmd_line_url: engine = create_engine(cmd_line_url) else: engine = engine_from_config( config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool) This then takes effect by running the ``alembic`` script as:: alembic -x dbname=postgresql://user:pass@host/dbname upgrade head This function does not require that the :class:`.MigrationContext` has been configured. .. seealso:: :meth:`.EnvironmentContext.get_tag_argument` :attr:`.Config.cmd_opts` """ if self.config.cmd_opts is not None: value = self.config.cmd_opts.x or [] else: value = [] if as_dictionary: value = dict(arg.split("=", 1) for arg in value) return value def configure( self, connection: Optional["Connection"] = None, url: Optional[str] = None, dialect_name: Optional[str] = None, dialect_opts: Optional[dict] = None, transactional_ddl: Optional[bool] = None, transaction_per_migration: bool = False, output_buffer: Optional[TextIO] = None, starting_rev: Optional[str] = None, tag: Optional[str] = None, template_args: Optional[dict] = None, render_as_batch: bool = False, target_metadata: Optional["MetaData"] = None, include_name: Optional[Callable] = None, include_object: Optional[Callable] = None, include_schemas: bool = False, process_revision_directives: Optional[Callable] = None, compare_type: bool = False, compare_server_default: bool = False, render_item: Optional[Callable] = None, literal_binds: bool = False, upgrade_token: str = "upgrades", downgrade_token: str = "downgrades", alembic_module_prefix: str = "op.", sqlalchemy_module_prefix: str = "sa.", user_module_prefix: Optional[str] = None, on_version_apply: Optional[Callable] = None, **kw ) -> None: """Configure a :class:`.MigrationContext` within this :class:`.EnvironmentContext` which will provide database connectivity and other configuration to a series of migration scripts. Many methods on :class:`.EnvironmentContext` require that this method has been called in order to function, as they ultimately need to have database access or at least access to the dialect in use. Those which do are documented as such. The important thing needed by :meth:`.configure` is a means to determine what kind of database dialect is in use. An actual connection to that database is needed only if the :class:`.MigrationContext` is to be used in "online" mode. If the :meth:`.is_offline_mode` function returns ``True``, then no connection is needed here. Otherwise, the ``connection`` parameter should be present as an instance of :class:`sqlalchemy.engine.Connection`. This function is typically called from the ``env.py`` script within a migration environment. It can be called multiple times for an invocation. 
The most recent :class:`~sqlalchemy.engine.Connection` for which it was called is the one that will be operated upon by the next call to :meth:`.run_migrations`. General parameters: :param connection: a :class:`~sqlalchemy.engine.Connection` to use for SQL execution in "online" mode. When present, is also used to determine the type of dialect in use. :param url: a string database url, or a :class:`sqlalchemy.engine.url.URL` object. The type of dialect to be used will be derived from this if ``connection`` is not passed. :param dialect_name: string name of a dialect, such as "postgresql", "mssql", etc. The type of dialect to be used will be derived from this if ``connection`` and ``url`` are not passed. :param dialect_opts: dictionary of options to be passed to dialect constructor. .. versionadded:: 1.0.12 :param transactional_ddl: Force the usage of "transactional" DDL on or off; this otherwise defaults to whether or not the dialect in use supports it. :param transaction_per_migration: if True, nest each migration script in a transaction rather than the full series of migrations to run. :param output_buffer: a file-like object that will be used for textual output when the ``--sql`` option is used to generate SQL scripts. Defaults to ``sys.stdout`` if not passed here and also not present on the :class:`.Config` object. The value here overrides that of the :class:`.Config` object. :param output_encoding: when using ``--sql`` to generate SQL scripts, apply this encoding to the string output. :param literal_binds: when using ``--sql`` to generate SQL scripts, pass through the ``literal_binds`` flag to the compiler so that any literal values that would ordinarily be bound parameters are converted to plain strings. .. warning:: Dialects can typically only handle simple datatypes like strings and numbers for auto-literal generation. Datatypes like dates, intervals, and others may still require manual formatting, typically using :meth:`.Operations.inline_literal`. .. note:: the ``literal_binds`` flag is ignored on SQLAlchemy versions prior to 0.8 where this feature is not supported. .. seealso:: :meth:`.Operations.inline_literal` :param starting_rev: Override the "starting revision" argument when using ``--sql`` mode. :param tag: a string tag for usage by custom ``env.py`` scripts. Set via the ``--tag`` option, can be overridden here. :param template_args: dictionary of template arguments which will be added to the template argument environment when running the "revision" command. Note that the script environment is only run within the "revision" command if the --autogenerate option is used, or if the option "revision_environment=true" is present in the alembic.ini file. :param version_table: The name of the Alembic version table. The default is ``'alembic_version'``. :param version_table_schema: Optional schema to place version table within. :param version_table_pk: boolean, whether the Alembic version table should use a primary key constraint for the "value" column; this only takes effect when the table is first created. Defaults to True; setting to False should not be necessary and is here for backwards compatibility reasons. :param on_version_apply: a callable or collection of callables to be run for each migration step. The callables will be run in the order they are given, once for each migration step, after the respective operation has been applied but before its transaction is finalized. 
Each callable accepts no positional arguments and the following keyword arguments: * ``ctx``: the :class:`.MigrationContext` running the migration, * ``step``: a :class:`.MigrationInfo` representing the step currently being applied, * ``heads``: a collection of version strings representing the current heads, * ``run_args``: the ``**kwargs`` passed to :meth:`.run_migrations`. Parameters specific to the autogenerate feature, when ``alembic revision`` is run with the ``--autogenerate`` feature: :param target_metadata: a :class:`sqlalchemy.schema.MetaData` object, or a sequence of :class:`~sqlalchemy.schema.MetaData` objects, that will be consulted during autogeneration. The tables present in each :class:`~sqlalchemy.schema.MetaData` will be compared against what is locally available on the target :class:`~sqlalchemy.engine.Connection` to produce candidate upgrade/downgrade operations. :param compare_type: Indicates type comparison behavior during an autogenerate operation. Defaults to ``False`` which disables type comparison. Set to ``True`` to turn on default type comparison, which has varied accuracy depending on backend. See :ref:`compare_types` for an example as well as information on other type comparison options. .. seealso:: :ref:`compare_types` :paramref:`.EnvironmentContext.configure.compare_server_default` :param compare_server_default: Indicates server default comparison behavior during an autogenerate operation. Defaults to ``False`` which disables server default comparison. Set to ``True`` to turn on server default comparison, which has varied accuracy depending on backend. To customize server default comparison behavior, a callable may be specified which can filter server default comparisons during an autogenerate operation. defaults during an autogenerate operation. The format of this callable is:: def my_compare_server_default(context, inspected_column, metadata_column, inspected_default, metadata_default, rendered_metadata_default): # return True if the defaults are different, # False if not, or None to allow the default implementation # to compare these defaults return None context.configure( # ... compare_server_default = my_compare_server_default ) ``inspected_column`` is a dictionary structure as returned by :meth:`sqlalchemy.engine.reflection.Inspector.get_columns`, whereas ``metadata_column`` is a :class:`sqlalchemy.schema.Column` from the local model environment. A return value of ``None`` indicates to allow default server default comparison to proceed. Note that some backends such as Postgresql actually execute the two defaults on the database side to compare for equivalence. .. seealso:: :paramref:`.EnvironmentContext.configure.compare_type` :param include_name: A callable function which is given the chance to return ``True`` or ``False`` for any database reflected object based on its name, including database schema names when the :paramref:`.EnvironmentContext.configure.include_schemas` flag is set to ``True``. The function accepts the following positional arguments: * ``name``: the name of the object, such as schema name or table name. Will be ``None`` when indicating the default schema name of the database connection. * ``type``: a string describing the type of object; currently ``"schema"``, ``"table"``, ``"column"``, ``"index"``, ``"unique_constraint"``, or ``"foreign_key_constraint"`` * ``parent_names``: a dictionary of "parent" object names, that are relative to the name being given. Keys in this dictionary may include: ``"schema_name"``, ``"table_name"``. 
E.g.:: def include_name(name, type_, parent_names): if type_ == "schema": return name in ["schema_one", "schema_two"] else: return True context.configure( # ... include_schemas = True, include_name = include_name ) .. versionadded:: 1.5 .. seealso:: :ref:`autogenerate_include_hooks` :paramref:`.EnvironmentContext.configure.include_object` :paramref:`.EnvironmentContext.configure.include_schemas` :param include_object: A callable function which is given the chance to return ``True`` or ``False`` for any object, indicating if the given object should be considered in the autogenerate sweep. The function accepts the following positional arguments: * ``object``: a :class:`~sqlalchemy.schema.SchemaItem` object such as a :class:`~sqlalchemy.schema.Table`, :class:`~sqlalchemy.schema.Column`, :class:`~sqlalchemy.schema.Index` :class:`~sqlalchemy.schema.UniqueConstraint`, or :class:`~sqlalchemy.schema.ForeignKeyConstraint` object * ``name``: the name of the object. This is typically available via ``object.name``. * ``type``: a string describing the type of object; currently ``"table"``, ``"column"``, ``"index"``, ``"unique_constraint"``, or ``"foreign_key_constraint"`` * ``reflected``: ``True`` if the given object was produced based on table reflection, ``False`` if it's from a local :class:`.MetaData` object. * ``compare_to``: the object being compared against, if available, else ``None``. E.g.:: def include_object(object, name, type_, reflected, compare_to): if (type_ == "column" and not reflected and object.info.get("skip_autogenerate", False)): return False else: return True context.configure( # ... include_object = include_object ) For the use case of omitting specific schemas from a target database when :paramref:`.EnvironmentContext.configure.include_schemas` is set to ``True``, the :attr:`~sqlalchemy.schema.Table.schema` attribute can be checked for each :class:`~sqlalchemy.schema.Table` object passed to the hook, however it is much more efficient to filter on schemas before reflection of objects takes place using the :paramref:`.EnvironmentContext.configure.include_name` hook. .. seealso:: :ref:`autogenerate_include_hooks` :paramref:`.EnvironmentContext.configure.include_name` :paramref:`.EnvironmentContext.configure.include_schemas` :param render_as_batch: if True, commands which alter elements within a table will be placed under a ``with batch_alter_table():`` directive, so that batch migrations will take place. .. seealso:: :ref:`batch_migrations` :param include_schemas: If True, autogenerate will scan across all schemas located by the SQLAlchemy :meth:`~sqlalchemy.engine.reflection.Inspector.get_schema_names` method, and include all differences in tables found across all those schemas. When using this option, you may want to also use the :paramref:`.EnvironmentContext.configure.include_name` parameter to specify a callable which can filter the tables/schemas that get included. .. seealso:: :ref:`autogenerate_include_hooks` :paramref:`.EnvironmentContext.configure.include_name` :paramref:`.EnvironmentContext.configure.include_object` :param render_item: Callable that can be used to override how any schema item, i.e. column, constraint, type, etc., is rendered for autogenerate. The callable receives a string describing the type of object, the object, and the autogen context. If it returns False, the default rendering method will be used. 
If it returns None, the item will not be rendered in the context of a Table construct, that is, can be used to skip columns or constraints within op.create_table():: def my_render_column(type_, col, autogen_context): if type_ == "column" and isinstance(col, MySpecialCol): return repr(col) else: return False context.configure( # ... render_item = my_render_column ) Available values for the type string include: ``"column"``, ``"primary_key"``, ``"foreign_key"``, ``"unique"``, ``"check"``, ``"type"``, ``"server_default"``. .. seealso:: :ref:`autogen_render_types` :param upgrade_token: When autogenerate completes, the text of the candidate upgrade operations will be present in this template variable when ``script.py.mako`` is rendered. Defaults to ``upgrades``. :param downgrade_token: When autogenerate completes, the text of the candidate downgrade operations will be present in this template variable when ``script.py.mako`` is rendered. Defaults to ``downgrades``. :param alembic_module_prefix: When autogenerate refers to Alembic :mod:`alembic.operations` constructs, this prefix will be used (i.e. ``op.create_table``) Defaults to "``op.``". Can be ``None`` to indicate no prefix. :param sqlalchemy_module_prefix: When autogenerate refers to SQLAlchemy :class:`~sqlalchemy.schema.Column` or type classes, this prefix will be used (i.e. ``sa.Column("somename", sa.Integer)``) Defaults to "``sa.``". Can be ``None`` to indicate no prefix. Note that when dialect-specific types are rendered, autogenerate will render them using the dialect module name, i.e. ``mssql.BIT()``, ``postgresql.UUID()``. :param user_module_prefix: When autogenerate refers to a SQLAlchemy type (e.g. :class:`.TypeEngine`) where the module name is not under the ``sqlalchemy`` namespace, this prefix will be used within autogenerate. If left at its default of ``None``, the ``__module__`` attribute of the type is used to render the import module. It's a good practice to set this and to have all custom types be available from a fixed module space, in order to future-proof migration files against reorganizations in modules. .. seealso:: :ref:`autogen_module_prefix` :param process_revision_directives: a callable function that will be passed a structure representing the end result of an autogenerate or plain "revision" operation, which can be manipulated to affect how the ``alembic revision`` command ultimately outputs new revision scripts. The structure of the callable is:: def process_revision_directives(context, revision, directives): pass The ``directives`` parameter is a Python list containing a single :class:`.MigrationScript` directive, which represents the revision file to be generated. This list as well as its contents may be freely modified to produce any set of commands. The section :ref:`customizing_revision` shows an example of doing this. The ``context`` parameter is the :class:`.MigrationContext` in use, and ``revision`` is a tuple of revision identifiers representing the current revision of the database. The callable is invoked at all times when the ``--autogenerate`` option is passed to ``alembic revision``. If ``--autogenerate`` is not passed, the callable is invoked only if the ``revision_environment`` variable is set to True in the Alembic configuration, in which case the given ``directives`` collection will contain empty :class:`.UpgradeOps` and :class:`.DowngradeOps` collections for ``.upgrade_ops`` and ``.downgrade_ops``. 
The ``--autogenerate`` option itself can be inferred by inspecting ``context.config.cmd_opts.autogenerate``. The callable function may optionally be an instance of a :class:`.Rewriter` object. This is a helper object that assists in the production of autogenerate-stream rewriter functions. .. seealso:: :ref:`customizing_revision` :ref:`autogen_rewriter` :paramref:`.command.revision.process_revision_directives` Parameters specific to individual backends: :param mssql_batch_separator: The "batch separator" which will be placed between each statement when generating offline SQL Server migrations. Defaults to ``GO``. Note this is in addition to the customary semicolon ``;`` at the end of each statement; SQL Server considers the "batch separator" to denote the end of an individual statement execution, and cannot group certain dependent operations in one step. :param oracle_batch_separator: The "batch separator" which will be placed between each statement when generating offline Oracle migrations. Defaults to ``/``. Oracle doesn't add a semicolon between statements like most other backends. """ opts = self.context_opts if transactional_ddl is not None: opts["transactional_ddl"] = transactional_ddl if output_buffer is not None: opts["output_buffer"] = output_buffer elif self.config.output_buffer is not None: opts["output_buffer"] = self.config.output_buffer if starting_rev: opts["starting_rev"] = starting_rev if tag: opts["tag"] = tag if template_args and "template_args" in opts: opts["template_args"].update(template_args) opts["transaction_per_migration"] = transaction_per_migration opts["target_metadata"] = target_metadata opts["include_name"] = include_name opts["include_object"] = include_object opts["include_schemas"] = include_schemas opts["render_as_batch"] = render_as_batch opts["upgrade_token"] = upgrade_token opts["downgrade_token"] = downgrade_token opts["sqlalchemy_module_prefix"] = sqlalchemy_module_prefix opts["alembic_module_prefix"] = alembic_module_prefix opts["user_module_prefix"] = user_module_prefix opts["literal_binds"] = literal_binds opts["process_revision_directives"] = process_revision_directives opts["on_version_apply"] = util.to_tuple(on_version_apply, default=()) if render_item is not None: opts["render_item"] = render_item if compare_type is not None: opts["compare_type"] = compare_type if compare_server_default is not None: opts["compare_server_default"] = compare_server_default opts["script"] = self.script opts.update(kw) self._migration_context = MigrationContext.configure( connection=connection, url=url, dialect_name=dialect_name, environment_context=self, dialect_opts=dialect_opts, opts=opts, ) def run_migrations(self, **kw) -> None: """Run migrations as determined by the current command line configuration as well as versioning information present (or not) in the current database connection (if one is present). The function accepts optional ``**kw`` arguments. If these are passed, they are sent directly to the ``upgrade()`` and ``downgrade()`` functions within each target revision file. By modifying the ``script.py.mako`` file so that the ``upgrade()`` and ``downgrade()`` functions accept arguments, parameters can be passed here so that contextual information, usually information to identify a particular database in use, can be passed from a custom ``env.py`` script to the migration functions. This function requires that a :class:`.MigrationContext` has first been made available via :meth:`.configure`. 
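        For example, a sketch of passing such a parameter, assuming
        ``script.py.mako`` has been modified so that ``upgrade()`` and
        ``downgrade()`` accept an ``engine_name`` keyword argument (the
        specific names used here are illustrative only)::

            # within env.py
            with context.begin_transaction():
                context.run_migrations(engine_name="engine1")

            # within a migration script produced from the modified template
            def upgrade(engine_name):
                if engine_name == "engine1":
                    op.create_index("ix_account_name", "account", ["name"])
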
""" assert self._migration_context is not None with Operations.context(self._migration_context): self.get_context().run_migrations(**kw) def execute(self, sql, execution_options=None): """Execute the given SQL using the current change context. The behavior of :meth:`.execute` is the same as that of :meth:`.Operations.execute`. Please see that function's documentation for full detail including caveats and limitations. This function requires that a :class:`.MigrationContext` has first been made available via :meth:`.configure`. """ self.get_context().execute(sql, execution_options=execution_options) def static_output(self, text): """Emit text directly to the "offline" SQL stream. Typically this is for emitting comments that start with --. The statement is not treated as a SQL execution, no ; or batch separator is added, etc. """ self.get_context().impl.static_output(text) def begin_transaction( self, ) -> Union["_ProxyTransaction", ContextManager]: """Return a context manager that will enclose an operation within a "transaction", as defined by the environment's offline and transactional DDL settings. e.g.:: with context.begin_transaction(): context.run_migrations() :meth:`.begin_transaction` is intended to "do the right thing" regardless of calling context: * If :meth:`.is_transactional_ddl` is ``False``, returns a "do nothing" context manager which otherwise produces no transactional state or directives. * If :meth:`.is_offline_mode` is ``True``, returns a context manager that will invoke the :meth:`.DefaultImpl.emit_begin` and :meth:`.DefaultImpl.emit_commit` methods, which will produce the string directives ``BEGIN`` and ``COMMIT`` on the output stream, as rendered by the target backend (e.g. SQL Server would emit ``BEGIN TRANSACTION``). * Otherwise, calls :meth:`sqlalchemy.engine.Connection.begin` on the current online connection, which returns a :class:`sqlalchemy.engine.Transaction` object. This object demarcates a real transaction and is itself a context manager, which will roll back if an exception is raised. Note that a custom ``env.py`` script which has more specific transactional needs can of course manipulate the :class:`~sqlalchemy.engine.Connection` directly to produce transactional state in "online" mode. """ return self.get_context().begin_transaction() def get_context(self) -> "MigrationContext": """Return the current :class:`.MigrationContext` object. If :meth:`.EnvironmentContext.configure` has not been called yet, raises an exception. """ if self._migration_context is None: raise Exception("No context has been configured yet.") return self._migration_context def get_bind(self): """Return the current 'bind'. In "online" mode, this is the :class:`sqlalchemy.engine.Connection` currently being used to emit SQL to the database. This function requires that a :class:`.MigrationContext` has first been made available via :meth:`.configure`. 
""" return self.get_context().bind def get_impl(self): return self.get_context().impl alembic-rel_1_7_6/alembic/runtime/migration.py000066400000000000000000001376261417624537100216000ustar00rootroot00000000000000from contextlib import contextmanager import logging import sys from typing import Any from typing import cast from typing import Collection from typing import ContextManager from typing import Dict from typing import Iterator from typing import List from typing import Optional from typing import Set from typing import Tuple from typing import TYPE_CHECKING from typing import Union from sqlalchemy import Column from sqlalchemy import literal_column from sqlalchemy import MetaData from sqlalchemy import PrimaryKeyConstraint from sqlalchemy import String from sqlalchemy import Table from sqlalchemy.engine import Engine from sqlalchemy.engine import url as sqla_url from sqlalchemy.engine.strategies import MockEngineStrategy from .. import ddl from .. import util from ..util import sqla_compat from ..util.compat import EncodedIO if TYPE_CHECKING: from sqlalchemy.engine import Dialect from sqlalchemy.engine.base import Connection from sqlalchemy.engine.base import Transaction from sqlalchemy.engine.mock import MockConnection from .environment import EnvironmentContext from ..config import Config from ..script.base import Script from ..script.base import ScriptDirectory from ..script.revision import Revision from ..script.revision import RevisionMap log = logging.getLogger(__name__) class _ProxyTransaction: def __init__(self, migration_context: "MigrationContext") -> None: self.migration_context = migration_context @property def _proxied_transaction(self) -> Optional["Transaction"]: return self.migration_context._transaction def rollback(self) -> None: t = self._proxied_transaction assert t is not None t.rollback() self.migration_context._transaction = None def commit(self) -> None: t = self._proxied_transaction assert t is not None t.commit() self.migration_context._transaction = None def __enter__(self) -> "_ProxyTransaction": return self def __exit__(self, type_: None, value: None, traceback: None) -> None: if self._proxied_transaction is not None: self._proxied_transaction.__exit__(type_, value, traceback) self.migration_context._transaction = None class MigrationContext: """Represent the database state made available to a migration script. :class:`.MigrationContext` is the front end to an actual database connection, or alternatively a string output stream given a particular database dialect, from an Alembic perspective. When inside the ``env.py`` script, the :class:`.MigrationContext` is available via the :meth:`.EnvironmentContext.get_context` method, which is available at ``alembic.context``:: # from within env.py script from alembic import context migration_context = context.get_context() For usage outside of an ``env.py`` script, such as for utility routines that want to check the current version in the database, the :meth:`.MigrationContext.configure` method to create new :class:`.MigrationContext` objects. 
For example, to get at the current revision in the database using :meth:`.MigrationContext.get_current_revision`:: # in any application, outside of an env.py script from alembic.migration import MigrationContext from sqlalchemy import create_engine engine = create_engine("postgresql://mydatabase") conn = engine.connect() context = MigrationContext.configure(conn) current_rev = context.get_current_revision() The above context can also be used to produce Alembic migration operations with an :class:`.Operations` instance:: # in any application, outside of the normal Alembic environment from alembic.operations import Operations op = Operations(context) op.alter_column("mytable", "somecolumn", nullable=True) """ def __init__( self, dialect: "Dialect", connection: Optional["Connection"], opts: Dict[str, Any], environment_context: Optional["EnvironmentContext"] = None, ) -> None: self.environment_context = environment_context self.opts = opts self.dialect = dialect self.script: Optional["ScriptDirectory"] = opts.get("script") as_sql: bool = opts.get("as_sql", False) transactional_ddl = opts.get("transactional_ddl") self._transaction_per_migration = opts.get( "transaction_per_migration", False ) self.on_version_apply_callbacks = opts.get("on_version_apply", ()) self._transaction: Optional["Transaction"] = None if as_sql: self.connection = cast( Optional["Connection"], self._stdout_connection(connection) ) assert self.connection is not None self._in_external_transaction = False else: self.connection = connection self._in_external_transaction = ( sqla_compat._get_connection_in_transaction(connection) ) self._migrations_fn = opts.get("fn") self.as_sql = as_sql self.purge = opts.get("purge", False) if "output_encoding" in opts: self.output_buffer = EncodedIO( opts.get("output_buffer") or sys.stdout, # type:ignore[arg-type] opts["output_encoding"], ) else: self.output_buffer = opts.get("output_buffer", sys.stdout) self._user_compare_type = opts.get("compare_type", False) self._user_compare_server_default = opts.get( "compare_server_default", False ) self.version_table = version_table = opts.get( "version_table", "alembic_version" ) self.version_table_schema = version_table_schema = opts.get( "version_table_schema", None ) self._version = Table( version_table, MetaData(), Column("version_num", String(32), nullable=False), schema=version_table_schema, ) if opts.get("version_table_pk", True): self._version.append_constraint( PrimaryKeyConstraint( "version_num", name="%s_pkc" % version_table ) ) self._start_from_rev: Optional[str] = opts.get("starting_rev") self.impl = ddl.DefaultImpl.get_by_dialect(dialect)( dialect, self.connection, self.as_sql, transactional_ddl, self.output_buffer, opts, ) log.info("Context impl %s.", self.impl.__class__.__name__) if self.as_sql: log.info("Generating static SQL") log.info( "Will assume %s DDL.", "transactional" if self.impl.transactional_ddl else "non-transactional", ) @classmethod def configure( cls, connection: Optional["Connection"] = None, url: Optional[str] = None, dialect_name: Optional[str] = None, dialect: Optional["Dialect"] = None, environment_context: Optional["EnvironmentContext"] = None, dialect_opts: Optional[Dict[str, str]] = None, opts: Optional[Any] = None, ) -> "MigrationContext": """Create a new :class:`.MigrationContext`. This is a factory method usually called by :meth:`.EnvironmentContext.configure`. :param connection: a :class:`~sqlalchemy.engine.Connection` to use for SQL execution in "online" mode. 
When present, is also used to determine the type of dialect in use. :param url: a string database url, or a :class:`sqlalchemy.engine.url.URL` object. The type of dialect to be used will be derived from this if ``connection`` is not passed. :param dialect_name: string name of a dialect, such as "postgresql", "mssql", etc. The type of dialect to be used will be derived from this if ``connection`` and ``url`` are not passed. :param opts: dictionary of options. Most other options accepted by :meth:`.EnvironmentContext.configure` are passed via this dictionary. """ if opts is None: opts = {} if dialect_opts is None: dialect_opts = {} if connection: if isinstance(connection, Engine): raise util.CommandError( "'connection' argument to configure() is expected " "to be a sqlalchemy.engine.Connection instance, " "got %r" % connection, ) dialect = connection.dialect elif url: url_obj = sqla_url.make_url(url) dialect = url_obj.get_dialect()(**dialect_opts) elif dialect_name: url_obj = sqla_url.make_url("%s://" % dialect_name) dialect = url_obj.get_dialect()(**dialect_opts) elif not dialect: raise Exception("Connection, url, or dialect_name is required.") assert dialect is not None return MigrationContext(dialect, connection, opts, environment_context) @contextmanager def autocommit_block(self) -> Iterator[None]: """Enter an "autocommit" block, for databases that support AUTOCOMMIT isolation levels. This special directive is intended to support the occasional database DDL or system operation that specifically has to be run outside of any kind of transaction block. The PostgreSQL database platform is the most common target for this style of operation, as many of its DDL operations must be run outside of transaction blocks, even though the database overall supports transactional DDL. The method is used as a context manager within a migration script, by calling on :meth:`.Operations.get_context` to retrieve the :class:`.MigrationContext`, then invoking :meth:`.MigrationContext.autocommit_block` using the ``with:`` statement:: def upgrade(): with op.get_context().autocommit_block(): op.execute("ALTER TYPE mood ADD VALUE 'soso'") Above, a PostgreSQL "ALTER TYPE..ADD VALUE" directive is emitted, which must be run outside of a transaction block at the database level. The :meth:`.MigrationContext.autocommit_block` method makes use of the SQLAlchemy ``AUTOCOMMIT`` isolation level setting, which against the psycogp2 DBAPI corresponds to the ``connection.autocommit`` setting, to ensure that the database driver is not inside of a DBAPI level transaction block. .. warning:: As is necessary, **the database transaction preceding the block is unconditionally committed**. This means that the run of migrations preceding the operation will be committed, before the overall migration operation is complete. It is recommended that when an application includes migrations with "autocommit" blocks, that :paramref:`.EnvironmentContext.transaction_per_migration` be used so that the calling environment is tuned to expect short per-file migrations whether or not one of them has an autocommit block. .. 
versionadded:: 1.2.0 """ _in_connection_transaction = self._in_connection_transaction() if self.impl.transactional_ddl and self.as_sql: self.impl.emit_commit() elif _in_connection_transaction: assert self._transaction is not None self._transaction.commit() self._transaction = None if not self.as_sql: assert self.connection is not None current_level = self.connection.get_isolation_level() base_connection = self.connection # in 1.3 and 1.4 non-future mode, the connection gets switched # out. we can use the base connection with the new mode # except that it will not know it's in "autocommit" and will # emit deprecation warnings when an autocommit action takes # place. self.connection = ( self.impl.connection ) = base_connection.execution_options(isolation_level="AUTOCOMMIT") # sqlalchemy future mode will "autobegin" in any case, so take # control of that "transaction" here fake_trans: Optional[Transaction] = self.connection.begin() else: fake_trans = None try: yield finally: if not self.as_sql: assert self.connection is not None if fake_trans is not None: fake_trans.commit() self.connection.execution_options( isolation_level=current_level ) self.connection = self.impl.connection = base_connection if self.impl.transactional_ddl and self.as_sql: self.impl.emit_begin() elif _in_connection_transaction: assert self.connection is not None self._transaction = self.connection.begin() def begin_transaction( self, _per_migration: bool = False ) -> Union["_ProxyTransaction", ContextManager]: """Begin a logical transaction for migration operations. This method is used within an ``env.py`` script to demarcate where the outer "transaction" for a series of migrations begins. Example:: def run_migrations_online(): connectable = create_engine(...) with connectable.connect() as connection: context.configure( connection=connection, target_metadata=target_metadata ) with context.begin_transaction(): context.run_migrations() Above, :meth:`.MigrationContext.begin_transaction` is used to demarcate where the outer logical transaction occurs around the :meth:`.MigrationContext.run_migrations` operation. A "Logical" transaction means that the operation may or may not correspond to a real database transaction. If the target database supports transactional DDL (or :paramref:`.EnvironmentContext.configure.transactional_ddl` is true), the :paramref:`.EnvironmentContext.configure.transaction_per_migration` flag is not set, and the migration is against a real database connection (as opposed to using "offline" ``--sql`` mode), a real transaction will be started. If ``--sql`` mode is in effect, the operation would instead correspond to a string such as "BEGIN" being emitted to the string output. The returned object is a Python context manager that should only be used in the context of a ``with:`` statement as indicated above. The object has no other guaranteed API features present. .. seealso:: :meth:`.MigrationContext.autocommit_block` """ @contextmanager def do_nothing(): yield if self._in_external_transaction: return do_nothing() if self.impl.transactional_ddl: transaction_now = _per_migration == self._transaction_per_migration else: transaction_now = _per_migration is True if not transaction_now: return do_nothing() elif not self.impl.transactional_ddl: assert _per_migration if self.as_sql: return do_nothing() else: # track our own notion of a "transaction block", which must be # committed when complete. 
Don't rely upon whether or not the # SQLAlchemy connection reports as "in transaction"; this # because SQLAlchemy future connection features autobegin # behavior, so it may already be in a transaction from our # emitting of queries like "has_version_table", etc. While we # could track these operations as well, that leaves open the # possibility of new operations or other things happening in # the user environment that still may be triggering # "autobegin". in_transaction = self._transaction is not None if in_transaction: return do_nothing() else: assert self.connection is not None self._transaction = ( sqla_compat._safe_begin_connection_transaction( self.connection ) ) return _ProxyTransaction(self) elif self.as_sql: @contextmanager def begin_commit(): self.impl.emit_begin() yield self.impl.emit_commit() return begin_commit() else: assert self.connection is not None self._transaction = sqla_compat._safe_begin_connection_transaction( self.connection ) return _ProxyTransaction(self) def get_current_revision(self) -> Optional[str]: """Return the current revision, usually that which is present in the ``alembic_version`` table in the database. This method intends to be used only for a migration stream that does not contain unmerged branches in the target database; if there are multiple branches present, an exception is raised. The :meth:`.MigrationContext.get_current_heads` should be preferred over this method going forward in order to be compatible with branch migration support. If this :class:`.MigrationContext` was configured in "offline" mode, that is with ``as_sql=True``, the ``starting_rev`` parameter is returned instead, if any. """ heads = self.get_current_heads() if len(heads) == 0: return None elif len(heads) > 1: raise util.CommandError( "Version table '%s' has more than one head present; " "please use get_current_heads()" % self.version_table ) else: return heads[0] def get_current_heads(self) -> Tuple[str, ...]: """Return a tuple of the current 'head versions' that are represented in the target database. For a migration stream without branches, this will be a single value, synonymous with that of :meth:`.MigrationContext.get_current_revision`. However when multiple unmerged branches exist within the target database, the returned tuple will contain a value for each head. If this :class:`.MigrationContext` was configured in "offline" mode, that is with ``as_sql=True``, the ``starting_rev`` parameter is returned in a one-length tuple. If no version table is present, or if there are no revisions present, an empty tuple is returned. 
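        For example, a minimal sketch of checking the current heads of the
        database outside of an ``env.py`` script, given an existing
        SQLAlchemy engine::

            from alembic.migration import MigrationContext

            with engine.connect() as connection:
                context = MigrationContext.configure(connection)
                heads = context.get_current_heads()
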
""" if self.as_sql: start_from_rev: Any = self._start_from_rev if start_from_rev == "base": start_from_rev = None elif start_from_rev is not None and self.script: start_from_rev = [ self.script.get_revision(sfr).revision for sfr in util.to_list(start_from_rev) if sfr not in (None, "base") ] return util.to_tuple(start_from_rev, default=()) else: if self._start_from_rev: raise util.CommandError( "Can't specify current_rev to context " "when using a database connection" ) if not self._has_version_table(): return () assert self.connection is not None return tuple( row[0] for row in self.connection.execute(self._version.select()) ) def _ensure_version_table(self, purge: bool = False) -> None: with sqla_compat._ensure_scope_for_ddl(self.connection): self._version.create(self.connection, checkfirst=True) if purge: assert self.connection is not None self.connection.execute(self._version.delete()) def _has_version_table(self) -> bool: assert self.connection is not None return sqla_compat._connectable_has_table( self.connection, self.version_table, self.version_table_schema ) def stamp( self, script_directory: "ScriptDirectory", revision: str ) -> None: """Stamp the version table with a specific revision. This method calculates those branches to which the given revision can apply, and updates those branches as though they were migrated towards that revision (either up or down). If no current branches include the revision, it is added as a new branch head. """ heads = self.get_current_heads() if not self.as_sql and not heads: self._ensure_version_table() head_maintainer = HeadMaintainer(self, heads) for step in script_directory._stamp_revs(revision, heads): head_maintainer.update_to_step(step) def run_migrations(self, **kw) -> None: r"""Run the migration scripts established for this :class:`.MigrationContext`, if any. The commands in :mod:`alembic.command` will set up a function that is ultimately passed to the :class:`.MigrationContext` as the ``fn`` argument. This function represents the "work" that will be done when :meth:`.MigrationContext.run_migrations` is called, typically from within the ``env.py`` script of the migration environment. The "work function" then provides an iterable of version callables and other version information which in the case of the ``upgrade`` or ``downgrade`` commands are the list of version scripts to invoke. Other commands yield nothing, in the case that a command wants to run some other operation against the database such as the ``current`` or ``stamp`` commands. :param \**kw: keyword arguments here will be passed to each migration callable, that is the ``upgrade()`` or ``downgrade()`` method within revision scripts. """ self.impl.start_migrations() heads: Tuple[str, ...] 
if self.purge: if self.as_sql: raise util.CommandError("Can't use --purge with --sql mode") self._ensure_version_table(purge=True) heads = () else: heads = self.get_current_heads() dont_mutate = self.opts.get("dont_mutate", False) if not self.as_sql and not heads and not dont_mutate: self._ensure_version_table() head_maintainer = HeadMaintainer(self, heads) assert self._migrations_fn is not None for step in self._migrations_fn(heads, self): with self.begin_transaction(_per_migration=True): if self.as_sql and not head_maintainer.heads: # for offline mode, include a CREATE TABLE from # the base self._version.create(self.connection) log.info("Running %s", step) if self.as_sql: self.impl.static_output( "-- Running %s" % (step.short_log,) ) step.migration_fn(**kw) # previously, we wouldn't stamp per migration # if we were in a transaction, however given the more # complex model that involves any number of inserts # and row-targeted updates and deletes, it's simpler for now # just to run the operations on every version head_maintainer.update_to_step(step) for callback in self.on_version_apply_callbacks: callback( ctx=self, step=step.info, heads=set(head_maintainer.heads), run_args=kw, ) if self.as_sql and not head_maintainer.heads: self._version.drop(self.connection) def _in_connection_transaction(self) -> bool: try: meth = self.connection.in_transaction # type:ignore[union-attr] except AttributeError: return False else: return meth() def execute(self, sql: str, execution_options: None = None) -> None: """Execute a SQL construct or string statement. The underlying execution mechanics are used, that is if this is "offline mode" the SQL is written to the output buffer, otherwise the SQL is emitted on the current SQLAlchemy connection. """ self.impl._exec(sql, execution_options) def _stdout_connection( self, connection: Optional["Connection"] ) -> "MockConnection": def dump(construct, *multiparams, **params): self.impl._exec(construct) return MockEngineStrategy.MockConnection(self.dialect, dump) @property def bind(self) -> Optional["Connection"]: """Return the current "bind". In online mode, this is an instance of :class:`sqlalchemy.engine.Connection`, and is suitable for ad-hoc execution of any kind of usage described in :ref:`sqlexpression_toplevel` as well as for usage with the :meth:`sqlalchemy.schema.Table.create` and :meth:`sqlalchemy.schema.MetaData.create_all` methods of :class:`~sqlalchemy.schema.Table`, :class:`~sqlalchemy.schema.MetaData`. Note that when "standard output" mode is enabled, this bind will be a "mock" connection handler that cannot return results and is only appropriate for a very limited subset of commands. 
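        For example, a sketch of using this bind to create an ad-hoc table,
        e.g. from within a migration script where ``op.get_bind()`` returns
        this same connection (the table shown is illustrative only)::

            from sqlalchemy import Column, Integer, MetaData, Table

            scratch = Table("scratch", MetaData(), Column("id", Integer))
            scratch.create(op.get_bind())
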
""" return self.connection @property def config(self) -> Optional["Config"]: """Return the :class:`.Config` used by the current environment, if any.""" if self.environment_context: return self.environment_context.config else: return None def _compare_type( self, inspector_column: "Column", metadata_column: "Column" ) -> bool: if self._user_compare_type is False: return False if callable(self._user_compare_type): user_value = self._user_compare_type( self, inspector_column, metadata_column, inspector_column.type, metadata_column.type, ) if user_value is not None: return user_value return self.impl.compare_type(inspector_column, metadata_column) def _compare_server_default( self, inspector_column: "Column", metadata_column: "Column", rendered_metadata_default: Optional[str], rendered_column_default: Optional[str], ) -> bool: if self._user_compare_server_default is False: return False if callable(self._user_compare_server_default): user_value = self._user_compare_server_default( self, inspector_column, metadata_column, rendered_column_default, metadata_column.server_default, rendered_metadata_default, ) if user_value is not None: return user_value return self.impl.compare_server_default( inspector_column, metadata_column, rendered_metadata_default, rendered_column_default, ) class HeadMaintainer: def __init__(self, context: "MigrationContext", heads: Any) -> None: self.context = context self.heads = set(heads) def _insert_version(self, version: str) -> None: assert version not in self.heads self.heads.add(version) self.context.impl._exec( self.context._version.insert().values( version_num=literal_column("'%s'" % version) ) ) def _delete_version(self, version: str) -> None: self.heads.remove(version) ret = self.context.impl._exec( self.context._version.delete().where( self.context._version.c.version_num == literal_column("'%s'" % version) ) ) if ( not self.context.as_sql and self.context.dialect.supports_sane_rowcount and ret.rowcount != 1 ): raise util.CommandError( "Online migration expected to match one " "row when deleting '%s' in '%s'; " "%d found" % (version, self.context.version_table, ret.rowcount) ) def _update_version(self, from_: str, to_: str) -> None: assert to_ not in self.heads self.heads.remove(from_) self.heads.add(to_) ret = self.context.impl._exec( self.context._version.update() .values(version_num=literal_column("'%s'" % to_)) .where( self.context._version.c.version_num == literal_column("'%s'" % from_) ) ) if ( not self.context.as_sql and self.context.dialect.supports_sane_rowcount and ret.rowcount != 1 ): raise util.CommandError( "Online migration expected to match one " "row when updating '%s' to '%s' in '%s'; " "%d found" % (from_, to_, self.context.version_table, ret.rowcount) ) def update_to_step(self, step: Union["RevisionStep", "StampStep"]) -> None: if step.should_delete_branch(self.heads): vers = step.delete_version_num log.debug("branch delete %s", vers) self._delete_version(vers) elif step.should_create_branch(self.heads): vers = step.insert_version_num log.debug("new branch insert %s", vers) self._insert_version(vers) elif step.should_merge_branches(self.heads): # delete revs, update from rev, update to rev ( delete_revs, update_from_rev, update_to_rev, ) = step.merge_branch_idents(self.heads) log.debug( "merge, delete %s, update %s to %s", delete_revs, update_from_rev, update_to_rev, ) for delrev in delete_revs: self._delete_version(delrev) self._update_version(update_from_rev, update_to_rev) elif step.should_unmerge_branches(self.heads): ( update_from_rev, 
update_to_rev, insert_revs, ) = step.unmerge_branch_idents(self.heads) log.debug( "unmerge, insert %s, update %s to %s", insert_revs, update_from_rev, update_to_rev, ) for insrev in insert_revs: self._insert_version(insrev) self._update_version(update_from_rev, update_to_rev) else: from_, to_ = step.update_version_num(self.heads) log.debug("update %s to %s", from_, to_) self._update_version(from_, to_) class MigrationInfo: """Exposes information about a migration step to a callback listener. The :class:`.MigrationInfo` object is available exclusively for the benefit of the :paramref:`.EnvironmentContext.on_version_apply` callback hook. """ is_upgrade: bool = None # type:ignore[assignment] """True/False: indicates whether this operation ascends or descends the version tree.""" is_stamp: bool = None # type:ignore[assignment] """True/False: indicates whether this operation is a stamp (i.e. whether it results in any actual database operations).""" up_revision_id: Optional[str] = None """Version string corresponding to :attr:`.Revision.revision`. In the case of a stamp operation, it is advised to use the :attr:`.MigrationInfo.up_revision_ids` tuple as a stamp operation can make a single movement from one or more branches down to a single branchpoint, in which case there will be multiple "up" revisions. .. seealso:: :attr:`.MigrationInfo.up_revision_ids` """ up_revision_ids: Tuple[str, ...] = None # type:ignore[assignment] """Tuple of version strings corresponding to :attr:`.Revision.revision`. In the majority of cases, this tuple will be a single value, synonomous with the scalar value of :attr:`.MigrationInfo.up_revision_id`. It can be multiple revision identifiers only in the case of an ``alembic stamp`` operation which is moving downwards from multiple branches down to their common branch point. """ down_revision_ids: Tuple[str, ...] = None # type:ignore[assignment] """Tuple of strings representing the base revisions of this migration step. If empty, this represents a root revision; otherwise, the first item corresponds to :attr:`.Revision.down_revision`, and the rest are inferred from dependencies. """ revision_map: "RevisionMap" = None # type:ignore[assignment] """The revision map inside of which this operation occurs.""" def __init__( self, revision_map: "RevisionMap", is_upgrade: bool, is_stamp: bool, up_revisions: Union[str, Tuple[str, ...]], down_revisions: Union[str, Tuple[str, ...]], ) -> None: self.revision_map = revision_map self.is_upgrade = is_upgrade self.is_stamp = is_stamp self.up_revision_ids = util.to_tuple(up_revisions, default=()) if self.up_revision_ids: self.up_revision_id = self.up_revision_ids[0] else: # this should never be the case with # "upgrade", "downgrade", or "stamp" as we are always # measuring movement in terms of at least one upgrade version self.up_revision_id = None self.down_revision_ids = util.to_tuple(down_revisions, default=()) @property def is_migration(self) -> bool: """True/False: indicates whether this operation is a migration. At present this is true if and only the migration is not a stamp. If other operation types are added in the future, both this attribute and :attr:`~.MigrationInfo.is_stamp` will be false. 
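        For example, a sketch of an
        :paramref:`.EnvironmentContext.on_version_apply` callback that
        reports only real migration steps and skips stamp operations::

            def report_step(*, ctx, step, heads, run_args):
                if step.is_migration:
                    print("applied revision %s" % step.up_revision_id)
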
""" return not self.is_stamp @property def source_revision_ids(self) -> Tuple[str, ...]: """Active revisions before this migration step is applied.""" return ( self.down_revision_ids if self.is_upgrade else self.up_revision_ids ) @property def destination_revision_ids(self) -> Tuple[str, ...]: """Active revisions after this migration step is applied.""" return ( self.up_revision_ids if self.is_upgrade else self.down_revision_ids ) @property def up_revision(self) -> "Revision": """Get :attr:`~.MigrationInfo.up_revision_id` as a :class:`.Revision`. """ return self.revision_map.get_revision(self.up_revision_id) @property def up_revisions(self) -> Tuple["Revision", ...]: """Get :attr:`~.MigrationInfo.up_revision_ids` as a :class:`.Revision`.""" return self.revision_map.get_revisions(self.up_revision_ids) @property def down_revisions(self) -> Tuple["Revision", ...]: """Get :attr:`~.MigrationInfo.down_revision_ids` as a tuple of :class:`Revisions <.Revision>`.""" return self.revision_map.get_revisions(self.down_revision_ids) @property def source_revisions(self) -> Tuple["Revision", ...]: """Get :attr:`~MigrationInfo.source_revision_ids` as a tuple of :class:`Revisions <.Revision>`.""" return self.revision_map.get_revisions(self.source_revision_ids) @property def destination_revisions(self) -> Tuple["Revision", ...]: """Get :attr:`~MigrationInfo.destination_revision_ids` as a tuple of :class:`Revisions <.Revision>`.""" return self.revision_map.get_revisions(self.destination_revision_ids) class MigrationStep: from_revisions_no_deps: Tuple[str, ...] to_revisions_no_deps: Tuple[str, ...] is_upgrade: bool migration_fn: Any @property def name(self) -> str: return self.migration_fn.__name__ @classmethod def upgrade_from_script( cls, revision_map: "RevisionMap", script: "Script" ) -> "RevisionStep": return RevisionStep(revision_map, script, True) @classmethod def downgrade_from_script( cls, revision_map: "RevisionMap", script: "Script" ) -> "RevisionStep": return RevisionStep(revision_map, script, False) @property def is_downgrade(self) -> bool: return not self.is_upgrade @property def short_log(self) -> str: return "%s %s -> %s" % ( self.name, util.format_as_comma(self.from_revisions_no_deps), util.format_as_comma(self.to_revisions_no_deps), ) def __str__(self): if self.doc: return "%s %s -> %s, %s" % ( self.name, util.format_as_comma(self.from_revisions_no_deps), util.format_as_comma(self.to_revisions_no_deps), self.doc, ) else: return self.short_log class RevisionStep(MigrationStep): def __init__( self, revision_map: "RevisionMap", revision: "Script", is_upgrade: bool ) -> None: self.revision_map = revision_map self.revision = revision self.is_upgrade = is_upgrade if is_upgrade: self.migration_fn = ( revision.module.upgrade # type:ignore[attr-defined] ) else: self.migration_fn = ( revision.module.downgrade # type:ignore[attr-defined] ) def __repr__(self): return "RevisionStep(%r, is_upgrade=%r)" % ( self.revision.revision, self.is_upgrade, ) def __eq__(self, other: object) -> bool: return ( isinstance(other, RevisionStep) and other.revision == self.revision and self.is_upgrade == other.is_upgrade ) @property def doc(self): return self.revision.doc @property def from_revisions(self) -> Tuple[str, ...]: if self.is_upgrade: return self.revision._normalized_down_revisions else: return (self.revision.revision,) @property def from_revisions_no_deps( # type:ignore[override] self, ) -> Tuple[str, ...]: if self.is_upgrade: return self.revision._versioned_down_revisions else: return (self.revision.revision,) 
@property def to_revisions(self) -> Tuple[str, ...]: if self.is_upgrade: return (self.revision.revision,) else: return self.revision._normalized_down_revisions @property def to_revisions_no_deps( # type:ignore[override] self, ) -> Tuple[str, ...]: if self.is_upgrade: return (self.revision.revision,) else: return self.revision._versioned_down_revisions @property def _has_scalar_down_revision(self) -> bool: return len(self.revision._normalized_down_revisions) == 1 def should_delete_branch(self, heads: Set[str]) -> bool: """A delete is when we are a. in a downgrade and b. we are going to the "base" or we are going to a version that is implied as a dependency on another version that is remaining. """ if not self.is_downgrade: return False if self.revision.revision not in heads: return False downrevs = self.revision._normalized_down_revisions if not downrevs: # is a base return True else: # determine what the ultimate "to_revisions" for an # unmerge would be. If there are none, then we're a delete. to_revisions = self._unmerge_to_revisions(heads) return not to_revisions def merge_branch_idents( self, heads: Set[str] ) -> Tuple[List[str], str, str]: other_heads = set(heads).difference(self.from_revisions) if other_heads: ancestors = set( r.revision for r in self.revision_map._get_ancestor_nodes( self.revision_map.get_revisions(other_heads), check=False ) ) from_revisions = list( set(self.from_revisions).difference(ancestors) ) else: from_revisions = list(self.from_revisions) return ( # delete revs, update from rev, update to rev list(from_revisions[0:-1]), from_revisions[-1], self.to_revisions[0], ) def _unmerge_to_revisions(self, heads: Collection[str]) -> Tuple[str, ...]: other_heads = set(heads).difference([self.revision.revision]) if other_heads: ancestors = set( r.revision for r in self.revision_map._get_ancestor_nodes( self.revision_map.get_revisions(other_heads), check=False ) ) return tuple(set(self.to_revisions).difference(ancestors)) else: return self.to_revisions def unmerge_branch_idents( self, heads: Collection[str] ) -> Tuple[str, str, Tuple[str, ...]]: to_revisions = self._unmerge_to_revisions(heads) return ( # update from rev, update to rev, insert revs self.from_revisions[0], to_revisions[-1], to_revisions[0:-1], ) def should_create_branch(self, heads: Set[str]) -> bool: if not self.is_upgrade: return False downrevs = self.revision._normalized_down_revisions if not downrevs: # is a base return True else: # none of our downrevs are present, so... # we have to insert our version. This is true whether # or not there is only one downrev, or multiple (in the latter # case, we're a merge point.) 
if not heads.intersection(downrevs): return True else: return False def should_merge_branches(self, heads: Set[str]) -> bool: if not self.is_upgrade: return False downrevs = self.revision._normalized_down_revisions if len(downrevs) > 1 and len(heads.intersection(downrevs)) > 1: return True return False def should_unmerge_branches(self, heads: Set[str]) -> bool: if not self.is_downgrade: return False downrevs = self.revision._normalized_down_revisions if self.revision.revision in heads and len(downrevs) > 1: return True return False def update_version_num(self, heads: Set[str]) -> Tuple[str, str]: if not self._has_scalar_down_revision: downrev = heads.intersection( self.revision._normalized_down_revisions ) assert ( len(downrev) == 1 ), "Can't do an UPDATE because downrevision is ambiguous" down_revision = list(downrev)[0] else: down_revision = self.revision._normalized_down_revisions[0] if self.is_upgrade: return down_revision, self.revision.revision else: return self.revision.revision, down_revision @property def delete_version_num(self) -> str: return self.revision.revision @property def insert_version_num(self) -> str: return self.revision.revision @property def info(self) -> "MigrationInfo": return MigrationInfo( revision_map=self.revision_map, up_revisions=self.revision.revision, down_revisions=self.revision._normalized_down_revisions, is_upgrade=self.is_upgrade, is_stamp=False, ) class StampStep(MigrationStep): def __init__( self, from_: Optional[Union[str, Collection[str]]], to_: Optional[Union[str, Collection[str]]], is_upgrade: bool, branch_move: bool, revision_map: Optional["RevisionMap"] = None, ) -> None: self.from_: Tuple[str, ...] = util.to_tuple(from_, default=()) self.to_: Tuple[str, ...] = util.to_tuple(to_, default=()) self.is_upgrade = is_upgrade self.branch_move = branch_move self.migration_fn = self.stamp_revision self.revision_map = revision_map doc = None def stamp_revision(self, **kw) -> None: return None def __eq__(self, other): return ( isinstance(other, StampStep) and other.from_revisions == self.revisions and other.to_revisions == self.to_revisions and other.branch_move == self.branch_move and self.is_upgrade == other.is_upgrade ) @property def from_revisions(self): return self.from_ @property def to_revisions(self) -> Tuple[str, ...]: return self.to_ @property def from_revisions_no_deps( # type:ignore[override] self, ) -> Tuple[str, ...]: return self.from_ @property def to_revisions_no_deps( # type:ignore[override] self, ) -> Tuple[str, ...]: return self.to_ @property def delete_version_num(self) -> str: assert len(self.from_) == 1 return self.from_[0] @property def insert_version_num(self) -> str: assert len(self.to_) == 1 return self.to_[0] def update_version_num(self, heads: Set[str]) -> Tuple[str, str]: assert len(self.from_) == 1 assert len(self.to_) == 1 return self.from_[0], self.to_[0] def merge_branch_idents( self, heads: Union[Set[str], List[str]] ) -> Union[Tuple[List[Any], str, str], Tuple[List[str], str, str]]: return ( # delete revs, update from rev, update to rev list(self.from_[0:-1]), self.from_[-1], self.to_[0], ) def unmerge_branch_idents( self, heads: Set[str] ) -> Tuple[str, str, List[str]]: return ( # update from rev, update to rev, insert revs self.from_[0], self.to_[-1], list(self.to_[0:-1]), ) def should_delete_branch(self, heads: Set[str]) -> bool: # TODO: we probably need to look for self.to_ inside of heads, # in a similar manner as should_create_branch, however we have # no tests for this yet (stamp downgrades w/ branches) return 
self.is_downgrade and self.branch_move def should_create_branch(self, heads: Set[str]) -> Union[Set[str], bool]: return ( self.is_upgrade and (self.branch_move or set(self.from_).difference(heads)) and set(self.to_).difference(heads) ) def should_merge_branches(self, heads: Set[str]) -> bool: return len(self.from_) > 1 def should_unmerge_branches(self, heads: Set[str]) -> bool: return len(self.to_) > 1 @property def info(self) -> "MigrationInfo": up, down = ( (self.to_, self.from_) if self.is_upgrade else (self.from_, self.to_) ) assert self.revision_map is not None return MigrationInfo( revision_map=self.revision_map, up_revisions=up, down_revisions=down, is_upgrade=self.is_upgrade, is_stamp=True, ) alembic-rel_1_7_6/alembic/script/000077500000000000000000000000001417624537100170375ustar00rootroot00000000000000alembic-rel_1_7_6/alembic/script/__init__.py000066400000000000000000000001441417624537100211470ustar00rootroot00000000000000from .base import Script from .base import ScriptDirectory __all__ = ["ScriptDirectory", "Script"] alembic-rel_1_7_6/alembic/script/base.py000066400000000000000000001056571417624537100203410ustar00rootroot00000000000000from contextlib import contextmanager import datetime import os import re import shutil import sys from types import ModuleType from typing import Any from typing import cast from typing import Dict from typing import Iterator from typing import List from typing import Optional from typing import Sequence from typing import Set from typing import Tuple from typing import TYPE_CHECKING from typing import Union from . import revision from . import write_hooks from .. import util from ..runtime import migration if TYPE_CHECKING: from ..config import Config from ..runtime.migration import RevisionStep from ..runtime.migration import StampStep try: from dateutil import tz except ImportError: tz = None # type: ignore[assignment] _RevIdType = Union[str, Sequence[str]] _sourceless_rev_file = re.compile(r"(?!\.\#|__init__)(.*\.py)(c|o)?$") _only_source_rev_file = re.compile(r"(?!\.\#|__init__)(.*\.py)$") _legacy_rev = re.compile(r"([a-f0-9]+)\.py$") _mod_def_re = re.compile(r"(upgrade|downgrade)_([a-z0-9]+)") _slug_re = re.compile(r"\w+") _default_file_template = "%(rev)s_%(slug)s" _split_on_space_comma = re.compile(r", *|(?: +)") _split_on_space_comma_colon = re.compile(r", *|(?: +)|\:") class ScriptDirectory: """Provides operations upon an Alembic script directory. 
This object is useful to get information as to current revisions, most notably being able to get at the "head" revision, for schemes that want to test if the current revision in the database is the most recent:: from alembic.script import ScriptDirectory from alembic.config import Config config = Config() config.set_main_option("script_location", "myapp:migrations") script = ScriptDirectory.from_config(config) head_revision = script.get_current_head() """ def __init__( self, dir: str, # noqa file_template: str = _default_file_template, truncate_slug_length: Optional[int] = 40, version_locations: Optional[List[str]] = None, sourceless: bool = False, output_encoding: str = "utf-8", timezone: Optional[str] = None, hook_config: Optional[Dict[str, str]] = None, ) -> None: self.dir = dir self.file_template = file_template self.version_locations = version_locations self.truncate_slug_length = truncate_slug_length or 40 self.sourceless = sourceless self.output_encoding = output_encoding self.revision_map = revision.RevisionMap(self._load_revisions) self.timezone = timezone self.hook_config = hook_config if not os.access(dir, os.F_OK): raise util.CommandError( "Path doesn't exist: %r. Please use " "the 'init' command to create a new " "scripts folder." % os.path.abspath(dir) ) @property def versions(self) -> str: loc = self._version_locations if len(loc) > 1: raise util.CommandError("Multiple version_locations present") else: return loc[0] @util.memoized_property def _version_locations(self): if self.version_locations: return [ os.path.abspath(util.coerce_resource_to_filename(location)) for location in self.version_locations ] else: return (os.path.abspath(os.path.join(self.dir, "versions")),) def _load_revisions(self) -> Iterator["Script"]: if self.version_locations: paths = [ vers for vers in self._version_locations if os.path.exists(vers) ] else: paths = [self.versions] dupes = set() for vers in paths: for file_ in Script._list_py_dir(self, vers): path = os.path.realpath(os.path.join(vers, file_)) if path in dupes: util.warn( "File %s loaded twice! ignoring. Please ensure " "version_locations is unique." % path ) continue dupes.add(path) script = Script._from_filename(self, vers, file_) if script is None: continue yield script @classmethod def from_config(cls, config: "Config") -> "ScriptDirectory": """Produce a new :class:`.ScriptDirectory` given a :class:`.Config` instance. The :class:`.Config` need only have the ``script_location`` key present. """ script_location = config.get_main_option("script_location") if script_location is None: raise util.CommandError( "No 'script_location' key " "found in configuration." 
) truncate_slug_length = cast( Optional[int], config.get_main_option("truncate_slug_length") ) if truncate_slug_length is not None: truncate_slug_length = int(truncate_slug_length) version_locations = config.get_main_option("version_locations") if version_locations: version_path_separator = config.get_main_option( "version_path_separator" ) split_on_path = { None: None, "space": " ", "os": os.pathsep, ":": ":", ";": ";", } try: split_char = split_on_path[version_path_separator] except KeyError as ke: raise ValueError( "'%s' is not a valid value for " "version_path_separator; " "expected 'space', 'os', ':', ';'" % version_path_separator ) from ke else: if split_char is None: # legacy behaviour for backwards compatibility vl = _split_on_space_comma.split( cast(str, version_locations) ) version_locations: List[str] = vl # type: ignore[no-redef] else: vl = [ x for x in cast(str, version_locations).split(split_char) if x ] version_locations: List[str] = vl # type: ignore[no-redef] prepend_sys_path = config.get_main_option("prepend_sys_path") if prepend_sys_path: sys.path[:0] = list( _split_on_space_comma_colon.split(prepend_sys_path) ) return ScriptDirectory( util.coerce_resource_to_filename(script_location), file_template=config.get_main_option( "file_template", _default_file_template ), truncate_slug_length=truncate_slug_length, sourceless=config.get_main_option("sourceless") == "true", output_encoding=config.get_main_option("output_encoding", "utf-8"), version_locations=cast("Optional[List[str]]", version_locations), timezone=config.get_main_option("timezone"), hook_config=config.get_section("post_write_hooks", {}), ) @contextmanager def _catch_revision_errors( self, ancestor: Optional[str] = None, multiple_heads: Optional[str] = None, start: Optional[str] = None, end: Optional[str] = None, resolution: Optional[str] = None, ) -> Iterator[None]: try: yield except revision.RangeNotAncestorError as rna: if start is None: start = cast(Any, rna.lower) if end is None: end = cast(Any, rna.upper) if not ancestor: ancestor = ( "Requested range %(start)s:%(end)s does not refer to " "ancestor/descendant revisions along the same branch" ) ancestor = ancestor % {"start": start, "end": end} raise util.CommandError(ancestor) from rna except revision.MultipleHeads as mh: if not multiple_heads: multiple_heads = ( "Multiple head revisions are present for given " "argument '%(head_arg)s'; please " "specify a specific target revision, " "'@%(head_arg)s' to " "narrow to a specific head, or 'heads' for all heads" ) multiple_heads = multiple_heads % { "head_arg": end or mh.argument, "heads": util.format_as_comma(mh.heads), } raise util.CommandError(multiple_heads) from mh except revision.ResolutionError as re: if resolution is None: resolution = "Can't locate revision identified by '%s'" % ( re.argument ) raise util.CommandError(resolution) from re except revision.RevisionError as err: raise util.CommandError(err.args[0]) from err def walk_revisions( self, base: str = "base", head: str = "heads" ) -> Iterator["Script"]: """Iterate through all revisions. :param base: the base revision, or "base" to start from the empty revision. :param head: the head revision; defaults to "heads" to indicate all head revisions. May also be "head" to indicate a single head revision. 
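A minimal usage sketch, assuming ``script`` is a :class:`.ScriptDirectory` obtained via :meth:`.ScriptDirectory.from_config` (names are illustrative)::

    for sc in script.walk_revisions(base="base", head="heads"):
        print(sc.revision, sc.doc)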
""" with self._catch_revision_errors(start=base, end=head): for rev in self.revision_map.iterate_revisions( head, base, inclusive=True, assert_relative_length=False ): yield cast(Script, rev) def get_revisions(self, id_: _RevIdType) -> Tuple["Script", ...]: """Return the :class:`.Script` instance with the given rev identifier, symbolic name, or sequence of identifiers. """ with self._catch_revision_errors(): return cast( "Tuple[Script, ...]", self.revision_map.get_revisions(id_) ) def get_all_current(self, id_: Tuple[str, ...]) -> Set["Script"]: with self._catch_revision_errors(): top_revs = cast( "Set[Script]", set(self.revision_map.get_revisions(id_)), ) top_revs.update( cast( "Iterator[Script]", self.revision_map._get_ancestor_nodes( list(top_revs), include_dependencies=True ), ) ) top_revs = self.revision_map._filter_into_branch_heads(top_revs) return top_revs def get_revision(self, id_: str) -> "Script": """Return the :class:`.Script` instance with the given rev id. .. seealso:: :meth:`.ScriptDirectory.get_revisions` """ with self._catch_revision_errors(): return cast(Script, self.revision_map.get_revision(id_)) def as_revision_number( self, id_: Optional[str] ) -> Optional[Union[str, Tuple[str, ...]]]: """Convert a symbolic revision, i.e. 'head' or 'base', into an actual revision number.""" with self._catch_revision_errors(): rev, branch_name = self.revision_map._resolve_revision_number(id_) if not rev: # convert () to None return None elif id_ == "heads": return rev else: return rev[0] def iterate_revisions(self, upper, lower): """Iterate through script revisions, starting at the given upper revision identifier and ending at the lower. The traversal uses strictly the `down_revision` marker inside each migration script, so it is a requirement that upper >= lower, else you'll get nothing back. The iterator yields :class:`.Script` objects. .. seealso:: :meth:`.RevisionMap.iterate_revisions` """ return self.revision_map.iterate_revisions(upper, lower) def get_current_head(self): """Return the current head revision. If the script directory has multiple heads due to branching, an error is raised; :meth:`.ScriptDirectory.get_heads` should be preferred. :return: a string revision number. .. seealso:: :meth:`.ScriptDirectory.get_heads` """ with self._catch_revision_errors( multiple_heads=( "The script directory has multiple heads (due to branching)." "Please use get_heads(), or merge the branches using " "alembic merge." ) ): return self.revision_map.get_current_head() def get_heads(self) -> List[str]: """Return all "versioned head" revisions as strings. This is normally a list of length one, unless branches are present. The :meth:`.ScriptDirectory.get_current_head()` method can be used normally when a script directory has only one head. :return: a tuple of string revision numbers. """ return list(self.revision_map.heads) def get_base(self) -> Optional[str]: """Return the "base" revision as a string. This is the revision number of the script that has a ``down_revision`` of None. If the script directory has multiple bases, an error is raised; :meth:`.ScriptDirectory.get_bases` should be preferred. """ bases = self.get_bases() if len(bases) > 1: raise util.CommandError( "The script directory has multiple bases. " "Please use get_bases()." ) elif bases: return bases[0] else: return None def get_bases(self) -> List[str]: """return all "base" revisions as strings. This is the revision number of all scripts that have a ``down_revision`` of None. 
""" return list(self.revision_map.bases) def _upgrade_revs( self, destination: str, current_rev: str ) -> List["RevisionStep"]: with self._catch_revision_errors( ancestor="Destination %(end)s is not a valid upgrade " "target from current head(s)", end=destination, ): revs = self.revision_map.iterate_revisions( destination, current_rev, implicit_base=True ) return [ migration.MigrationStep.upgrade_from_script( self.revision_map, cast(Script, script) ) for script in reversed(list(revs)) ] def _downgrade_revs( self, destination: str, current_rev: Optional[str] ) -> List["RevisionStep"]: with self._catch_revision_errors( ancestor="Destination %(end)s is not a valid downgrade " "target from current head(s)", end=destination, ): revs = self.revision_map.iterate_revisions( current_rev, destination, select_for_downgrade=True ) return [ migration.MigrationStep.downgrade_from_script( self.revision_map, cast(Script, script) ) for script in revs ] def _stamp_revs( self, revision: _RevIdType, heads: _RevIdType ) -> List["StampStep"]: with self._catch_revision_errors( multiple_heads="Multiple heads are present; please specify a " "single target revision" ): heads_revs = self.get_revisions(heads) steps = [] if not revision: revision = "base" filtered_heads: List["Script"] = [] for rev in util.to_tuple(revision): if rev: filtered_heads.extend( self.revision_map.filter_for_lineage( heads_revs, rev, include_dependencies=True ) ) filtered_heads = util.unique_list(filtered_heads) dests = self.get_revisions(revision) or [None] for dest in dests: if dest is None: # dest is 'base'. Return a "delete branch" migration # for all applicable heads. steps.extend( [ migration.StampStep( head.revision, None, False, True, self.revision_map, ) for head in filtered_heads ] ) continue elif dest in filtered_heads: # the dest is already in the version table, do nothing. continue # figure out if the dest is a descendant or an # ancestor of the selected nodes descendants = set( self.revision_map._get_descendant_nodes([dest]) ) ancestors = set(self.revision_map._get_ancestor_nodes([dest])) if descendants.intersection(filtered_heads): # heads are above the target, so this is a downgrade. # we can treat them as a "merge", single step. assert not ancestors.intersection(filtered_heads) todo_heads = [head.revision for head in filtered_heads] step = migration.StampStep( todo_heads, dest.revision, False, False, self.revision_map, ) steps.append(step) continue elif ancestors.intersection(filtered_heads): # heads are below the target, so this is an upgrade. # we can treat them as a "merge", single step. todo_heads = [head.revision for head in filtered_heads] step = migration.StampStep( todo_heads, dest.revision, True, False, self.revision_map, ) steps.append(step) continue else: # destination is in a branch not represented, # treat it as new branch step = migration.StampStep( (), dest.revision, True, True, self.revision_map ) steps.append(step) continue return steps def run_env(self) -> None: """Run the script environment. This basically runs the ``env.py`` script present in the migration environment. It is called exclusively by the command functions in :mod:`alembic.command`. 
""" util.load_python_file(self.dir, "env.py") @property def env_py_location(self): return os.path.abspath(os.path.join(self.dir, "env.py")) def _generate_template(self, src: str, dest: str, **kw: Any) -> None: util.status( "Generating %s" % os.path.abspath(dest), util.template_to_file, src, dest, self.output_encoding, **kw ) def _copy_file(self, src: str, dest: str) -> None: util.status( "Generating %s" % os.path.abspath(dest), shutil.copy, src, dest ) def _ensure_directory(self, path: str) -> None: path = os.path.abspath(path) if not os.path.exists(path): util.status("Creating directory %s" % path, os.makedirs, path) def _generate_create_date(self) -> "datetime.datetime": if self.timezone is not None: if tz is None: raise util.CommandError( "The library 'python-dateutil' is required " "for timezone support" ) # First, assume correct capitalization tzinfo = tz.gettz(self.timezone) if tzinfo is None: # Fall back to uppercase tzinfo = tz.gettz(self.timezone.upper()) if tzinfo is None: raise util.CommandError( "Can't locate timezone: %s" % self.timezone ) create_date = ( datetime.datetime.utcnow() .replace(tzinfo=tz.tzutc()) .astimezone(tzinfo) ) else: create_date = datetime.datetime.now() return create_date def generate_revision( self, revid: str, message: Optional[str], head: Optional[str] = None, refresh: bool = False, splice: Optional[bool] = False, branch_labels: Optional[str] = None, version_path: Optional[str] = None, depends_on: Optional[_RevIdType] = None, **kw: Any ) -> Optional["Script"]: """Generate a new revision file. This runs the ``script.py.mako`` template, given template arguments, and creates a new file. :param revid: String revision id. Typically this comes from ``alembic.util.rev_id()``. :param message: the revision message, the one passed by the -m argument to the ``revision`` command. :param head: the head revision to generate against. Defaults to the current "head" if no branches are present, else raises an exception. :param splice: if True, allow the "head" version to not be an actual head; otherwise, the selected head must be a head (e.g. endpoint) revision. :param refresh: deprecated. """ if head is None: head = "head" try: Script.verify_rev_id(revid) except revision.RevisionError as err: raise util.CommandError(err.args[0]) from err with self._catch_revision_errors( multiple_heads=( "Multiple heads are present; please specify the head " "revision on which the new revision should be based, " "or perform a merge." 
) ): heads = self.revision_map.get_revisions(head) if len(set(heads)) != len(heads): raise util.CommandError("Duplicate head revisions specified") create_date = self._generate_create_date() if version_path is None: if len(self._version_locations) > 1: for head_ in heads: if head_ is not None: assert isinstance(head_, Script) version_path = os.path.dirname(head_.path) break else: raise util.CommandError( "Multiple version locations present, " "please specify --version-path" ) else: version_path = self.versions norm_path = os.path.normpath(os.path.abspath(version_path)) for vers_path in self._version_locations: if os.path.normpath(vers_path) == norm_path: break else: raise util.CommandError( "Path %s is not represented in current " "version locations" % version_path ) if self.version_locations: self._ensure_directory(version_path) path = self._rev_path(version_path, revid, message, create_date) if not splice: for head_ in heads: if head_ is not None and not head_.is_head: raise util.CommandError( "Revision %s is not a head revision; please specify " "--splice to create a new branch from this revision" % head_.revision ) if depends_on: with self._catch_revision_errors(): depends_on = [ dep if dep in rev.branch_labels # maintain branch labels else rev.revision # resolve partial revision identifiers for rev, dep in [ (self.revision_map.get_revision(dep), dep) for dep in util.to_list(depends_on) ] ] self._generate_template( os.path.join(self.dir, "script.py.mako"), path, up_revision=str(revid), down_revision=revision.tuple_rev_as_scalar( tuple(h.revision if h is not None else None for h in heads) ), branch_labels=util.to_tuple(branch_labels), depends_on=revision.tuple_rev_as_scalar( cast("Optional[List[str]]", depends_on) ), create_date=create_date, comma=util.format_as_comma, message=message if message is not None else ("empty message"), **kw ) post_write_hooks = self.hook_config if post_write_hooks: write_hooks._run_hooks(path, post_write_hooks) try: script = Script._from_path(self, path) except revision.RevisionError as err: raise util.CommandError(err.args[0]) from err if script is None: return None if branch_labels and not script.branch_labels: raise util.CommandError( "Version %s specified branch_labels %s, however the " "migration file %s does not have them; have you upgraded " "your script.py.mako to include the " "'branch_labels' section?" % (script.revision, branch_labels, script.path) ) self.revision_map.add_revision(script) return script def _rev_path( self, path: str, rev_id: str, message: Optional[str], create_date: "datetime.datetime", ) -> str: slug = "_".join(_slug_re.findall(message or "")).lower() if len(slug) > self.truncate_slug_length: slug = slug[: self.truncate_slug_length].rsplit("_", 1)[0] + "_" filename = "%s.py" % ( self.file_template % { "rev": rev_id, "slug": slug, "year": create_date.year, "month": create_date.month, "day": create_date.day, "hour": create_date.hour, "minute": create_date.minute, "second": create_date.second, } ) return os.path.join(path, filename) class Script(revision.Revision): """Represent a single revision file in a ``versions/`` directory. The :class:`.Script` instance is returned by methods such as :meth:`.ScriptDirectory.iterate_revisions`. 
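For example (illustrative sketch; assumes ``script_directory`` is a configured :class:`.ScriptDirectory` with a single head revision)::

    head_script = script_directory.get_revision("head")
    print(head_script.revision)
    print(head_script.log_entry)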
""" def __init__(self, module: ModuleType, rev_id: str, path: str): self.module = module self.path = path super(Script, self).__init__( rev_id, module.down_revision, # type: ignore[attr-defined] branch_labels=util.to_tuple( getattr(module, "branch_labels", None), default=() ), dependencies=util.to_tuple( getattr(module, "depends_on", None), default=() ), ) module: ModuleType = None # type: ignore[assignment] """The Python module representing the actual script itself.""" path: str = None # type: ignore[assignment] """Filesystem path of the script.""" _db_current_indicator = None """Utility variable which when set will cause string output to indicate this is a "current" version in some database""" @property def doc(self) -> str: """Return the docstring given in the script.""" return re.split("\n\n", self.longdoc)[0] @property def longdoc(self) -> str: """Return the docstring given in the script.""" doc = self.module.__doc__ if doc: if hasattr(self.module, "_alembic_source_encoding"): doc = doc.decode( # type: ignore[attr-defined] self.module._alembic_source_encoding # type: ignore[attr-defined] # noqa ) return doc.strip() # type: ignore[union-attr] else: return "" @property def log_entry(self) -> str: entry = "Rev: %s%s%s%s%s\n" % ( self.revision, " (head)" if self.is_head else "", " (branchpoint)" if self.is_branch_point else "", " (mergepoint)" if self.is_merge_point else "", " (current)" if self._db_current_indicator else "", ) if self.is_merge_point: entry += "Merges: %s\n" % (self._format_down_revision(),) else: entry += "Parent: %s\n" % (self._format_down_revision(),) if self.dependencies: entry += "Also depends on: %s\n" % ( util.format_as_comma(self.dependencies) ) if self.is_branch_point: entry += "Branches into: %s\n" % ( util.format_as_comma(self.nextrev) ) if self.branch_labels: entry += "Branch names: %s\n" % ( util.format_as_comma(self.branch_labels), ) entry += "Path: %s\n" % (self.path,) entry += "\n%s\n" % ( "\n".join(" %s" % para for para in self.longdoc.splitlines()) ) return entry def __str__(self): return "%s -> %s%s%s%s, %s" % ( self._format_down_revision(), self.revision, " (head)" if self.is_head else "", " (branchpoint)" if self.is_branch_point else "", " (mergepoint)" if self.is_merge_point else "", self.doc, ) def _head_only( self, include_branches: bool = False, include_doc: bool = False, include_parents: bool = False, tree_indicators: bool = True, head_indicators: bool = True, ) -> str: text = self.revision if include_parents: if self.dependencies: text = "%s (%s) -> %s" % ( self._format_down_revision(), util.format_as_comma(self.dependencies), text, ) else: text = "%s -> %s" % (self._format_down_revision(), text) assert text is not None if include_branches and self.branch_labels: text += " (%s)" % util.format_as_comma(self.branch_labels) if head_indicators or tree_indicators: text += "%s%s%s" % ( " (head)" if self._is_real_head else "", " (effective head)" if self.is_head and not self._is_real_head else "", " (current)" if self._db_current_indicator else "", ) if tree_indicators: text += "%s%s" % ( " (branchpoint)" if self.is_branch_point else "", " (mergepoint)" if self.is_merge_point else "", ) if include_doc: text += ", %s" % self.doc return text def cmd_format( self, verbose: bool, include_branches: bool = False, include_doc: bool = False, include_parents: bool = False, tree_indicators: bool = True, ) -> str: if verbose: return self.log_entry else: return self._head_only( include_branches, include_doc, include_parents, tree_indicators ) def 
_format_down_revision(self) -> str: if not self.down_revision: return "" else: return util.format_as_comma(self._versioned_down_revisions) @classmethod def _from_path( cls, scriptdir: ScriptDirectory, path: str ) -> Optional["Script"]: dir_, filename = os.path.split(path) return cls._from_filename(scriptdir, dir_, filename) @classmethod def _list_py_dir(cls, scriptdir: ScriptDirectory, path: str) -> List[str]: if scriptdir.sourceless: # read files in version path, e.g. pyc or pyo files # in the immediate path paths = os.listdir(path) names = set(fname.split(".")[0] for fname in paths) # look for __pycache__ if os.path.exists(os.path.join(path, "__pycache__")): # add all files from __pycache__ whose filename is not # already in the names we got from the version directory. # add as relative paths including __pycache__ token paths.extend( os.path.join("__pycache__", pyc) for pyc in os.listdir(os.path.join(path, "__pycache__")) if pyc.split(".")[0] not in names ) return paths else: return os.listdir(path) @classmethod def _from_filename( cls, scriptdir: ScriptDirectory, dir_: str, filename: str ) -> Optional["Script"]: if scriptdir.sourceless: py_match = _sourceless_rev_file.match(filename) else: py_match = _only_source_rev_file.match(filename) if not py_match: return None py_filename = py_match.group(1) if scriptdir.sourceless: is_c = py_match.group(2) == "c" is_o = py_match.group(2) == "o" else: is_c = is_o = False if is_o or is_c: py_exists = os.path.exists(os.path.join(dir_, py_filename)) pyc_exists = os.path.exists(os.path.join(dir_, py_filename + "c")) # prefer .py over .pyc because we'd like to get the # source encoding; prefer .pyc over .pyo because we'd like to # have the docstrings which a -OO file would not have if py_exists or is_o and pyc_exists: return None module = util.load_python_file(dir_, filename) if not hasattr(module, "revision"): # attempt to get the revision id from the script name, # this for legacy only m = _legacy_rev.match(filename) if not m: raise util.CommandError( "Could not determine revision id from filename %s. " "Be sure the 'revision' variable is " "declared inside the script (please see 'Upgrading " "from Alembic 0.1 to 0.2' in the documentation)." % filename ) else: revision = m.group(1) else: revision = module.revision return Script(module, revision, os.path.join(dir_, filename)) alembic-rel_1_7_6/alembic/script/revision.py000066400000000000000000001632501417624537100212560ustar00rootroot00000000000000import collections import re from typing import Any from typing import Callable from typing import cast from typing import Collection from typing import Deque from typing import Dict from typing import FrozenSet from typing import Iterable from typing import Iterator from typing import List from typing import Optional from typing import Sequence from typing import Set from typing import Tuple from typing import TYPE_CHECKING from typing import TypeVar from typing import Union from sqlalchemy import util as sqlautil from .. 
import util if TYPE_CHECKING: from typing import Literal from .base import Script _RevIdType = Union[str, Sequence[str]] _RevisionIdentifierType = Union[str, Tuple[str, ...], None] _RevisionOrStr = Union["Revision", str] _RevisionOrBase = Union["Revision", "Literal['base']"] _InterimRevisionMapType = Dict[str, "Revision"] _RevisionMapType = Dict[Union[None, str, Tuple[()]], Optional["Revision"]] _T = TypeVar("_T", bound=Union[str, "Revision"]) _relative_destination = re.compile(r"(?:(.+?)@)?(\w+)?((?:\+|-)\d+)") _revision_illegal_chars = ["@", "-", "+"] class RevisionError(Exception): pass class RangeNotAncestorError(RevisionError): def __init__( self, lower: _RevisionIdentifierType, upper: _RevisionIdentifierType ) -> None: self.lower = lower self.upper = upper super(RangeNotAncestorError, self).__init__( "Revision %s is not an ancestor of revision %s" % (lower or "base", upper or "base") ) class MultipleHeads(RevisionError): def __init__(self, heads: Sequence[str], argument: Optional[str]) -> None: self.heads = heads self.argument = argument super(MultipleHeads, self).__init__( "Multiple heads are present for given argument '%s'; " "%s" % (argument, ", ".join(heads)) ) class ResolutionError(RevisionError): def __init__(self, message: str, argument: str) -> None: super(ResolutionError, self).__init__(message) self.argument = argument class CycleDetected(RevisionError): kind = "Cycle" def __init__(self, revisions: Sequence[str]) -> None: self.revisions = revisions super(CycleDetected, self).__init__( "%s is detected in revisions (%s)" % (self.kind, ", ".join(revisions)) ) class DependencyCycleDetected(CycleDetected): kind = "Dependency cycle" def __init__(self, revisions: Sequence[str]) -> None: super(DependencyCycleDetected, self).__init__(revisions) class LoopDetected(CycleDetected): kind = "Self-loop" def __init__(self, revision: str) -> None: super(LoopDetected, self).__init__([revision]) class DependencyLoopDetected(DependencyCycleDetected, LoopDetected): kind = "Dependency self-loop" def __init__(self, revision: Sequence[str]) -> None: super(DependencyLoopDetected, self).__init__(revision) class RevisionMap: """Maintains a map of :class:`.Revision` objects. :class:`.RevisionMap` is used by :class:`.ScriptDirectory` to maintain and traverse the collection of :class:`.Script` objects, which are themselves instances of :class:`.Revision`. """ def __init__(self, generator: Callable[[], Iterable["Revision"]]) -> None: """Construct a new :class:`.RevisionMap`. :param generator: a zero-arg callable that will generate an iterable of :class:`.Revision` instances to be used. These are typically :class:`.Script` subclasses within regular Alembic use. """ self._generator = generator @util.memoized_property def heads(self) -> Tuple[str, ...]: """All "head" revisions as strings. This is normally a tuple of length one, unless unmerged branches are present. :return: a tuple of string revision numbers. """ self._revision_map return self.heads @util.memoized_property def bases(self) -> Tuple[str, ...]: """All "base" revisions as strings. These are revisions that have a ``down_revision`` of None, or empty tuple. :return: a tuple of string revision numbers. """ self._revision_map return self.bases @util.memoized_property def _real_heads(self) -> Tuple[str, ...]: """All "real" head revisions as strings. :return: a tuple of string revision numbers. """ self._revision_map return self._real_heads @util.memoized_property def _real_bases(self) -> Tuple[str, ...]: """All "real" base revisions as strings. 
:return: a tuple of string revision numbers. """ self._revision_map return self._real_bases @util.memoized_property def _revision_map(self) -> _RevisionMapType: """memoized attribute, initializes the revision map from the initial collection. """ # Ordering required for some tests to pass (but not required in # general) map_: _InterimRevisionMapType = sqlautil.OrderedDict() heads: Set["Revision"] = sqlautil.OrderedSet() _real_heads: Set["Revision"] = sqlautil.OrderedSet() bases: Tuple["Revision", ...] = () _real_bases: Tuple["Revision", ...] = () has_branch_labels = set() all_revisions = set() for revision in self._generator(): all_revisions.add(revision) if revision.revision in map_: util.warn( "Revision %s is present more than once" % revision.revision ) map_[revision.revision] = revision if revision.branch_labels: has_branch_labels.add(revision) heads.add(revision) _real_heads.add(revision) if revision.is_base: bases += (revision,) if revision._is_real_base: _real_bases += (revision,) # add the branch_labels to the map_. We'll need these # to resolve the dependencies. rev_map = map_.copy() self._map_branch_labels( has_branch_labels, cast(_RevisionMapType, map_) ) # resolve dependency names from branch labels and symbolic # names self._add_depends_on(all_revisions, cast(_RevisionMapType, map_)) for rev in map_.values(): for downrev in rev._all_down_revisions: if downrev not in map_: util.warn( "Revision %s referenced from %s is not present" % (downrev, rev) ) down_revision = map_[downrev] down_revision.add_nextrev(rev) if downrev in rev._versioned_down_revisions: heads.discard(down_revision) _real_heads.discard(down_revision) # once the map has downrevisions populated, the dependencies # can be further refined to include only those which are not # already ancestors self._normalize_depends_on(all_revisions, cast(_RevisionMapType, map_)) self._detect_cycles(rev_map, heads, bases, _real_heads, _real_bases) revision_map: _RevisionMapType = dict(map_.items()) revision_map[None] = revision_map[()] = None self.heads = tuple(rev.revision for rev in heads) self._real_heads = tuple(rev.revision for rev in _real_heads) self.bases = tuple(rev.revision for rev in bases) self._real_bases = tuple(rev.revision for rev in _real_bases) self._add_branches(has_branch_labels, revision_map) return revision_map def _detect_cycles( self, rev_map: _InterimRevisionMapType, heads: Set["Revision"], bases: Tuple["Revision", ...], _real_heads: Set["Revision"], _real_bases: Tuple["Revision", ...], ) -> None: if not rev_map: return if not heads or not bases: raise CycleDetected(list(rev_map)) total_space = { rev.revision for rev in self._iterate_related_revisions( lambda r: r._versioned_down_revisions, heads, map_=cast(_RevisionMapType, rev_map), ) }.intersection( rev.revision for rev in self._iterate_related_revisions( lambda r: r.nextrev, bases, map_=cast(_RevisionMapType, rev_map), ) ) deleted_revs = set(rev_map.keys()) - total_space if deleted_revs: raise CycleDetected(sorted(deleted_revs)) if not _real_heads or not _real_bases: raise DependencyCycleDetected(list(rev_map)) total_space = { rev.revision for rev in self._iterate_related_revisions( lambda r: r._all_down_revisions, _real_heads, map_=cast(_RevisionMapType, rev_map), ) }.intersection( rev.revision for rev in self._iterate_related_revisions( lambda r: r._all_nextrev, _real_bases, map_=cast(_RevisionMapType, rev_map), ) ) deleted_revs = set(rev_map.keys()) - total_space if deleted_revs: raise DependencyCycleDetected(sorted(deleted_revs)) def 
_map_branch_labels( self, revisions: Collection["Revision"], map_: _RevisionMapType ) -> None: for revision in revisions: if revision.branch_labels: assert revision._orig_branch_labels is not None for branch_label in revision._orig_branch_labels: if branch_label in map_: map_rev = map_[branch_label] assert map_rev is not None raise RevisionError( "Branch name '%s' in revision %s already " "used by revision %s" % ( branch_label, revision.revision, map_rev.revision, ) ) map_[branch_label] = revision def _add_branches( self, revisions: Collection["Revision"], map_: _RevisionMapType ) -> None: for revision in revisions: if revision.branch_labels: revision.branch_labels.update(revision.branch_labels) for node in self._get_descendant_nodes( [revision], map_, include_dependencies=False ): node.branch_labels.update(revision.branch_labels) parent = node while ( parent and not parent._is_real_branch_point and not parent.is_merge_point ): parent.branch_labels.update(revision.branch_labels) if parent.down_revision: parent = map_[parent.down_revision] else: break def _add_depends_on( self, revisions: Collection["Revision"], map_: _RevisionMapType ) -> None: """Resolve the 'dependencies' for each revision in a collection in terms of actual revision ids, as opposed to branch labels or other symbolic names. The collection is then assigned to the _resolved_dependencies attribute on each revision object. """ for revision in revisions: if revision.dependencies: deps = [ map_[dep] for dep in util.to_tuple(revision.dependencies) ] revision._resolved_dependencies = tuple( [d.revision for d in deps if d is not None] ) else: revision._resolved_dependencies = () def _normalize_depends_on( self, revisions: Collection["Revision"], map_: _RevisionMapType ) -> None: """Create a collection of "dependencies" that omits dependencies that are already ancestor nodes for each revision in a given collection. This builds upon the _resolved_dependencies collection created in the _add_depends_on() method, looking in the fully populated revision map for ancestors, and omitting them as the _resolved_dependencies collection as it is copied to a new collection. The new collection is then assigned to the _normalized_resolved_dependencies attribute on each revision object. The collection is then used to determine the immediate "down revision" identifiers for this revision. """ for revision in revisions: if revision._resolved_dependencies: normalized_resolved = set(revision._resolved_dependencies) for rev in self._get_ancestor_nodes( [revision], include_dependencies=False, map_=cast(_RevisionMapType, map_), ): if rev is revision: continue elif rev._resolved_dependencies: normalized_resolved.difference_update( rev._resolved_dependencies ) revision._normalized_resolved_dependencies = tuple( normalized_resolved ) else: revision._normalized_resolved_dependencies = () def add_revision( self, revision: "Revision", _replace: bool = False ) -> None: """add a single revision to an existing map. This method is for single-revision use cases, it's not appropriate for fully populating an entire revision map. 
""" map_ = self._revision_map if not _replace and revision.revision in map_: util.warn( "Revision %s is present more than once" % revision.revision ) elif _replace and revision.revision not in map_: raise Exception("revision %s not in map" % revision.revision) map_[revision.revision] = revision revisions = [revision] self._add_branches(revisions, map_) self._map_branch_labels(revisions, map_) self._add_depends_on(revisions, map_) if revision.is_base: self.bases += (revision.revision,) if revision._is_real_base: self._real_bases += (revision.revision,) for downrev in revision._all_down_revisions: if downrev not in map_: util.warn( "Revision %s referenced from %s is not present" % (downrev, revision) ) cast("Revision", map_[downrev]).add_nextrev(revision) self._normalize_depends_on(revisions, map_) if revision._is_real_head: self._real_heads = tuple( head for head in self._real_heads if head not in set(revision._all_down_revisions).union( [revision.revision] ) ) + (revision.revision,) if revision.is_head: self.heads = tuple( head for head in self.heads if head not in set(revision._versioned_down_revisions).union( [revision.revision] ) ) + (revision.revision,) def get_current_head( self, branch_label: Optional[str] = None ) -> Optional[str]: """Return the current head revision. If the script directory has multiple heads due to branching, an error is raised; :meth:`.ScriptDirectory.get_heads` should be preferred. :param branch_label: optional branch name which will limit the heads considered to those which include that branch_label. :return: a string revision number. .. seealso:: :meth:`.ScriptDirectory.get_heads` """ current_heads: Sequence[str] = self.heads if branch_label: current_heads = self.filter_for_lineage( current_heads, branch_label ) if len(current_heads) > 1: raise MultipleHeads( current_heads, "%s@head" % branch_label if branch_label else "head", ) if current_heads: return current_heads[0] else: return None def _get_base_revisions(self, identifier: str) -> Tuple[str, ...]: return self.filter_for_lineage(self.bases, identifier) def get_revisions( self, id_: Union[str, Collection[str], None] ) -> Tuple["Revision", ...]: """Return the :class:`.Revision` instances with the given rev id or identifiers. May be given a single identifier, a sequence of identifiers, or the special symbols "head" or "base". The result is a tuple of one or more identifiers, or an empty tuple in the case of "base". In the cases where 'head', 'heads' is requested and the revision map is empty, returns an empty tuple. Supports partial identifiers, where the given identifier is matched against all identifiers that start with the given characters; if there is exactly one match, that determines the full revision. 
""" if isinstance(id_, (list, tuple, set, frozenset)): return sum([self.get_revisions(id_elem) for id_elem in id_], ()) else: resolved_id, branch_label = self._resolve_revision_number( id_ # type:ignore [arg-type] ) if len(resolved_id) == 1: try: rint = int(resolved_id[0]) if rint < 0: # branch@-n -> walk down from heads select_heads = self.get_revisions("heads") if branch_label is not None: select_heads = tuple( head for head in select_heads if branch_label in head.branch_labels ) return tuple( self._walk(head, steps=rint) for head in select_heads ) except ValueError: # couldn't resolve as integer pass return tuple( self._revision_for_ident(rev_id, branch_label) for rev_id in resolved_id ) def get_revision(self, id_: Optional[str]) -> "Revision": """Return the :class:`.Revision` instance with the given rev id. If a symbolic name such as "head" or "base" is given, resolves the identifier into the current head or base revision. If the symbolic name refers to multiples, :class:`.MultipleHeads` is raised. Supports partial identifiers, where the given identifier is matched against all identifiers that start with the given characters; if there is exactly one match, that determines the full revision. """ resolved_id, branch_label = self._resolve_revision_number(id_) if len(resolved_id) > 1: raise MultipleHeads(resolved_id, id_) elif resolved_id: resolved_id = resolved_id[0] # type:ignore[assignment] return self._revision_for_ident(cast(str, resolved_id), branch_label) def _resolve_branch(self, branch_label: str) -> "Revision": try: branch_rev = self._revision_map[branch_label] except KeyError: try: nonbranch_rev = self._revision_for_ident(branch_label) except ResolutionError as re: raise ResolutionError( "No such branch: '%s'" % branch_label, branch_label ) from re else: return nonbranch_rev else: return cast("Revision", branch_rev) def _revision_for_ident( self, resolved_id: str, check_branch: Optional[str] = None ) -> "Revision": branch_rev: Optional["Revision"] if check_branch: branch_rev = self._resolve_branch(check_branch) else: branch_rev = None revision: Union["Revision", "Literal[False]"] try: revision = cast("Revision", self._revision_map[resolved_id]) except KeyError: # break out to avoid misleading py3k stack traces revision = False revs: Sequence[str] if revision is False: # do a partial lookup revs = [ x for x in self._revision_map if x and len(x) > 3 and x.startswith(resolved_id) ] if branch_rev: revs = self.filter_for_lineage(revs, check_branch) if not revs: raise ResolutionError( "No such revision or branch '%s'%s" % ( resolved_id, ( "; please ensure at least four characters are " "present for partial revision identifier matches" if len(resolved_id) < 4 else "" ), ), resolved_id, ) elif len(revs) > 1: raise ResolutionError( "Multiple revisions start " "with '%s': %s..." 
% (resolved_id, ", ".join("'%s'" % r for r in revs[0:3])), resolved_id, ) else: revision = cast("Revision", self._revision_map[revs[0]]) revision = cast("Revision", revision) if check_branch and revision is not None: assert branch_rev is not None if not self._shares_lineage( revision.revision, branch_rev.revision ): raise ResolutionError( "Revision %s is not a member of branch '%s'" % (revision.revision, check_branch), resolved_id, ) return revision def _filter_into_branch_heads( self, targets: Set["Script"] ) -> Set["Script"]: targets = set(targets) for rev in list(targets): if targets.intersection( self._get_descendant_nodes([rev], include_dependencies=False) ).difference([rev]): targets.discard(rev) return targets def filter_for_lineage( self, targets: Iterable[_T], check_against: Optional[str], include_dependencies: bool = False, ) -> Tuple[_T, ...]: id_, branch_label = self._resolve_revision_number(check_against) shares = [] if branch_label: shares.append(branch_label) if id_: shares.extend(id_) return tuple( tg for tg in targets if self._shares_lineage( tg, shares, include_dependencies=include_dependencies ) ) def _shares_lineage( self, target: _RevisionOrStr, test_against_revs: Sequence[_RevisionOrStr], include_dependencies: bool = False, ) -> bool: if not test_against_revs: return True if not isinstance(target, Revision): target = self._revision_for_ident(target) test_against_revs = [ self._revision_for_ident(test_against_rev) if not isinstance(test_against_rev, Revision) else test_against_rev for test_against_rev in util.to_tuple( test_against_revs, default=() ) ] return bool( set( self._get_descendant_nodes( [target], include_dependencies=include_dependencies ) ) .union( self._get_ancestor_nodes( [target], include_dependencies=include_dependencies ) ) .intersection(test_against_revs) ) def _resolve_revision_number( self, id_: Optional[str] ) -> Tuple[Tuple[str, ...], Optional[str]]: branch_label: Optional[str] if isinstance(id_, str) and "@" in id_: branch_label, id_ = id_.split("@", 1) elif id_ is not None and ( (isinstance(id_, tuple) and id_ and not isinstance(id_[0], str)) or not isinstance(id_, (str, tuple)) ): raise RevisionError( "revision identifier %r is not a string; ensure database " "driver settings are correct" % (id_,) ) else: branch_label = None # ensure map is loaded self._revision_map if id_ == "heads": if branch_label: return ( self.filter_for_lineage(self.heads, branch_label), branch_label, ) else: return self._real_heads, branch_label elif id_ == "head": current_head = self.get_current_head(branch_label) if current_head: return (current_head,), branch_label else: return (), branch_label elif id_ == "base" or id_ is None: return (), branch_label else: return util.to_tuple(id_, default=None), branch_label def iterate_revisions( self, upper: _RevisionIdentifierType, lower: _RevisionIdentifierType, implicit_base: bool = False, inclusive: bool = False, assert_relative_length: bool = True, select_for_downgrade: bool = False, ) -> Iterator["Revision"]: """Iterate through script revisions, starting at the given upper revision identifier and ending at the lower. The traversal uses strictly the `down_revision` marker inside each migration script, so it is a requirement that upper >= lower, else you'll get nothing back. The iterator yields :class:`.Revision` objects. 
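For example, to walk from all current heads down to the base (illustrative sketch; ``revision_map`` is a populated :class:`.RevisionMap`)::

    for rev in revision_map.iterate_revisions("heads", "base"):
        print(rev.revision)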
""" fn: Callable if select_for_downgrade: fn = self._collect_downgrade_revisions else: fn = self._collect_upgrade_revisions revisions, heads = fn( upper, lower, inclusive=inclusive, implicit_base=implicit_base, assert_relative_length=assert_relative_length, ) for node in self._topological_sort(revisions, heads): yield self.get_revision(node) def _get_descendant_nodes( self, targets: Collection["Revision"], map_: Optional[_RevisionMapType] = None, check: bool = False, omit_immediate_dependencies: bool = False, include_dependencies: bool = True, ) -> Iterator[Any]: if omit_immediate_dependencies: def fn(rev): if rev not in targets: return rev._all_nextrev else: return rev.nextrev elif include_dependencies: def fn(rev): return rev._all_nextrev else: def fn(rev): return rev.nextrev return self._iterate_related_revisions( fn, targets, map_=map_, check=check ) def _get_ancestor_nodes( self, targets: Collection["Revision"], map_: Optional[_RevisionMapType] = None, check: bool = False, include_dependencies: bool = True, ) -> Iterator["Revision"]: if include_dependencies: def fn(rev): return rev._normalized_down_revisions else: def fn(rev): return rev._versioned_down_revisions return self._iterate_related_revisions( fn, targets, map_=map_, check=check ) def _iterate_related_revisions( self, fn: Callable, targets: Collection["Revision"], map_: Optional[_RevisionMapType], check: bool = False, ) -> Iterator["Revision"]: if map_ is None: map_ = self._revision_map seen = set() todo: Deque["Revision"] = collections.deque() for target in targets: todo.append(target) if check: per_target = set() while todo: rev = todo.pop() if check: per_target.add(rev) if rev in seen: continue seen.add(rev) # Check for map errors before collecting. for rev_id in fn(rev): next_rev = map_[rev_id] assert next_rev is not None if next_rev.revision != rev_id: raise RevisionError( "Dependency resolution failed; broken map" ) todo.append(next_rev) yield rev if check: overlaps = per_target.intersection(targets).difference( [target] ) if overlaps: raise RevisionError( "Requested revision %s overlaps with " "other requested revisions %s" % ( target.revision, ", ".join(r.revision for r in overlaps), ) ) def _topological_sort( self, revisions: Collection["Revision"], heads: Any, ) -> List[str]: """Yield revision ids of a collection of Revision objects in topological sorted order (i.e. revisions always come after their down_revisions and dependencies). Uses the order of keys in _revision_map to sort. """ id_to_rev = self._revision_map def get_ancestors(rev_id): return { r.revision for r in self._get_ancestor_nodes([id_to_rev[rev_id]]) } todo = {d.revision for d in revisions} # Use revision map (ordered dict) key order to pre-sort. inserted_order = list(self._revision_map) current_heads = list( sorted( {d.revision for d in heads if d.revision in todo}, key=inserted_order.index, ) ) ancestors_by_idx = [get_ancestors(rev_id) for rev_id in current_heads] output = [] current_candidate_idx = 0 while current_heads: candidate = current_heads[current_candidate_idx] for check_head_index, ancestors in enumerate(ancestors_by_idx): # scan all the heads. see if we can continue walking # down the current branch indicated by current_candidate_idx. 
if ( check_head_index != current_candidate_idx and candidate in ancestors ): current_candidate_idx = check_head_index # nope, another head is dependent on us, they have # to be traversed first break else: # yup, we can emit if candidate in todo: output.append(candidate) todo.remove(candidate) # now update the heads with our ancestors. candidate_rev = id_to_rev[candidate] assert candidate_rev is not None heads_to_add = [ r for r in candidate_rev._normalized_down_revisions if r in todo and r not in current_heads ] if not heads_to_add: # no ancestors, so remove this head from the list del current_heads[current_candidate_idx] del ancestors_by_idx[current_candidate_idx] current_candidate_idx = max(current_candidate_idx - 1, 0) else: if ( not candidate_rev._normalized_resolved_dependencies and len(candidate_rev._versioned_down_revisions) == 1 ): current_heads[current_candidate_idx] = heads_to_add[0] # for plain movement down a revision line without # any mergepoints, branchpoints, or deps, we # can update the ancestors collection directly # by popping out the candidate we just emitted ancestors_by_idx[current_candidate_idx].discard( candidate ) else: # otherwise recalculate it again, things get # complicated otherwise. This can possibly be # improved to not run the whole ancestor thing # each time but it was getting complicated current_heads[current_candidate_idx] = heads_to_add[0] current_heads.extend(heads_to_add[1:]) ancestors_by_idx[ current_candidate_idx ] = get_ancestors(heads_to_add[0]) ancestors_by_idx.extend( get_ancestors(head) for head in heads_to_add[1:] ) assert not todo return output def _walk( self, start: Optional[Union[str, "Revision"]], steps: int, branch_label: Optional[str] = None, no_overwalk: bool = True, ) -> "Revision": """ Walk the requested number of :steps up (steps > 0) or down (steps < 0) the revision tree. :branch_label is used to select branches only when walking up. If the walk goes past the boundaries of the tree and :no_overwalk is True, None is returned, otherwise the walk terminates early. A RevisionError is raised if there is no unambiguous revision to walk to. """ initial: Optional[_RevisionOrBase] if isinstance(start, str): initial = self.get_revision(start) else: initial = start children: Sequence[_RevisionOrBase] for _ in range(abs(steps)): if steps > 0: # Walk up children = [ rev for rev in self.get_revisions( self.bases if initial is None else cast("Revision", initial).nextrev ) ] if branch_label: children = self.filter_for_lineage(children, branch_label) else: # Walk down if initial == "base": children = () else: children = self.get_revisions( self.heads if initial is None else initial.down_revision ) if not children: children = cast("Tuple[Literal['base']]", ("base",)) if not children: # This will return an invalid result if no_overwalk, otherwise # further steps will stay where we are. ret = None if no_overwalk else initial return ret # type:ignore[return-value] elif len(children) > 1: raise RevisionError("Ambiguous walk") initial = children[0] return cast("Revision", initial) def _parse_downgrade_target( self, current_revisions: _RevisionIdentifierType, target: _RevisionIdentifierType, assert_relative_length: bool, ) -> Tuple[Optional[str], Optional[_RevisionOrBase]]: """ Parse downgrade command syntax :target to retrieve the target revision and branch label (if any) given the :current_revisons stamp of the database. 
Returns a tuple (branch_label, target_revision) where branch_label is a string from the command specifying the branch to consider (or None if no branch given), and target_revision is a Revision object which the command refers to. target_revsions is None if the command refers to 'base'. The target may be specified in absolute form, or relative to :current_revisions. """ if target is None: return None, None assert isinstance( target, str ), "Expected downgrade target in string form" match = _relative_destination.match(target) if match: branch_label, symbol, relative = match.groups() rel_int = int(relative) if rel_int >= 0: if symbol is None: # Downgrading to current + n is not valid. raise RevisionError( "Relative revision %s didn't " "produce %d migrations" % (relative, abs(rel_int)) ) # Find target revision relative to given symbol. rev = self._walk( symbol, rel_int, branch_label, no_overwalk=assert_relative_length, ) if rev is None: raise RevisionError("Walked too far") return branch_label, rev else: relative_revision = symbol is None if relative_revision: # Find target revision relative to current state. if branch_label: symbol_list = self.filter_for_lineage( util.to_tuple(current_revisions), branch_label ) assert len(symbol_list) == 1 symbol = symbol_list[0] else: current_revisions = util.to_tuple(current_revisions) if not current_revisions: raise RevisionError( "Relative revision %s didn't " "produce %d migrations" % (relative, abs(rel_int)) ) # Have to check uniques here for duplicate rows test. if len(set(current_revisions)) > 1: util.warn( "downgrade -1 from multiple heads is " "ambiguous; " "this usage will be disallowed in a future " "release." ) symbol = current_revisions[0] # Restrict iteration to just the selected branch when # ambiguous branches are involved. branch_label = symbol # Walk down the tree to find downgrade target. rev = self._walk( start=self.get_revision(symbol) if branch_label is None else self.get_revision("%s@%s" % (branch_label, symbol)), steps=rel_int, no_overwalk=assert_relative_length, ) if rev is None: if relative_revision: raise RevisionError( "Relative revision %s didn't " "produce %d migrations" % (relative, abs(rel_int)) ) else: raise RevisionError("Walked too far") return branch_label, rev # No relative destination given, revision specified is absolute. branch_label, _, symbol = target.rpartition("@") if not branch_label: branch_label = None # type:ignore[assignment] return branch_label, self.get_revision(symbol) def _parse_upgrade_target( self, current_revisions: _RevisionIdentifierType, target: _RevisionIdentifierType, assert_relative_length: bool, ) -> Tuple["Revision", ...]: """ Parse upgrade command syntax :target to retrieve the target revision and given the :current_revisons stamp of the database. Returns a tuple of Revision objects which should be iterated/upgraded to. The target may be specified in absolute form, or relative to :current_revisions. """ if isinstance(target, str): match = _relative_destination.match(target) else: match = None if not match: # No relative destination, target is absolute. return self.get_revisions(target) current_revisions = util.to_tuple(current_revisions) branch_label, symbol, relative_str = match.groups() relative = int(relative_str) if relative > 0: if symbol is None: if not current_revisions: current_revisions = (None,) # Try to filter to a single target (avoid ambiguous branches). 
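                # (descriptive note) start from the current revisions; when a
                # branch label is given, narrow the candidates to that branch's
                # lineage (backtracking to a branchpoint below if the branch has
                # no current head) before walking upwards.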
start_revs = current_revisions if branch_label: start_revs = self.filter_for_lineage( self.get_revisions(current_revisions), branch_label ) if not start_revs: # The requested branch is not a head, so we need to # backtrack to find a branchpoint. active_on_branch = self.filter_for_lineage( self._get_ancestor_nodes( self.get_revisions(current_revisions) ), branch_label, ) # Find the tips of this set of revisions (revisions # without children within the set). start_revs = tuple( {rev.revision for rev in active_on_branch} - { down for rev in active_on_branch for down in rev._normalized_down_revisions } ) if not start_revs: # We must need to go right back to base to find # a starting point for this branch. start_revs = (None,) if len(start_revs) > 1: raise RevisionError( "Ambiguous upgrade from multiple current revisions" ) # Walk up from unique target revision. rev = self._walk( start=start_revs[0], steps=relative, branch_label=branch_label, no_overwalk=assert_relative_length, ) if rev is None: raise RevisionError( "Relative revision %s didn't " "produce %d migrations" % (relative_str, abs(relative)) ) return (rev,) else: # Walk is relative to a given revision, not the current state. return ( self._walk( start=self.get_revision(symbol), steps=relative, branch_label=branch_label, no_overwalk=assert_relative_length, ), ) else: if symbol is None: # Upgrading to current - n is not valid. raise RevisionError( "Relative revision %s didn't " "produce %d migrations" % (relative, abs(relative)) ) return ( self._walk( start=self.get_revision(symbol) if branch_label is None else self.get_revision("%s@%s" % (branch_label, symbol)), steps=relative, no_overwalk=assert_relative_length, ), ) def _collect_downgrade_revisions( self, upper: _RevisionIdentifierType, target: _RevisionIdentifierType, inclusive: bool, implicit_base: bool, assert_relative_length: bool, ) -> Any: """ Compute the set of current revisions specified by :upper, and the downgrade target specified by :target. Return all dependents of target which are currently active. :inclusive=True includes the target revision in the set """ branch_label, target_revision = self._parse_downgrade_target( current_revisions=upper, target=target, assert_relative_length=assert_relative_length, ) if target_revision == "base": target_revision = None assert target_revision is None or isinstance(target_revision, Revision) # Find candidates to drop. if target_revision is None: # Downgrading back to base: find all tree roots. roots = [ rev for rev in self._revision_map.values() if rev is not None and rev.down_revision is None ] elif inclusive: # inclusive implies target revision should also be dropped roots = [target_revision] else: # Downgrading to fixed target: find all direct children. roots = list(self.get_revisions(target_revision.nextrev)) if branch_label and len(roots) > 1: # Need to filter roots. ancestors = { rev.revision for rev in self._get_ancestor_nodes( [self._resolve_branch(branch_label)], include_dependencies=False, ) } # Intersection gives the root revisions we are trying to # rollback with the downgrade. roots = list( self.get_revisions( {rev.revision for rev in roots}.intersection(ancestors) ) ) # Ensure we didn't throw everything away when filtering branches. if len(roots) == 0: raise RevisionError( "Not a valid downgrade target from current heads" ) heads = self.get_revisions(upper) # Aim is to drop :branch_revision; to do so we also need to drop its # descendents and anything dependent on it. 
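        # (descriptive note) the drop set is the descendants of the selected
        # roots, dependencies included; it is restricted just below to
        # revisions that are actually active, i.e. ancestors of the current
        # heads.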
downgrade_revisions = set( self._get_descendant_nodes( roots, include_dependencies=True, omit_immediate_dependencies=False, ) ) active_revisions = set( self._get_ancestor_nodes(heads, include_dependencies=True) ) # Emit revisions to drop in reverse topological sorted order. downgrade_revisions.intersection_update(active_revisions) if implicit_base: # Wind other branches back to base. downgrade_revisions.update( active_revisions.difference(self._get_ancestor_nodes(roots)) ) if ( target_revision is not None and not downgrade_revisions and target_revision not in heads ): # Empty intersection: target revs are not present. raise RangeNotAncestorError("Nothing to drop", upper) return downgrade_revisions, heads def _collect_upgrade_revisions( self, upper: _RevisionIdentifierType, lower: _RevisionIdentifierType, inclusive: bool, implicit_base: bool, assert_relative_length: bool, ) -> Tuple[Set["Revision"], Tuple[Optional[_RevisionOrBase]]]: """ Compute the set of required revisions specified by :upper, and the current set of active revisions specified by :lower. Find the difference between the two to compute the required upgrades. :inclusive=True includes the current/lower revisions in the set :implicit_base=False only returns revisions which are downstream of the current/lower revisions. Dependencies from branches with different bases will not be included. """ targets: Collection["Revision"] = self._parse_upgrade_target( current_revisions=lower, target=upper, assert_relative_length=assert_relative_length, ) # assert type(targets) is tuple, "targets should be a tuple" # Handled named bases (e.g. branch@... -> heads should only produce # targets on the given branch) if isinstance(lower, str) and "@" in lower: branch, _, _ = lower.partition("@") branch_rev = self.get_revision(branch) if branch_rev is not None and branch_rev.revision == branch: # A revision was used as a label; get its branch instead assert len(branch_rev.branch_labels) == 1 branch = next(iter(branch_rev.branch_labels)) targets = { need for need in targets if branch in need.branch_labels } required_node_set = set( self._get_ancestor_nodes( targets, check=True, include_dependencies=True ) ).union(targets) current_revisions = self.get_revisions(lower) if not implicit_base and any( rev not in required_node_set for rev in current_revisions if rev is not None ): raise RangeNotAncestorError(lower, upper) assert ( type(current_revisions) is tuple ), "current_revisions should be a tuple" # Special case where lower = a relative value (get_revisions can't # find it) if current_revisions and current_revisions[0] is None: _, rev = self._parse_downgrade_target( current_revisions=upper, target=lower, assert_relative_length=assert_relative_length, ) if rev == "base": current_revisions = tuple() lower = None else: current_revisions = (rev,) lower = rev.revision current_node_set = set( self._get_ancestor_nodes( current_revisions, check=True, include_dependencies=True ) ).union(current_revisions) needs = required_node_set.difference(current_node_set) # Include the lower revision (=current_revisions?) in the iteration if inclusive: needs.update(self.get_revisions(lower)) # By default, base is implicit as we want all dependencies returned. 
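        # (i.e. revisions reachable only through dependencies on other bases
        # remain in the upgrade set)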
# Base is also implicit if lower = base # implicit_base=False -> only return direct downstreams of # current_revisions if current_revisions and not implicit_base: lower_descendents = self._get_descendant_nodes( current_revisions, check=True, include_dependencies=False ) needs.intersection_update(lower_descendents) return needs, tuple(targets) # type:ignore[return-value] class Revision: """Base class for revisioned objects. The :class:`.Revision` class is the base of the more public-facing :class:`.Script` object, which represents a migration script. The mechanics of revision management and traversal are encapsulated within :class:`.Revision`, while :class:`.Script` applies this logic to Python files in a version directory. """ nextrev: FrozenSet[str] = frozenset() """following revisions, based on down_revision only.""" _all_nextrev: FrozenSet[str] = frozenset() revision: str = None # type: ignore[assignment] """The string revision number.""" down_revision: Optional[_RevIdType] = None """The ``down_revision`` identifier(s) within the migration script. Note that the total set of "down" revisions is down_revision + dependencies. """ dependencies: Optional[_RevIdType] = None """Additional revisions which this revision is dependent on. From a migration standpoint, these dependencies are added to the down_revision to form the full iteration. However, the separation of down_revision from "dependencies" is to assist in navigating a history that contains many branches, typically a multi-root scenario. """ branch_labels: Set[str] = None # type: ignore[assignment] """Optional string/tuple of symbolic names to apply to this revision's branch""" _resolved_dependencies: Tuple[str, ...] _normalized_resolved_dependencies: Tuple[str, ...] @classmethod def verify_rev_id(cls, revision: str) -> None: illegal_chars = set(revision).intersection(_revision_illegal_chars) if illegal_chars: raise RevisionError( "Character(s) '%s' not allowed in revision identifier '%s'" % (", ".join(sorted(illegal_chars)), revision) ) def __init__( self, revision: str, down_revision: Optional[Union[str, Tuple[str, ...]]], dependencies: Optional[Tuple[str, ...]] = None, branch_labels: Optional[Tuple[str, ...]] = None, ) -> None: if down_revision and revision in util.to_tuple(down_revision): raise LoopDetected(revision) elif dependencies is not None and revision in util.to_tuple( dependencies ): raise DependencyLoopDetected(revision) self.verify_rev_id(revision) self.revision = revision self.down_revision = tuple_rev_as_scalar(down_revision) self.dependencies = tuple_rev_as_scalar(dependencies) self._orig_branch_labels = util.to_tuple(branch_labels, default=()) self.branch_labels = set(self._orig_branch_labels) def __repr__(self) -> str: args = [repr(self.revision), repr(self.down_revision)] if self.dependencies: args.append("dependencies=%r" % (self.dependencies,)) if self.branch_labels: args.append("branch_labels=%r" % (self.branch_labels,)) return "%s(%s)" % (self.__class__.__name__, ", ".join(args)) def add_nextrev(self, revision: "Revision") -> None: self._all_nextrev = self._all_nextrev.union([revision.revision]) if self.revision in revision._versioned_down_revisions: self.nextrev = self.nextrev.union([revision.revision]) @property def _all_down_revisions(self) -> Tuple[str, ...]: return util.dedupe_tuple( util.to_tuple(self.down_revision, default=()) + self._resolved_dependencies ) @property def _normalized_down_revisions(self) -> Tuple[str, ...]: """return immediate down revisions for a rev, omitting dependencies that are 
still dependencies of ancestors. """ return util.dedupe_tuple( util.to_tuple(self.down_revision, default=()) + self._normalized_resolved_dependencies ) @property def _versioned_down_revisions(self) -> Tuple[str, ...]: return util.to_tuple(self.down_revision, default=()) @property def is_head(self) -> bool: """Return True if this :class:`.Revision` is a 'head' revision. This is determined based on whether any other :class:`.Script` within the :class:`.ScriptDirectory` refers to this :class:`.Script`. Multiple heads can be present. """ return not bool(self.nextrev) @property def _is_real_head(self) -> bool: return not bool(self._all_nextrev) @property def is_base(self) -> bool: """Return True if this :class:`.Revision` is a 'base' revision.""" return self.down_revision is None @property def _is_real_base(self) -> bool: """Return True if this :class:`.Revision` is a "real" base revision, e.g. that it has no dependencies either.""" # we use self.dependencies here because this is called up # in initialization where _real_dependencies isn't set up # yet return self.down_revision is None and self.dependencies is None @property def is_branch_point(self) -> bool: """Return True if this :class:`.Script` is a branch point. A branchpoint is defined as a :class:`.Script` which is referred to by more than one succeeding :class:`.Script`, that is more than one :class:`.Script` has a `down_revision` identifier pointing here. """ return len(self.nextrev) > 1 @property def _is_real_branch_point(self) -> bool: """Return True if this :class:`.Script` is a 'real' branch point, taking into account dependencies as well. """ return len(self._all_nextrev) > 1 @property def is_merge_point(self) -> bool: """Return True if this :class:`.Script` is a merge point.""" return len(self._versioned_down_revisions) > 1 def tuple_rev_as_scalar( rev: Optional[Sequence[str]], ) -> Optional[Union[str, Sequence[str]]]: if not rev: return None elif len(rev) == 1: return rev[0] else: return rev alembic-rel_1_7_6/alembic/script/write_hooks.py000066400000000000000000000101121417624537100217410ustar00rootroot00000000000000import shlex import subprocess import sys from typing import Any from typing import Callable from typing import Dict from typing import List from typing import Optional from typing import Union from .. import util from ..util import compat REVISION_SCRIPT_TOKEN = "REVISION_SCRIPT_FILENAME" _registry = {} def register(name: str) -> Callable: """A function decorator that will register that function as a write hook. See the documentation linked below for an example. .. versionadded:: 1.2.0 .. seealso:: :ref:`post_write_hooks_custom` """ def decorate(fn): _registry[name] = fn return fn return decorate def _invoke( name: str, revision: str, options: Dict[str, Union[str, int]] ) -> Any: """Invokes the formatter registered for the given name. :param name: The name of a formatter in the registry :param revision: A :class:`.MigrationRevision` instance :param options: A dict containing kwargs passed to the specified formatter. 
:raises: :class:`alembic.util.CommandError` """ try: hook = _registry[name] except KeyError as ke: raise util.CommandError( "No formatter with name '%s' registered" % name ) from ke else: return hook(revision, options) def _run_hooks(path: str, hook_config: Dict[str, str]) -> None: """Invoke hooks for a generated revision.""" from .base import _split_on_space_comma names = _split_on_space_comma.split(hook_config.get("hooks", "")) for name in names: if not name: continue opts = { key[len(name) + 1 :]: hook_config[key] for key in hook_config if key.startswith(name + ".") } opts["_hook_name"] = name try: type_ = opts["type"] except KeyError as ke: raise util.CommandError( "Key %s.type is required for post write hook %r" % (name, name) ) from ke else: util.status( 'Running post write hook "%s"' % name, _invoke, type_, path, opts, newline=True, ) def _parse_cmdline_options(cmdline_options_str: str, path: str) -> List[str]: """Parse options from a string into a list. Also substitutes the revision script token with the actual filename of the revision script. If the revision script token doesn't occur in the options string, it is automatically prepended. """ if REVISION_SCRIPT_TOKEN not in cmdline_options_str: cmdline_options_str = REVISION_SCRIPT_TOKEN + " " + cmdline_options_str cmdline_options_list = shlex.split( cmdline_options_str, posix=compat.is_posix ) cmdline_options_list = [ option.replace(REVISION_SCRIPT_TOKEN, path) for option in cmdline_options_list ] return cmdline_options_list @register("console_scripts") def console_scripts( path: str, options: dict, ignore_output: bool = False ) -> None: try: entrypoint_name = options["entrypoint"] except KeyError as ke: raise util.CommandError( "Key %s.entrypoint is required for post write hook %r" % (options["_hook_name"], options["_hook_name"]) ) from ke for entry in compat.importlib_metadata_get("console_scripts"): if entry.name == entrypoint_name: impl: Any = entry break else: raise util.CommandError( f"Could not find entrypoint console_scripts.{entrypoint_name}" ) cwd: Optional[str] = options.get("cwd", None) cmdline_options_str = options.get("options", "") cmdline_options_list = _parse_cmdline_options(cmdline_options_str, path) kw: Dict[str, Any] = {} if ignore_output: kw["stdout"] = kw["stderr"] = subprocess.DEVNULL subprocess.run( [ sys.executable, "-c", "import %s; %s.%s()" % (impl.module, impl.module, impl.attr), ] + cmdline_options_list, cwd=cwd, **kw, ) alembic-rel_1_7_6/alembic/templates/000077500000000000000000000000001417624537100175315ustar00rootroot00000000000000alembic-rel_1_7_6/alembic/templates/async/000077500000000000000000000000001417624537100206465ustar00rootroot00000000000000alembic-rel_1_7_6/alembic/templates/async/README000066400000000000000000000000721417624537100215250ustar00rootroot00000000000000Generic single-database configuration with an async dbapi.alembic-rel_1_7_6/alembic/templates/async/alembic.ini.mako000066400000000000000000000056041417624537100236760ustar00rootroot00000000000000# A generic, single database configuration. [alembic] # path to migration scripts script_location = ${script_location} # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # sys.path path, will be prepended to sys.path if present. # defaults to the current working directory. prepend_sys_path = . # timezone to use when rendering the date within the migration file # as well as the filename. 
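# e.g. a zone name such as UTC or US/Eastern (illustrative values only)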
# If specified, requires the python-dateutil library that can be # installed by adding `alembic[tz]` to the pip requirements # string value is passed to dateutil.tz.gettz() # leave blank for localtime # timezone = # max length of characters to apply to the # "slug" field # truncate_slug_length = 40 # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false # set to 'true' to allow .pyc and .pyo files without # a source .py file to be detected as revisions in the # versions/ directory # sourceless = false # version location specification; This defaults # to ${script_location}/versions. When using multiple version # directories, initial revisions must be specified with --version-path. # The path separator used here should be the separator specified by "version_path_separator" below. # version_locations = %(here)s/bar:%(here)s/bat:${script_location}/versions # version path separator; As mentioned above, this is the character used to split # version_locations. The default within new alembic.ini files is "os", which uses os.pathsep. # If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas. # Valid values for version_path_separator are: # # version_path_separator = : # version_path_separator = ; # version_path_separator = space version_path_separator = os # Use os.pathsep. Default configuration used for new projects. # the output encoding used when revision files # are written from script.py.mako # output_encoding = utf-8 sqlalchemy.url = driver://user:pass@localhost/dbname [post_write_hooks] # post_write_hooks defines scripts or Python functions that are run # on newly generated revision scripts. See the documentation for further # detail and examples # format using "black" - use the console_scripts runner, against the "black" entrypoint # hooks = black # black.type = console_scripts # black.entrypoint = black # black.options = -l 79 REVISION_SCRIPT_FILENAME # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S alembic-rel_1_7_6/alembic/templates/async/env.py000066400000000000000000000043531417624537100220150ustar00rootroot00000000000000import asyncio from logging.config import fileConfig from sqlalchemy import engine_from_config from sqlalchemy import pool from sqlalchemy.ext.asyncio import AsyncEngine from alembic import context # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # Interpret the config file for Python logging. # This line sets up loggers basically. fileConfig(config.config_file_name) # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel # target_metadata = mymodel.Base.metadata target_metadata = None # other values from the config, defined by the needs of env.py, # can be acquired: # my_important_option = config.get_main_option("my_important_option") # ... etc. def run_migrations_offline(): """Run migrations in 'offline' mode. 
This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ url = config.get_main_option("sqlalchemy.url") context.configure( url=url, target_metadata=target_metadata, literal_binds=True, dialect_opts={"paramstyle": "named"}, ) with context.begin_transaction(): context.run_migrations() def do_run_migrations(connection): context.configure(connection=connection, target_metadata=target_metadata) with context.begin_transaction(): context.run_migrations() async def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ connectable = AsyncEngine( engine_from_config( config.get_section(config.config_ini_section), prefix="sqlalchemy.", poolclass=pool.NullPool, future=True, ) ) async with connectable.connect() as connection: await connection.run_sync(do_run_migrations) await connectable.dispose() if context.is_offline_mode(): run_migrations_offline() else: asyncio.run(run_migrations_online()) alembic-rel_1_7_6/alembic/templates/async/script.py.mako000066400000000000000000000007561417624537100234620ustar00rootroot00000000000000"""${message} Revision ID: ${up_revision} Revises: ${down_revision | comma,n} Create Date: ${create_date} """ from alembic import op import sqlalchemy as sa ${imports if imports else ""} # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} branch_labels = ${repr(branch_labels)} depends_on = ${repr(depends_on)} def upgrade(): ${upgrades if upgrades else "pass"} def downgrade(): ${downgrades if downgrades else "pass"} alembic-rel_1_7_6/alembic/templates/generic/000077500000000000000000000000001417624537100211455ustar00rootroot00000000000000alembic-rel_1_7_6/alembic/templates/generic/README000066400000000000000000000000461417624537100220250ustar00rootroot00000000000000Generic single-database configuration.alembic-rel_1_7_6/alembic/templates/generic/alembic.ini.mako000066400000000000000000000056041417624537100241750ustar00rootroot00000000000000# A generic, single database configuration. [alembic] # path to migration scripts script_location = ${script_location} # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # sys.path path, will be prepended to sys.path if present. # defaults to the current working directory. prepend_sys_path = . # timezone to use when rendering the date within the migration file # as well as the filename. # If specified, requires the python-dateutil library that can be # installed by adding `alembic[tz]` to the pip requirements # string value is passed to dateutil.tz.gettz() # leave blank for localtime # timezone = # max length of characters to apply to the # "slug" field # truncate_slug_length = 40 # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false # set to 'true' to allow .pyc and .pyo files without # a source .py file to be detected as revisions in the # versions/ directory # sourceless = false # version location specification; This defaults # to ${script_location}/versions. When using multiple version # directories, initial revisions must be specified with --version-path. # The path separator used here should be the separator specified by "version_path_separator" below. 
# version_locations = %(here)s/bar:%(here)s/bat:${script_location}/versions # version path separator; As mentioned above, this is the character used to split # version_locations. The default within new alembic.ini files is "os", which uses os.pathsep. # If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas. # Valid values for version_path_separator are: # # version_path_separator = : # version_path_separator = ; # version_path_separator = space version_path_separator = os # Use os.pathsep. Default configuration used for new projects. # the output encoding used when revision files # are written from script.py.mako # output_encoding = utf-8 sqlalchemy.url = driver://user:pass@localhost/dbname [post_write_hooks] # post_write_hooks defines scripts or Python functions that are run # on newly generated revision scripts. See the documentation for further # detail and examples # format using "black" - use the console_scripts runner, against the "black" entrypoint # hooks = black # black.type = console_scripts # black.entrypoint = black # black.options = -l 79 REVISION_SCRIPT_FILENAME # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S alembic-rel_1_7_6/alembic/templates/generic/env.py000066400000000000000000000037671417624537100223240ustar00rootroot00000000000000from logging.config import fileConfig from sqlalchemy import engine_from_config from sqlalchemy import pool from alembic import context # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # Interpret the config file for Python logging. # This line sets up loggers basically. fileConfig(config.config_file_name) # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel # target_metadata = mymodel.Base.metadata target_metadata = None # other values from the config, defined by the needs of env.py, # can be acquired: # my_important_option = config.get_main_option("my_important_option") # ... etc. def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ url = config.get_main_option("sqlalchemy.url") context.configure( url=url, target_metadata=target_metadata, literal_binds=True, dialect_opts={"paramstyle": "named"}, ) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. 
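    In this template the Engine is built with engine_from_config(), reading
    keys prefixed with "sqlalchemy." (such as sqlalchemy.url) from the config
    section named by config.config_ini_section.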
""" connectable = engine_from_config( config.get_section(config.config_ini_section), prefix="sqlalchemy.", poolclass=pool.NullPool, ) with connectable.connect() as connection: context.configure( connection=connection, target_metadata=target_metadata ) with context.begin_transaction(): context.run_migrations() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() alembic-rel_1_7_6/alembic/templates/generic/script.py.mako000066400000000000000000000007561417624537100237610ustar00rootroot00000000000000"""${message} Revision ID: ${up_revision} Revises: ${down_revision | comma,n} Create Date: ${create_date} """ from alembic import op import sqlalchemy as sa ${imports if imports else ""} # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} branch_labels = ${repr(branch_labels)} depends_on = ${repr(depends_on)} def upgrade(): ${upgrades if upgrades else "pass"} def downgrade(): ${downgrades if downgrades else "pass"} alembic-rel_1_7_6/alembic/templates/multidb/000077500000000000000000000000001417624537100211715ustar00rootroot00000000000000alembic-rel_1_7_6/alembic/templates/multidb/README000066400000000000000000000011361417624537100220520ustar00rootroot00000000000000Rudimentary multi-database configuration. Multi-DB isn't vastly different from generic. The primary difference is that it will run the migrations N times (depending on how many databases you have configured), providing one engine name and associated context for each run. That engine name will then allow the migration to restrict what runs within it to just the appropriate migrations for that engine. You can see this behavior within the mako template. In the provided configuration, you'll need to have `databases` provided in alembic's config, and an `sqlalchemy.url` provided for each engine name. alembic-rel_1_7_6/alembic/templates/multidb/alembic.ini.mako000066400000000000000000000057421417624537100242240ustar00rootroot00000000000000# a multi-database configuration. [alembic] # path to migration scripts script_location = ${script_location} # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # sys.path path, will be prepended to sys.path if present. # defaults to the current working directory. prepend_sys_path = . # timezone to use when rendering the date within the migration file # as well as the filename. # If specified, requires the python-dateutil library that can be # installed by adding `alembic[tz]` to the pip requirements # string value is passed to dateutil.tz.gettz() # leave blank for localtime # timezone = # max length of characters to apply to the # "slug" field # truncate_slug_length = 40 # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false # set to 'true' to allow .pyc and .pyo files without # a source .py file to be detected as revisions in the # versions/ directory # sourceless = false # version location specification; This defaults # to ${script_location}/versions. When using multiple version # directories, initial revisions must be specified with --version-path. # The path separator used here should be the separator specified by "version_path_separator" below. # version_locations = %(here)s/bar:%(here)s/bat:${script_location}/versions # version path separator; As mentioned above, this is the character used to split # version_locations. The default within new alembic.ini files is "os", which uses os.pathsep. 
# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas. # Valid values for version_path_separator are: # # version_path_separator = : # version_path_separator = ; # version_path_separator = space version_path_separator = os # Use os.pathsep. Default configuration used for new projects. # the output encoding used when revision files # are written from script.py.mako # output_encoding = utf-8 databases = engine1, engine2 [engine1] sqlalchemy.url = driver://user:pass@localhost/dbname [engine2] sqlalchemy.url = driver://user:pass@localhost/dbname2 [post_write_hooks] # post_write_hooks defines scripts or Python functions that are run # on newly generated revision scripts. See the documentation for further # detail and examples # format using "black" - use the console_scripts runner, against the "black" entrypoint # hooks = black # black.type = console_scripts # black.entrypoint = black # black.options = -l 79 REVISION_SCRIPT_FILENAME # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S alembic-rel_1_7_6/alembic/templates/multidb/env.py000066400000000000000000000101021417624537100223250ustar00rootroot00000000000000import logging from logging.config import fileConfig import re from sqlalchemy import engine_from_config from sqlalchemy import pool from alembic import context USE_TWOPHASE = False # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # Interpret the config file for Python logging. # This line sets up loggers basically. fileConfig(config.config_file_name) logger = logging.getLogger("alembic.env") # gather section names referring to different # databases. These are named "engine1", "engine2" # in the sample .ini file. db_names = config.get_main_option("databases") # add your model's MetaData objects here # for 'autogenerate' support. These must be set # up to hold just those tables targeting a # particular database. table.tometadata() may be # helpful here in case a "copy" of # a MetaData is needed. # from myapp import mymodel # target_metadata = { # 'engine1':mymodel.metadata1, # 'engine2':mymodel.metadata2 # } target_metadata = {} # other values from the config, defined by the needs of env.py, # can be acquired: # my_important_option = config.get_main_option("my_important_option") # ... etc. def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ # for the --sql use case, run migrations for each URL into # individual files. 
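    # (descriptive note) each name listed in the "databases" option gets its
    # own record here; its SQL is written to a separate "<name>.sql" file
    # further below.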
engines = {} for name in re.split(r",\s*", db_names): engines[name] = rec = {} rec["url"] = context.config.get_section_option(name, "sqlalchemy.url") for name, rec in engines.items(): logger.info("Migrating database %s" % name) file_ = "%s.sql" % name logger.info("Writing output to %s" % file_) with open(file_, "w") as buffer: context.configure( url=rec["url"], output_buffer=buffer, target_metadata=target_metadata.get(name), literal_binds=True, dialect_opts={"paramstyle": "named"}, ) with context.begin_transaction(): context.run_migrations(engine_name=name) def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ # for the direct-to-DB use case, start a transaction on all # engines, then run all migrations, then commit all transactions. engines = {} for name in re.split(r",\s*", db_names): engines[name] = rec = {} rec["engine"] = engine_from_config( context.config.get_section(name), prefix="sqlalchemy.", poolclass=pool.NullPool, ) for name, rec in engines.items(): engine = rec["engine"] rec["connection"] = conn = engine.connect() if USE_TWOPHASE: rec["transaction"] = conn.begin_twophase() else: rec["transaction"] = conn.begin() try: for name, rec in engines.items(): logger.info("Migrating database %s" % name) context.configure( connection=rec["connection"], upgrade_token="%s_upgrades" % name, downgrade_token="%s_downgrades" % name, target_metadata=target_metadata.get(name), ) context.run_migrations(engine_name=name) if USE_TWOPHASE: for rec in engines.values(): rec["transaction"].prepare() for rec in engines.values(): rec["transaction"].commit() except: for rec in engines.values(): rec["transaction"].rollback() raise finally: for rec in engines.values(): rec["connection"].close() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() alembic-rel_1_7_6/alembic/templates/multidb/script.py.mako000066400000000000000000000016331417624537100240000ustar00rootroot00000000000000<%! import re %>"""${message} Revision ID: ${up_revision} Revises: ${down_revision | comma,n} Create Date: ${create_date} """ from alembic import op import sqlalchemy as sa ${imports if imports else ""} # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} branch_labels = ${repr(branch_labels)} depends_on = ${repr(depends_on)} def upgrade(engine_name): globals()["upgrade_%s" % engine_name]() def downgrade(engine_name): globals()["downgrade_%s" % engine_name]() <% db_names = config.get_main_option("databases") %> ## generate an "upgrade_() / downgrade_()" function ## for each database name in the ini file. % for db_name in re.split(r',\s*', db_names): def upgrade_${db_name}(): ${context.get("%s_upgrades" % db_name, "pass")} def downgrade_${db_name}(): ${context.get("%s_downgrades" % db_name, "pass")} % endfor alembic-rel_1_7_6/alembic/templates/pylons/000077500000000000000000000000001417624537100210555ustar00rootroot00000000000000alembic-rel_1_7_6/alembic/templates/pylons/README000066400000000000000000000000731417624537100217350ustar00rootroot00000000000000Configuration that reads from a Pylons project environment.alembic-rel_1_7_6/alembic/templates/pylons/alembic.ini.mako000066400000000000000000000045741417624537100241120ustar00rootroot00000000000000# a Pylons configuration. 
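# engine connectivity is taken from the Pylons project's own ini file, named
# by the pylons_config_file option at the bottom of this file.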
[alembic] # path to migration scripts script_location = ${script_location} # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # sys.path path, will be prepended to sys.path if present. # defaults to the current working directory. prepend_sys_path = . # timezone to use when rendering the date within the migration file # as well as the filename. # If specified, requires the python-dateutil library that can be # installed by adding `alembic[tz]` to the pip requirements # string value is passed to dateutil.tz.gettz() # leave blank for localtime # timezone = # max length of characters to apply to the # "slug" field # truncate_slug_length = 40 # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false # set to 'true' to allow .pyc and .pyo files without # a source .py file to be detected as revisions in the # versions/ directory # sourceless = false # version location specification; This defaults # to ${script_location}/versions. When using multiple version # directories, initial revisions must be specified with --version-path. # The path separator used here should be the separator specified by "version_path_separator" below. # version_locations = %(here)s/bar:%(here)s/bat:${script_location}/versions # version path separator; As mentioned above, this is the character used to split # version_locations. The default within new alembic.ini files is "os", which uses os.pathsep. # If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas. # Valid values for version_path_separator are: # # version_path_separator = : # version_path_separator = ; # version_path_separator = space version_path_separator = os # Use os.pathsep. Default configuration used for new projects. # the output encoding used when revision files # are written from script.py.mako # output_encoding = utf-8 [post_write_hooks] # post_write_hooks defines scripts or Python functions that are run # on newly generated revision scripts. See the documentation for further # detail and examples # format using "black" - use the console_scripts runner, against the "black" entrypoint # hooks = black # black.type = console_scripts # black.entrypoint = black # black.options = -l 79 REVISION_SCRIPT_FILENAME pylons_config_file = ./development.ini # that's it ! alembic-rel_1_7_6/alembic/templates/pylons/env.py000066400000000000000000000043051417624537100222210ustar00rootroot00000000000000"""Pylons bootstrap environment. Place 'pylons_config_file' into alembic.ini, and the application will be loaded from there. """ from logging.config import fileConfig from paste.deploy import loadapp from alembic import context try: # if pylons app already in, don't create a new app from pylons import config as pylons_config pylons_config["__file__"] except: config = context.config # can use config['__file__'] here, i.e. the Pylons # ini file, instead of alembic.ini config_file = config.get_main_option("pylons_config_file") fileConfig(config_file) wsgi_app = loadapp("config:%s" % config_file, relative_to=".") # customize this section for non-standard engine configurations. meta = __import__( "%s.model.meta" % wsgi_app.config["pylons.package"] ).model.meta # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel # target_metadata = mymodel.Base.metadata target_metadata = None def run_migrations_offline(): """Run migrations in 'offline' mode. 
This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ context.configure( url=meta.engine.url, target_metadata=target_metadata, literal_binds=True, dialect_opts={"paramstyle": "named"}, ) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ # specify here how the engine is acquired # engine = meta.engine raise NotImplementedError("Please specify engine connectivity here") with engine.connect() as connection: # noqa context.configure( connection=connection, target_metadata=target_metadata ) with context.begin_transaction(): context.run_migrations() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() alembic-rel_1_7_6/alembic/templates/pylons/script.py.mako000066400000000000000000000007561417624537100236710ustar00rootroot00000000000000"""${message} Revision ID: ${up_revision} Revises: ${down_revision | comma,n} Create Date: ${create_date} """ from alembic import op import sqlalchemy as sa ${imports if imports else ""} # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} branch_labels = ${repr(branch_labels)} depends_on = ${repr(depends_on)} def upgrade(): ${upgrades if upgrades else "pass"} def downgrade(): ${downgrades if downgrades else "pass"} alembic-rel_1_7_6/alembic/testing/000077500000000000000000000000001417624537100172105ustar00rootroot00000000000000alembic-rel_1_7_6/alembic/testing/__init__.py000066400000000000000000000022071417624537100213220ustar00rootroot00000000000000from sqlalchemy.testing import config from sqlalchemy.testing import emits_warning from sqlalchemy.testing import engines from sqlalchemy.testing import exclusions from sqlalchemy.testing import mock from sqlalchemy.testing import provide_metadata from sqlalchemy.testing import skip_if from sqlalchemy.testing import uses_deprecated from sqlalchemy.testing.config import combinations from sqlalchemy.testing.config import fixture from sqlalchemy.testing.config import requirements as requires from .assertions import assert_raises from .assertions import assert_raises_message from .assertions import emits_python_deprecation_warning from .assertions import eq_ from .assertions import eq_ignore_whitespace from .assertions import expect_raises from .assertions import expect_raises_message from .assertions import expect_sqlalchemy_deprecated from .assertions import expect_sqlalchemy_deprecated_20 from .assertions import expect_warnings from .assertions import is_ from .assertions import is_false from .assertions import is_not_ from .assertions import is_true from .assertions import ne_ from .fixtures import TestBase from .util import resolve_lambda alembic-rel_1_7_6/alembic/testing/assertions.py000066400000000000000000000115611417624537100217600ustar00rootroot00000000000000import contextlib import re import sys from typing import Any from typing import Dict from sqlalchemy import exc as sa_exc from sqlalchemy.engine import default from sqlalchemy.testing.assertions import _expect_warnings from sqlalchemy.testing.assertions import eq_ # noqa from sqlalchemy.testing.assertions import is_ # noqa from sqlalchemy.testing.assertions import is_false # noqa from 
sqlalchemy.testing.assertions import is_not_ # noqa from sqlalchemy.testing.assertions import is_true # noqa from sqlalchemy.testing.assertions import ne_ # noqa from sqlalchemy.util import decorator from ..util import sqla_compat def _assert_proper_exception_context(exception): """assert that any exception we're catching does not have a __context__ without a __cause__, and that __suppress_context__ is never set. Python 3 will report nested as exceptions as "during the handling of error X, error Y occurred". That's not what we want to do. we want these exceptions in a cause chain. """ if ( exception.__context__ is not exception.__cause__ and not exception.__suppress_context__ ): assert False, ( "Exception %r was correctly raised but did not set a cause, " "within context %r as its cause." % (exception, exception.__context__) ) def assert_raises(except_cls, callable_, *args, **kw): return _assert_raises(except_cls, callable_, args, kw, check_context=True) def assert_raises_context_ok(except_cls, callable_, *args, **kw): return _assert_raises(except_cls, callable_, args, kw) def assert_raises_message(except_cls, msg, callable_, *args, **kwargs): return _assert_raises( except_cls, callable_, args, kwargs, msg=msg, check_context=True ) def assert_raises_message_context_ok( except_cls, msg, callable_, *args, **kwargs ): return _assert_raises(except_cls, callable_, args, kwargs, msg=msg) def _assert_raises( except_cls, callable_, args, kwargs, msg=None, check_context=False ): with _expect_raises(except_cls, msg, check_context) as ec: callable_(*args, **kwargs) return ec.error class _ErrorContainer: error = None @contextlib.contextmanager def _expect_raises(except_cls, msg=None, check_context=False): ec = _ErrorContainer() if check_context: are_we_already_in_a_traceback = sys.exc_info()[0] try: yield ec success = False except except_cls as err: ec.error = err success = True if msg is not None: assert re.search(msg, str(err), re.UNICODE), f"{msg} !~ {err}" if check_context and not are_we_already_in_a_traceback: _assert_proper_exception_context(err) print(str(err).encode("utf-8")) # assert outside the block so it works for AssertionError too ! assert success, "Callable did not raise an exception" def expect_raises(except_cls, check_context=True): return _expect_raises(except_cls, check_context=check_context) def expect_raises_message(except_cls, msg, check_context=True): return _expect_raises(except_cls, msg=msg, check_context=check_context) def eq_ignore_whitespace(a, b, msg=None): a = re.sub(r"^\s+?|\n", "", a) a = re.sub(r" {2,}", " ", a) b = re.sub(r"^\s+?|\n", "", b) b = re.sub(r" {2,}", " ", b) assert a == b, msg or "%r != %r" % (a, b) _dialect_mods: Dict[Any, Any] = {} def _get_dialect(name): if name is None or name == "default": return default.DefaultDialect() else: d = sqla_compat._create_url(name).get_dialect()() if name == "postgresql": d.implicit_returning = True elif name == "mssql": d.legacy_schema_aliasing = False return d def expect_warnings(*messages, **kw): """Context manager which expects one or more warnings. With no arguments, squelches all SAWarnings emitted via sqlalchemy.util.warn and sqlalchemy.util.warn_limited. Otherwise pass string expressions that will match selected warnings via regex; all non-matching warnings are sent through. The expect version **asserts** that the warnings were in fact seen. Note that the test suite sets SAWarning warnings to raise exceptions. 
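    A minimal illustrative use (the message and the code under test are
    placeholders):

        with expect_warnings("some warning message regex"):
            run_code_expected_to_warn()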
""" return _expect_warnings(Warning, messages, **kw) def emits_python_deprecation_warning(*messages): """Decorator form of expect_warnings(). Note that emits_warning does **not** assert that the warnings were in fact seen. """ @decorator def decorate(fn, *args, **kw): with _expect_warnings(DeprecationWarning, assert_=False, *messages): return fn(*args, **kw) return decorate def expect_sqlalchemy_deprecated(*messages, **kw): return _expect_warnings(sa_exc.SADeprecationWarning, messages, **kw) def expect_sqlalchemy_deprecated_20(*messages, **kw): return _expect_warnings(sa_exc.RemovedIn20Warning, messages, **kw) alembic-rel_1_7_6/alembic/testing/env.py000066400000000000000000000246501417624537100203610ustar00rootroot00000000000000#!coding: utf-8 import importlib.machinery import os import shutil import textwrap from sqlalchemy.testing import config from sqlalchemy.testing import provision from . import util as testing_util from .. import util from ..script import Script from ..script import ScriptDirectory def _get_staging_directory(): if provision.FOLLOWER_IDENT: return "scratch_%s" % provision.FOLLOWER_IDENT else: return "scratch" def staging_env(create=True, template="generic", sourceless=False): from alembic import command, script cfg = _testing_config() if create: path = os.path.join(_get_staging_directory(), "scripts") assert not os.path.exists(path), ( "staging directory %s already exists; poor cleanup?" % path ) command.init(cfg, path, template=template) if sourceless: try: # do an import so that a .pyc/.pyo is generated. util.load_python_file(path, "env.py") except AttributeError: # we don't have the migration context set up yet # so running the .env py throws this exception. # theoretically we could be using py_compiler here to # generate .pyc/.pyo without importing but not really # worth it. 
pass assert sourceless in ( "pep3147_envonly", "simple", "pep3147_everything", ), sourceless make_sourceless( os.path.join(path, "env.py"), "pep3147" if "pep3147" in sourceless else "simple", ) sc = script.ScriptDirectory.from_config(cfg) return sc def clear_staging_env(): from sqlalchemy.testing import engines engines.testing_reaper.close_all() shutil.rmtree(_get_staging_directory(), True) def script_file_fixture(txt): dir_ = os.path.join(_get_staging_directory(), "scripts") path = os.path.join(dir_, "script.py.mako") with open(path, "w") as f: f.write(txt) def env_file_fixture(txt): dir_ = os.path.join(_get_staging_directory(), "scripts") txt = ( """ from alembic import context config = context.config """ + txt ) path = os.path.join(dir_, "env.py") pyc_path = util.pyc_file_from_path(path) if pyc_path: os.unlink(pyc_path) with open(path, "w") as f: f.write(txt) def _sqlite_file_db(tempname="foo.db", future=False): dir_ = os.path.join(_get_staging_directory(), "scripts") url = "sqlite:///%s/%s" % (dir_, tempname) return testing_util.testing_engine(url=url, future=future) def _sqlite_testing_config(sourceless=False, future=False): dir_ = os.path.join(_get_staging_directory(), "scripts") url = "sqlite:///%s/foo.db" % dir_ sqlalchemy_future = future or ("future" in config.db.__class__.__module__) return _write_config_file( """ [alembic] script_location = %s sqlalchemy.url = %s sourceless = %s %s [loggers] keys = root,sqlalchemy [handlers] keys = console [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = DEBUG handlers = qualname = sqlalchemy.engine [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatters] keys = generic [formatter_generic] format = %%(levelname)-5.5s [%%(name)s] %%(message)s datefmt = %%H:%%M:%%S """ % ( dir_, url, "true" if sourceless else "false", "sqlalchemy.future = true" if sqlalchemy_future else "", ) ) def _multi_dir_testing_config(sourceless=False, extra_version_location=""): dir_ = os.path.join(_get_staging_directory(), "scripts") sqlalchemy_future = "future" in config.db.__class__.__module__ url = "sqlite:///%s/foo.db" % dir_ return _write_config_file( """ [alembic] script_location = %s sqlalchemy.url = %s sqlalchemy.future = %s sourceless = %s version_locations = %%(here)s/model1/ %%(here)s/model2/ %%(here)s/model3/ %s [loggers] keys = root [handlers] keys = console [logger_root] level = WARN handlers = console qualname = [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatters] keys = generic [formatter_generic] format = %%(levelname)-5.5s [%%(name)s] %%(message)s datefmt = %%H:%%M:%%S """ % ( dir_, url, "true" if sqlalchemy_future else "false", "true" if sourceless else "false", extra_version_location, ) ) def _no_sql_testing_config(dialect="postgresql", directives=""): """use a postgresql url with no host so that connections guaranteed to fail""" dir_ = os.path.join(_get_staging_directory(), "scripts") return _write_config_file( """ [alembic] script_location = %s sqlalchemy.url = %s:// %s [loggers] keys = root [handlers] keys = console [logger_root] level = WARN handlers = console qualname = [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatters] keys = generic [formatter_generic] format = %%(levelname)-5.5s [%%(name)s] %%(message)s datefmt = %%H:%%M:%%S """ % (dir_, dialect, directives) ) def _write_config_file(text): cfg = _testing_config() with open(cfg.config_file_name, 
"w") as f: f.write(text) return cfg def _testing_config(): from alembic.config import Config if not os.access(_get_staging_directory(), os.F_OK): os.mkdir(_get_staging_directory()) return Config(os.path.join(_get_staging_directory(), "test_alembic.ini")) def write_script( scriptdir, rev_id, content, encoding="ascii", sourceless=False ): old = scriptdir.revision_map.get_revision(rev_id) path = old.path content = textwrap.dedent(content) if encoding: content = content.encode(encoding) with open(path, "wb") as fp: fp.write(content) pyc_path = util.pyc_file_from_path(path) if pyc_path: os.unlink(pyc_path) script = Script._from_path(scriptdir, path) old = scriptdir.revision_map.get_revision(script.revision) if old.down_revision != script.down_revision: raise Exception( "Can't change down_revision " "on a refresh operation." ) scriptdir.revision_map.add_revision(script, _replace=True) if sourceless: make_sourceless( path, "pep3147" if sourceless == "pep3147_everything" else "simple" ) def make_sourceless(path, style): import py_compile py_compile.compile(path) if style == "simple": pyc_path = util.pyc_file_from_path(path) suffix = importlib.machinery.BYTECODE_SUFFIXES[0] filepath, ext = os.path.splitext(path) simple_pyc_path = filepath + suffix shutil.move(pyc_path, simple_pyc_path) pyc_path = simple_pyc_path else: assert style in ("pep3147", "simple") pyc_path = util.pyc_file_from_path(path) assert os.access(pyc_path, os.F_OK) os.unlink(path) def three_rev_fixture(cfg): a = util.rev_id() b = util.rev_id() c = util.rev_id() script = ScriptDirectory.from_config(cfg) script.generate_revision(a, "revision a", refresh=True, head="base") write_script( script, a, """\ "Rev A" revision = '%s' down_revision = None from alembic import op def upgrade(): op.execute("CREATE STEP 1") def downgrade(): op.execute("DROP STEP 1") """ % a, ) script.generate_revision(b, "revision b", refresh=True, head=a) write_script( script, b, f"""# coding: utf-8 "Rev B, méil, %3" revision = '{b}' down_revision = '{a}' from alembic import op def upgrade(): op.execute("CREATE STEP 2") def downgrade(): op.execute("DROP STEP 2") """, encoding="utf-8", ) script.generate_revision(c, "revision c", refresh=True, head=b) write_script( script, c, """\ "Rev C" revision = '%s' down_revision = '%s' from alembic import op def upgrade(): op.execute("CREATE STEP 3") def downgrade(): op.execute("DROP STEP 3") """ % (c, b), ) return a, b, c def multi_heads_fixture(cfg, a, b, c): """Create a multiple head fixture from the three-revs fixture""" # a->b->c # -> d -> e # -> f d = util.rev_id() e = util.rev_id() f = util.rev_id() script = ScriptDirectory.from_config(cfg) script.generate_revision( d, "revision d from b", head=b, splice=True, refresh=True ) write_script( script, d, """\ "Rev D" revision = '%s' down_revision = '%s' from alembic import op def upgrade(): op.execute("CREATE STEP 4") def downgrade(): op.execute("DROP STEP 4") """ % (d, b), ) script.generate_revision( e, "revision e from d", head=d, splice=True, refresh=True ) write_script( script, e, """\ "Rev E" revision = '%s' down_revision = '%s' from alembic import op def upgrade(): op.execute("CREATE STEP 5") def downgrade(): op.execute("DROP STEP 5") """ % (e, d), ) script.generate_revision( f, "revision f from b", head=b, splice=True, refresh=True ) write_script( script, f, """\ "Rev F" revision = '%s' down_revision = '%s' from alembic import op def upgrade(): op.execute("CREATE STEP 6") def downgrade(): op.execute("DROP STEP 6") """ % (f, b), ) return d, e, f def 
_multidb_testing_config(engines): """alembic.ini fixture to work exactly with the 'multidb' template""" dir_ = os.path.join(_get_staging_directory(), "scripts") sqlalchemy_future = "future" in config.db.__class__.__module__ databases = ", ".join(engines.keys()) engines = "\n\n".join( "[%s]\n" "sqlalchemy.url = %s" % (key, value.url) for key, value in engines.items() ) return _write_config_file( """ [alembic] script_location = %s sourceless = false sqlalchemy.future = %s databases = %s %s [loggers] keys = root [handlers] keys = console [logger_root] level = WARN handlers = console qualname = [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatters] keys = generic [formatter_generic] format = %%(levelname)-5.5s [%%(name)s] %%(message)s datefmt = %%H:%%M:%%S """ % (dir_, "true" if sqlalchemy_future else "false", databases, engines) ) alembic-rel_1_7_6/alembic/testing/fixtures.py000066400000000000000000000217101417624537100214340ustar00rootroot00000000000000# coding: utf-8 import configparser from contextlib import contextmanager import io import re from typing import Any from typing import Dict from sqlalchemy import Column from sqlalchemy import inspect from sqlalchemy import MetaData from sqlalchemy import String from sqlalchemy import Table from sqlalchemy import testing from sqlalchemy import text from sqlalchemy.testing import config from sqlalchemy.testing import mock from sqlalchemy.testing.assertions import eq_ from sqlalchemy.testing.fixtures import TablesTest as SQLAlchemyTablesTest from sqlalchemy.testing.fixtures import TestBase as SQLAlchemyTestBase import alembic from .assertions import _get_dialect from ..environment import EnvironmentContext from ..migration import MigrationContext from ..operations import Operations from ..util import sqla_compat from ..util.sqla_compat import create_mock_engine from ..util.sqla_compat import sqla_14 from ..util.sqla_compat import sqla_1x testing_config = configparser.ConfigParser() testing_config.read(["test.cfg"]) class TestBase(SQLAlchemyTestBase): if sqla_1x: is_sqlalchemy_future = False else: is_sqlalchemy_future = True @testing.fixture() def ops_context(self, migration_context): with migration_context.begin_transaction(_per_migration=True): yield Operations(migration_context) @testing.fixture def migration_context(self, connection): return MigrationContext.configure( connection, opts=dict(transaction_per_migration=True) ) @testing.fixture def connection(self): with config.db.connect() as conn: yield conn class TablesTest(TestBase, SQLAlchemyTablesTest): pass if sqla_14: from sqlalchemy.testing.fixtures import FutureEngineMixin else: class FutureEngineMixin: # type:ignore[no-redef] __requires__ = ("sqlalchemy_14",) FutureEngineMixin.is_sqlalchemy_future = True def capture_db(dialect="postgresql://"): buf = [] def dump(sql, *multiparams, **params): buf.append(str(sql.compile(dialect=engine.dialect))) engine = create_mock_engine(dialect, dump) return engine, buf _engs: Dict[Any, Any] = {} @contextmanager def capture_context_buffer(**kw): if kw.pop("bytes_io", False): buf = io.BytesIO() else: buf = io.StringIO() kw.update({"dialect_name": "sqlite", "output_buffer": buf}) conf = EnvironmentContext.configure def configure(*arg, **opt): opt.update(**kw) return conf(*arg, **opt) with mock.patch.object(EnvironmentContext, "configure", configure): yield buf @contextmanager def capture_engine_context_buffer(**kw): from .env import _sqlite_file_db from sqlalchemy import event buf = io.StringIO() 
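    # descriptive note: the SQLite file engine and connection opened below are
    # instrumented with a "before_cursor_execute" event listener that writes
    # each executed statement into this buffer, so callers of this fixture can
    # assert on the SQL that was actually emitted over the connection.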
eng = _sqlite_file_db() conn = eng.connect() @event.listens_for(conn, "before_cursor_execute") def bce(conn, cursor, statement, parameters, context, executemany): buf.write(statement + "\n") kw.update({"connection": conn}) conf = EnvironmentContext.configure def configure(*arg, **opt): opt.update(**kw) return conf(*arg, **opt) with mock.patch.object(EnvironmentContext, "configure", configure): yield buf def op_fixture( dialect="default", as_sql=False, naming_convention=None, literal_binds=False, native_boolean=None, ): opts = {} if naming_convention: opts["target_metadata"] = MetaData(naming_convention=naming_convention) class buffer_: def __init__(self): self.lines = [] def write(self, msg): msg = msg.strip() msg = re.sub(r"[\n\t]", "", msg) if as_sql: # the impl produces soft tabs, # so search for blocks of 4 spaces msg = re.sub(r" ", "", msg) msg = re.sub(r"\;\n*$", "", msg) self.lines.append(msg) def flush(self): pass buf = buffer_() class ctx(MigrationContext): def get_buf(self): return buf def clear_assertions(self): buf.lines[:] = [] def assert_(self, *sql): # TODO: make this more flexible about # whitespace and such eq_(buf.lines, [re.sub(r"[\n\t]", "", s) for s in sql]) def assert_contains(self, sql): for stmt in buf.lines: if re.sub(r"[\n\t]", "", sql) in stmt: return else: assert False, "Could not locate fragment %r in %r" % ( sql, buf.lines, ) if as_sql: opts["as_sql"] = as_sql if literal_binds: opts["literal_binds"] = literal_binds if not sqla_14 and dialect == "mariadb": ctx_dialect = _get_dialect("mysql") ctx_dialect.server_version_info = (10, 4, 0, "MariaDB") else: ctx_dialect = _get_dialect(dialect) if native_boolean is not None: ctx_dialect.supports_native_boolean = native_boolean # this is new as of SQLAlchemy 1.2.7 and is used by SQL Server, # which breaks assumptions in the alembic test suite ctx_dialect.non_native_boolean_check_constraint = True if not as_sql: def execute(stmt, *multiparam, **param): if isinstance(stmt, str): stmt = text(stmt) assert stmt.supports_execution sql = str(stmt.compile(dialect=ctx_dialect)) buf.write(sql) connection = mock.Mock(dialect=ctx_dialect, execute=execute) else: opts["output_buffer"] = buf connection = None context = ctx(ctx_dialect, connection, opts) alembic.op._proxy = Operations(context) return context class AlterColRoundTripFixture: # since these tests are about syntax, use more recent SQLAlchemy as some of # the type / server default compare logic might not work on older # SQLAlchemy versions as seems to be the case for SQLAlchemy 1.1 on Oracle __requires__ = ("alter_column",) def setUp(self): self.conn = config.db.connect() self.ctx = MigrationContext.configure(self.conn) self.op = Operations(self.ctx) self.metadata = MetaData() def _compare_type(self, t1, t2): c1 = Column("q", t1) c2 = Column("q", t2) assert not self.ctx.impl.compare_type( c1, c2 ), "Type objects %r and %r didn't compare as equivalent" % (t1, t2) def _compare_server_default(self, t1, s1, t2, s2): c1 = Column("q", t1, server_default=s1) c2 = Column("q", t2, server_default=s2) assert not self.ctx.impl.compare_server_default( c1, c2, s2, s1 ), "server defaults %r and %r didn't compare as equivalent" % (s1, s2) def tearDown(self): sqla_compat._safe_rollback_connection_transaction(self.conn) with self.conn.begin(): self.metadata.drop_all(self.conn) self.conn.close() def _run_alter_col(self, from_, to_, compare=None): column = Column( from_.get("name", "colname"), from_.get("type", String(10)), nullable=from_.get("nullable", True), 
server_default=from_.get("server_default", None), # comment=from_.get("comment", None) ) t = Table("x", self.metadata, column) with sqla_compat._ensure_scope_for_ddl(self.conn): t.create(self.conn) insp = inspect(self.conn) old_col = insp.get_columns("x")[0] # TODO: conditional comment support self.op.alter_column( "x", column.name, existing_type=column.type, existing_server_default=column.server_default if column.server_default is not None else False, existing_nullable=True if column.nullable else False, # existing_comment=column.comment, nullable=to_.get("nullable", None), # modify_comment=False, server_default=to_.get("server_default", False), new_column_name=to_.get("name", None), type_=to_.get("type", None), ) insp = inspect(self.conn) new_col = insp.get_columns("x")[0] if compare is None: compare = to_ eq_( new_col["name"], compare["name"] if "name" in compare else column.name, ) self._compare_type( new_col["type"], compare.get("type", old_col["type"]) ) eq_(new_col["nullable"], compare.get("nullable", column.nullable)) self._compare_server_default( new_col["type"], new_col.get("default", None), compare.get("type", old_col["type"]), compare["server_default"].text if "server_default" in compare else column.server_default.arg.text if column.server_default is not None else None, ) alembic-rel_1_7_6/alembic/testing/plugin/000077500000000000000000000000001417624537100205065ustar00rootroot00000000000000alembic-rel_1_7_6/alembic/testing/plugin/__init__.py000066400000000000000000000000001417624537100226050ustar00rootroot00000000000000alembic-rel_1_7_6/alembic/testing/plugin/bootstrap.py000066400000000000000000000000621417624537100230730ustar00rootroot00000000000000""" Bootstrapper for test framework plugins. """ alembic-rel_1_7_6/alembic/testing/requirements.py000066400000000000000000000113331417624537100223060ustar00rootroot00000000000000from sqlalchemy.testing.requirements import Requirements from alembic import util from alembic.util import sqla_compat from ..testing import exclusions class SuiteRequirements(Requirements): @property def schemas(self): """Target database must support external schemas, and have one named 'test_schema'.""" return exclusions.open() @property def autocommit_isolation(self): """target database should support 'AUTOCOMMIT' isolation level""" return exclusions.closed() @property def unique_constraint_reflection(self): def doesnt_have_check_uq_constraints(config): from sqlalchemy import inspect insp = inspect(config.db) try: insp.get_unique_constraints("x") except NotImplementedError: return True except TypeError: return True except Exception: pass return False return exclusions.skip_if(doesnt_have_check_uq_constraints) @property def sequences(self): """Target database must support SEQUENCEs.""" return exclusions.only_if( [lambda config: config.db.dialect.supports_sequences], "no sequence support", ) @property def foreign_key_match(self): return exclusions.open() @property def foreign_key_constraint_reflection(self): return exclusions.open() @property def check_constraints_w_enforcement(self): """Target database must support check constraints and also enforce them.""" return exclusions.open() @property def reflects_pk_names(self): return exclusions.closed() @property def reflects_fk_options(self): return exclusions.closed() @property def sqlalchemy_13(self): return exclusions.skip_if( lambda config: not util.sqla_13, "SQLAlchemy 1.3 or greater required", ) @property def sqlalchemy_14(self): return exclusions.skip_if( lambda config: not util.sqla_14, "SQLAlchemy 1.4 
or greater required", ) @property def sqlalchemy_1x(self): return exclusions.skip_if( lambda config: not util.sqla_1x, "SQLAlchemy 1.x test", ) @property def comments(self): return exclusions.only_if( lambda config: config.db.dialect.supports_comments ) @property def alter_column(self): return exclusions.open() @property def computed_columns(self): return exclusions.closed() @property def computed_columns_api(self): return exclusions.only_if( exclusions.BooleanPredicate(sqla_compat.has_computed) ) @property def computed_reflects_normally(self): return exclusions.only_if( exclusions.BooleanPredicate(sqla_compat.has_computed_reflection) ) @property def computed_reflects_as_server_default(self): return exclusions.closed() @property def computed_doesnt_reflect_as_server_default(self): return exclusions.closed() @property def autoincrement_on_composite_pk(self): return exclusions.closed() @property def fk_ondelete_is_reflected(self): return exclusions.closed() @property def fk_onupdate_is_reflected(self): return exclusions.closed() @property def fk_onupdate(self): return exclusions.open() @property def fk_ondelete_restrict(self): return exclusions.open() @property def fk_onupdate_restrict(self): return exclusions.open() @property def fk_ondelete_noaction(self): return exclusions.open() @property def fk_initially(self): return exclusions.closed() @property def fk_deferrable(self): return exclusions.closed() @property def fk_deferrable_is_reflected(self): return exclusions.closed() @property def fk_names(self): return exclusions.open() @property def integer_subtype_comparisons(self): return exclusions.open() @property def no_name_normalize(self): return exclusions.skip_if( lambda config: config.db.dialect.requires_name_normalize ) @property def identity_columns(self): return exclusions.closed() @property def identity_columns_alter(self): return exclusions.closed() @property def identity_columns_api(self): return exclusions.only_if( exclusions.BooleanPredicate(sqla_compat.has_identity) ) @property def supports_identity_on_null(self): return exclusions.closed() alembic-rel_1_7_6/alembic/testing/schemacompare.py000066400000000000000000000104251417624537100223730ustar00rootroot00000000000000from itertools import zip_longest from sqlalchemy import schema class CompareTable: def __init__(self, table): self.table = table def __eq__(self, other): if self.table.name != other.name or self.table.schema != other.schema: return False for c1, c2 in zip_longest(self.table.c, other.c): if (c1 is None and c2 is not None) or ( c2 is None and c1 is not None ): return False if CompareColumn(c1) != c2: return False return True # TODO: compare constraints, indexes def __ne__(self, other): return not self.__eq__(other) class CompareColumn: def __init__(self, column): self.column = column def __eq__(self, other): return ( self.column.name == other.name and self.column.nullable == other.nullable ) # TODO: datatypes etc def __ne__(self, other): return not self.__eq__(other) class CompareIndex: def __init__(self, index): self.index = index def __eq__(self, other): return ( str(schema.CreateIndex(self.index)) == str(schema.CreateIndex(other)) and self.index.dialect_kwargs == other.dialect_kwargs ) def __ne__(self, other): return not self.__eq__(other) class CompareCheckConstraint: def __init__(self, constraint): self.constraint = constraint def __eq__(self, other): return ( isinstance(other, schema.CheckConstraint) and self.constraint.name == other.name and (str(self.constraint.sqltext) == str(other.sqltext)) and 
(other.table.name == self.constraint.table.name) and other.table.schema == self.constraint.table.schema ) def __ne__(self, other): return not self.__eq__(other) class CompareForeignKey: def __init__(self, constraint): self.constraint = constraint def __eq__(self, other): r1 = ( isinstance(other, schema.ForeignKeyConstraint) and self.constraint.name == other.name and (other.table.name == self.constraint.table.name) and other.table.schema == self.constraint.table.schema ) if not r1: return False for c1, c2 in zip_longest(self.constraint.columns, other.columns): if (c1 is None and c2 is not None) or ( c2 is None and c1 is not None ): return False if CompareColumn(c1) != c2: return False return True def __ne__(self, other): return not self.__eq__(other) class ComparePrimaryKey: def __init__(self, constraint): self.constraint = constraint def __eq__(self, other): r1 = ( isinstance(other, schema.PrimaryKeyConstraint) and self.constraint.name == other.name and (other.table.name == self.constraint.table.name) and other.table.schema == self.constraint.table.schema ) if not r1: return False for c1, c2 in zip_longest(self.constraint.columns, other.columns): if (c1 is None and c2 is not None) or ( c2 is None and c1 is not None ): return False if CompareColumn(c1) != c2: return False return True def __ne__(self, other): return not self.__eq__(other) class CompareUniqueConstraint: def __init__(self, constraint): self.constraint = constraint def __eq__(self, other): r1 = ( isinstance(other, schema.UniqueConstraint) and self.constraint.name == other.name and (other.table.name == self.constraint.table.name) and other.table.schema == self.constraint.table.schema ) if not r1: return False for c1, c2 in zip_longest(self.constraint.columns, other.columns): if (c1 is None and c2 is not None) or ( c2 is None and c1 is not None ): return False if CompareColumn(c1) != c2: return False return True def __ne__(self, other): return not self.__eq__(other) alembic-rel_1_7_6/alembic/testing/suite/000077500000000000000000000000001417624537100203415ustar00rootroot00000000000000alembic-rel_1_7_6/alembic/testing/suite/__init__.py000066400000000000000000000004401417624537100224500ustar00rootroot00000000000000from .test_autogen_comments import * # noqa from .test_autogen_computed import * # noqa from .test_autogen_diffs import * # noqa from .test_autogen_fks import * # noqa from .test_autogen_identity import * # noqa from .test_environment import * # noqa from .test_op import * # noqa alembic-rel_1_7_6/alembic/testing/suite/_autogen_fixtures.py000066400000000000000000000232131417624537100244460ustar00rootroot00000000000000from typing import Any from typing import Dict from sqlalchemy import CHAR from sqlalchemy import CheckConstraint from sqlalchemy import Column from sqlalchemy import event from sqlalchemy import ForeignKey from sqlalchemy import Index from sqlalchemy import inspect from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import Numeric from sqlalchemy import String from sqlalchemy import Table from sqlalchemy import Text from sqlalchemy import text from sqlalchemy import UniqueConstraint from ... import autogenerate from ... 
import util from ...autogenerate import api from ...ddl.base import _fk_spec from ...migration import MigrationContext from ...operations import ops from ...testing import config from ...testing import eq_ from ...testing.env import clear_staging_env from ...testing.env import staging_env names_in_this_test = set() @event.listens_for(Table, "after_parent_attach") def new_table(table, parent): names_in_this_test.add(table.name) def _default_include_object(obj, name, type_, reflected, compare_to): if type_ == "table": return name in names_in_this_test else: return True _default_object_filters = _default_include_object _default_name_filters = None class ModelOne: __requires__ = ("unique_constraint_reflection",) schema = None @classmethod def _get_db_schema(cls): schema = cls.schema m = MetaData(schema=schema) Table( "user", m, Column("id", Integer, primary_key=True), Column("name", String(50)), Column("a1", Text), Column("pw", String(50)), Index("pw_idx", "pw"), ) Table( "address", m, Column("id", Integer, primary_key=True), Column("email_address", String(100), nullable=False), ) Table( "order", m, Column("order_id", Integer, primary_key=True), Column( "amount", Numeric(8, 2), nullable=False, server_default=text("0"), ), CheckConstraint("amount >= 0", name="ck_order_amount"), ) Table( "extra", m, Column("x", CHAR), Column("uid", Integer, ForeignKey("user.id")), ) return m @classmethod def _get_model_schema(cls): schema = cls.schema m = MetaData(schema=schema) Table( "user", m, Column("id", Integer, primary_key=True), Column("name", String(50), nullable=False), Column("a1", Text, server_default="x"), ) Table( "address", m, Column("id", Integer, primary_key=True), Column("email_address", String(100), nullable=False), Column("street", String(50)), UniqueConstraint("email_address", name="uq_email"), ) Table( "order", m, Column("order_id", Integer, primary_key=True), Column( "amount", Numeric(10, 2), nullable=True, server_default=text("0"), ), Column("user_id", Integer, ForeignKey("user.id")), CheckConstraint("amount > -1", name="ck_order_amount"), ) Table( "item", m, Column("id", Integer, primary_key=True), Column("description", String(100)), Column("order_id", Integer, ForeignKey("order.order_id")), CheckConstraint("len(description) > 5"), ) return m class _ComparesFKs: def _assert_fk_diff( self, diff, type_, source_table, source_columns, target_table, target_columns, name=None, conditional_name=None, source_schema=None, onupdate=None, ondelete=None, initially=None, deferrable=None, ): # the public API for ForeignKeyConstraint was not very rich # in 0.7, 0.8, so here we use the well-known but slightly # private API to get at its elements ( fk_source_schema, fk_source_table, fk_source_columns, fk_target_schema, fk_target_table, fk_target_columns, fk_onupdate, fk_ondelete, fk_deferrable, fk_initially, ) = _fk_spec(diff[1]) eq_(diff[0], type_) eq_(fk_source_table, source_table) eq_(fk_source_columns, source_columns) eq_(fk_target_table, target_table) eq_(fk_source_schema, source_schema) eq_(fk_onupdate, onupdate) eq_(fk_ondelete, ondelete) eq_(fk_initially, initially) eq_(fk_deferrable, deferrable) eq_([elem.column.name for elem in diff[1].elements], target_columns) if conditional_name is not None: if conditional_name == "servergenerated": fks = inspect(self.bind).get_foreign_keys(source_table) server_fk_name = fks[0]["name"] eq_(diff[1].name, server_fk_name) else: eq_(diff[1].name, conditional_name) else: eq_(diff[1].name, name) class AutogenTest(_ComparesFKs): def _flatten_diffs(self, diffs): for 
d in diffs: if isinstance(d, list): for fd in self._flatten_diffs(d): yield fd else: yield d @classmethod def _get_bind(cls): return config.db configure_opts: Dict[Any, Any] = {} @classmethod def setup_class(cls): staging_env() cls.bind = cls._get_bind() cls.m1 = cls._get_db_schema() cls.m1.create_all(cls.bind) cls.m2 = cls._get_model_schema() @classmethod def teardown_class(cls): cls.m1.drop_all(cls.bind) clear_staging_env() def setUp(self): self.conn = conn = self.bind.connect() ctx_opts = { "compare_type": True, "compare_server_default": True, "target_metadata": self.m2, "upgrade_token": "upgrades", "downgrade_token": "downgrades", "alembic_module_prefix": "op.", "sqlalchemy_module_prefix": "sa.", "include_object": _default_object_filters, "include_name": _default_name_filters, } if self.configure_opts: ctx_opts.update(self.configure_opts) self.context = context = MigrationContext.configure( connection=conn, opts=ctx_opts ) self.autogen_context = api.AutogenContext(context, self.m2) def tearDown(self): self.conn.close() def _update_context( self, object_filters=None, name_filters=None, include_schemas=None ): if include_schemas is not None: self.autogen_context.opts["include_schemas"] = include_schemas if object_filters is not None: self.autogen_context._object_filters = [object_filters] if name_filters is not None: self.autogen_context._name_filters = [name_filters] return self.autogen_context class AutogenFixtureTest(_ComparesFKs): def _fixture( self, m1, m2, include_schemas=False, opts=None, object_filters=_default_object_filters, name_filters=_default_name_filters, return_ops=False, max_identifier_length=None, ): if max_identifier_length: dialect = self.bind.dialect existing_length = dialect.max_identifier_length dialect.max_identifier_length = ( dialect._user_defined_max_identifier_length ) = max_identifier_length try: self._alembic_metadata, model_metadata = m1, m2 for m in util.to_list(self._alembic_metadata): m.create_all(self.bind) with self.bind.connect() as conn: ctx_opts = { "compare_type": True, "compare_server_default": True, "target_metadata": model_metadata, "upgrade_token": "upgrades", "downgrade_token": "downgrades", "alembic_module_prefix": "op.", "sqlalchemy_module_prefix": "sa.", "include_object": object_filters, "include_name": name_filters, "include_schemas": include_schemas, } if opts: ctx_opts.update(opts) self.context = context = MigrationContext.configure( connection=conn, opts=ctx_opts ) autogen_context = api.AutogenContext(context, model_metadata) uo = ops.UpgradeOps(ops=[]) autogenerate._produce_net_changes(autogen_context, uo) if return_ops: return uo else: return uo.as_diffs() finally: if max_identifier_length: dialect = self.bind.dialect dialect.max_identifier_length = ( dialect._user_defined_max_identifier_length ) = existing_length reports_unnamed_constraints = False def setUp(self): staging_env() self.bind = config.db def tearDown(self): if hasattr(self, "_alembic_metadata"): for m in util.to_list(self._alembic_metadata): m.drop_all(self.bind) clear_staging_env() alembic-rel_1_7_6/alembic/testing/suite/test_autogen_comments.py000066400000000000000000000142131417624537100253220ustar00rootroot00000000000000from sqlalchemy import Column from sqlalchemy import Float from sqlalchemy import MetaData from sqlalchemy import String from sqlalchemy import Table from ._autogen_fixtures import AutogenFixtureTest from ...testing import eq_ from ...testing import mock from ...testing import TestBase class AutogenerateCommentsTest(AutogenFixtureTest, TestBase): 
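    # Covers autogenerate's comparison of table and column comments: adding,
    # removing, and altering a comment should each surface as its own diff
    # entry, while identical comments on both sides should produce no diff.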
__backend__ = True __requires__ = ("comments",) def test_existing_table_comment_no_change(self): m1 = MetaData() m2 = MetaData() Table( "some_table", m1, Column("test", String(10), primary_key=True), comment="this is some table", ) Table( "some_table", m2, Column("test", String(10), primary_key=True), comment="this is some table", ) diffs = self._fixture(m1, m2) eq_(diffs, []) def test_add_table_comment(self): m1 = MetaData() m2 = MetaData() Table("some_table", m1, Column("test", String(10), primary_key=True)) Table( "some_table", m2, Column("test", String(10), primary_key=True), comment="this is some table", ) diffs = self._fixture(m1, m2) eq_(diffs[0][0], "add_table_comment") eq_(diffs[0][1].comment, "this is some table") eq_(diffs[0][2], None) def test_remove_table_comment(self): m1 = MetaData() m2 = MetaData() Table( "some_table", m1, Column("test", String(10), primary_key=True), comment="this is some table", ) Table("some_table", m2, Column("test", String(10), primary_key=True)) diffs = self._fixture(m1, m2) eq_(diffs[0][0], "remove_table_comment") eq_(diffs[0][1].comment, None) def test_alter_table_comment(self): m1 = MetaData() m2 = MetaData() Table( "some_table", m1, Column("test", String(10), primary_key=True), comment="this is some table", ) Table( "some_table", m2, Column("test", String(10), primary_key=True), comment="this is also some table", ) diffs = self._fixture(m1, m2) eq_(diffs[0][0], "add_table_comment") eq_(diffs[0][1].comment, "this is also some table") eq_(diffs[0][2], "this is some table") def test_existing_column_comment_no_change(self): m1 = MetaData() m2 = MetaData() Table( "some_table", m1, Column("test", String(10), primary_key=True), Column("amount", Float, comment="the amount"), ) Table( "some_table", m2, Column("test", String(10), primary_key=True), Column("amount", Float, comment="the amount"), ) diffs = self._fixture(m1, m2) eq_(diffs, []) def test_add_column_comment(self): m1 = MetaData() m2 = MetaData() Table( "some_table", m1, Column("test", String(10), primary_key=True), Column("amount", Float), ) Table( "some_table", m2, Column("test", String(10), primary_key=True), Column("amount", Float, comment="the amount"), ) diffs = self._fixture(m1, m2) eq_( diffs, [ [ ( "modify_comment", None, "some_table", "amount", { "existing_nullable": True, "existing_type": mock.ANY, "existing_server_default": False, }, None, "the amount", ) ] ], ) def test_remove_column_comment(self): m1 = MetaData() m2 = MetaData() Table( "some_table", m1, Column("test", String(10), primary_key=True), Column("amount", Float, comment="the amount"), ) Table( "some_table", m2, Column("test", String(10), primary_key=True), Column("amount", Float), ) diffs = self._fixture(m1, m2) eq_( diffs, [ [ ( "modify_comment", None, "some_table", "amount", { "existing_nullable": True, "existing_type": mock.ANY, "existing_server_default": False, }, "the amount", None, ) ] ], ) def test_alter_column_comment(self): m1 = MetaData() m2 = MetaData() Table( "some_table", m1, Column("test", String(10), primary_key=True), Column("amount", Float, comment="the amount"), ) Table( "some_table", m2, Column("test", String(10), primary_key=True), Column("amount", Float, comment="the adjusted amount"), ) diffs = self._fixture(m1, m2) eq_( diffs, [ [ ( "modify_comment", None, "some_table", "amount", { "existing_nullable": True, "existing_type": mock.ANY, "existing_server_default": False, }, "the amount", "the adjusted amount", ) ] ], ) 
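    # --- illustrative sketch (not part of the original suite) ---------------
    # Every test above follows the same shape: build a "database" MetaData
    # (m1) and a "model" MetaData (m2), run them through
    # AutogenFixtureTest._fixture(), and assert on the returned diff tuples.
    # A minimal hypothetical example, kept commented out; the test name and
    # table/column names here are invented for illustration only:
    #
    #     def test_add_column_comment_sketch(self):
    #         m1 = MetaData()
    #         m2 = MetaData()
    #         Table("t", m1, Column("id", String(10), primary_key=True))
    #         Table(
    #             "t",
    #             m2,
    #             Column("id", String(10), primary_key=True, comment="pk"),
    #         )
    #         diffs = self._fixture(m1, m2)
    #         # a comment-only change is expected to arrive as a
    #         # "modify_comment" entry, as in test_add_column_comment above
    #         eq_(diffs[0][0][0], "modify_comment")
    # -------------------------------------------------------------------------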
alembic-rel_1_7_6/alembic/testing/suite/test_autogen_computed.py000066400000000000000000000136751417624537100253300ustar00rootroot00000000000000import sqlalchemy as sa from sqlalchemy import Column from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import Table from ._autogen_fixtures import AutogenFixtureTest from ... import testing from ...testing import config from ...testing import eq_ from ...testing import exclusions from ...testing import is_ from ...testing import is_true from ...testing import mock from ...testing import TestBase class AutogenerateComputedTest(AutogenFixtureTest, TestBase): __requires__ = ("computed_columns",) __backend__ = True def test_add_computed_column(self): m1 = MetaData() m2 = MetaData() Table("user", m1, Column("id", Integer, primary_key=True)) Table( "user", m2, Column("id", Integer, primary_key=True), Column("foo", Integer, sa.Computed("5")), ) diffs = self._fixture(m1, m2) eq_(diffs[0][0], "add_column") eq_(diffs[0][2], "user") eq_(diffs[0][3].name, "foo") c = diffs[0][3].computed is_true(isinstance(c, sa.Computed)) is_(c.persisted, None) eq_(str(c.sqltext), "5") def test_remove_computed_column(self): m1 = MetaData() m2 = MetaData() Table( "user", m1, Column("id", Integer, primary_key=True), Column("foo", Integer, sa.Computed("5")), ) Table("user", m2, Column("id", Integer, primary_key=True)) diffs = self._fixture(m1, m2) eq_(diffs[0][0], "remove_column") eq_(diffs[0][2], "user") c = diffs[0][3] eq_(c.name, "foo") if config.requirements.computed_reflects_normally.enabled: is_true(isinstance(c.computed, sa.Computed)) else: is_(c.computed, None) if config.requirements.computed_reflects_as_server_default.enabled: is_true(isinstance(c.server_default, sa.DefaultClause)) eq_(str(c.server_default.arg.text), "5") elif config.requirements.computed_reflects_normally.enabled: is_true(isinstance(c.computed, sa.Computed)) else: is_(c.computed, None) @testing.combinations( lambda: (None, sa.Computed("bar*5")), (lambda: (sa.Computed("bar*5"), None)), lambda: ( sa.Computed("bar*5"), sa.Computed("bar * 42", persisted=True), ), lambda: (sa.Computed("bar*5"), sa.Computed("bar * 42")), ) @config.requirements.computed_reflects_normally def test_cant_change_computed_warning(self, test_case): arg_before, arg_after = testing.resolve_lambda(test_case, **locals()) m1 = MetaData() m2 = MetaData() arg_before = [] if arg_before is None else [arg_before] arg_after = [] if arg_after is None else [arg_after] Table( "user", m1, Column("id", Integer, primary_key=True), Column("bar", Integer), Column("foo", Integer, *arg_before), ) Table( "user", m2, Column("id", Integer, primary_key=True), Column("bar", Integer), Column("foo", Integer, *arg_after), ) with mock.patch("alembic.util.warn") as mock_warn: diffs = self._fixture(m1, m2) eq_( mock_warn.mock_calls, [mock.call("Computed default on user.foo cannot be modified")], ) eq_(list(diffs), []) @testing.combinations( lambda: (None, None), lambda: (sa.Computed("5"), sa.Computed("5")), lambda: (sa.Computed("bar*5"), sa.Computed("bar*5")), ( lambda: (sa.Computed("bar*5"), None), config.requirements.computed_doesnt_reflect_as_server_default, ), ) def test_computed_unchanged(self, test_case): arg_before, arg_after = testing.resolve_lambda(test_case, **locals()) m1 = MetaData() m2 = MetaData() arg_before = [] if arg_before is None else [arg_before] arg_after = [] if arg_after is None else [arg_after] Table( "user", m1, Column("id", Integer, primary_key=True), Column("bar", Integer), Column("foo", Integer, 
*arg_before), ) Table( "user", m2, Column("id", Integer, primary_key=True), Column("bar", Integer), Column("foo", Integer, *arg_after), ) with mock.patch("alembic.util.warn") as mock_warn: diffs = self._fixture(m1, m2) eq_(mock_warn.mock_calls, []) eq_(list(diffs), []) @config.requirements.computed_reflects_as_server_default def test_remove_computed_default_on_computed(self): """Asserts the current behavior which is that on PG and Oracle, the GENERATED ALWAYS AS is reflected as a server default which we can't tell is actually "computed", so these come out as a modification to the server default. """ m1 = MetaData() m2 = MetaData() Table( "user", m1, Column("id", Integer, primary_key=True), Column("bar", Integer), Column("foo", Integer, sa.Computed("bar + 42")), ) Table( "user", m2, Column("id", Integer, primary_key=True), Column("bar", Integer), Column("foo", Integer), ) diffs = self._fixture(m1, m2) eq_(diffs[0][0][0], "modify_default") eq_(diffs[0][0][2], "user") eq_(diffs[0][0][3], "foo") old = diffs[0][0][-2] new = diffs[0][0][-1] is_(new, None) is_true(isinstance(old, sa.DefaultClause)) if exclusions.against(config, "postgresql"): eq_(str(old.arg.text), "(bar + 42)") elif exclusions.against(config, "oracle"): eq_(str(old.arg.text), '"BAR"+42') alembic-rel_1_7_6/alembic/testing/suite/test_autogen_diffs.py000066400000000000000000000203121417624537100245650ustar00rootroot00000000000000from sqlalchemy import BigInteger from sqlalchemy import Column from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import Table from sqlalchemy.testing import in_ from ._autogen_fixtures import AutogenFixtureTest from ... import testing from ...testing import config from ...testing import eq_ from ...testing import is_ from ...testing import TestBase class AlterColumnTest(AutogenFixtureTest, TestBase): __backend__ = True @testing.combinations((True,), (False,)) @config.requirements.comments def test_all_existings_filled(self, pk): m1 = MetaData() m2 = MetaData() Table("a", m1, Column("x", Integer, primary_key=pk)) Table("a", m2, Column("x", Integer, comment="x", primary_key=pk)) alter_col = self._assert_alter_col(m1, m2, pk) eq_(alter_col.modify_comment, "x") @testing.combinations((True,), (False,)) @config.requirements.comments def test_all_existings_filled_in_notnull(self, pk): m1 = MetaData() m2 = MetaData() Table("a", m1, Column("x", Integer, nullable=False, primary_key=pk)) Table( "a", m2, Column("x", Integer, nullable=False, comment="x", primary_key=pk), ) self._assert_alter_col(m1, m2, pk, nullable=False) @testing.combinations((True,), (False,)) @config.requirements.comments def test_all_existings_filled_in_comment(self, pk): m1 = MetaData() m2 = MetaData() Table("a", m1, Column("x", Integer, comment="old", primary_key=pk)) Table("a", m2, Column("x", Integer, comment="new", primary_key=pk)) alter_col = self._assert_alter_col(m1, m2, pk) eq_(alter_col.existing_comment, "old") @testing.combinations((True,), (False,)) @config.requirements.comments def test_all_existings_filled_in_server_default(self, pk): m1 = MetaData() m2 = MetaData() Table( "a", m1, Column("x", Integer, server_default="5", primary_key=pk) ) Table( "a", m2, Column( "x", Integer, server_default="5", comment="new", primary_key=pk ), ) alter_col = self._assert_alter_col(m1, m2, pk) in_("5", alter_col.existing_server_default.arg.text) def _assert_alter_col(self, m1, m2, pk, nullable=None): ops = self._fixture(m1, m2, return_ops=True) modify_table = ops.ops[-1] alter_col = modify_table.ops[0] if nullable is 
None: eq_(alter_col.existing_nullable, not pk) else: eq_(alter_col.existing_nullable, nullable) assert alter_col.existing_type._compare_type_affinity(Integer()) return alter_col class AutoincrementTest(AutogenFixtureTest, TestBase): __backend__ = True __requires__ = ("integer_subtype_comparisons",) def test_alter_column_autoincrement_none(self): m1 = MetaData() m2 = MetaData() Table("a", m1, Column("x", Integer, nullable=False)) Table("a", m2, Column("x", Integer, nullable=True)) ops = self._fixture(m1, m2, return_ops=True) assert "autoincrement" not in ops.ops[0].ops[0].kw def test_alter_column_autoincrement_pk_false(self): m1 = MetaData() m2 = MetaData() Table( "a", m1, Column("x", Integer, primary_key=True, autoincrement=False), ) Table( "a", m2, Column("x", BigInteger, primary_key=True, autoincrement=False), ) ops = self._fixture(m1, m2, return_ops=True) is_(ops.ops[0].ops[0].kw["autoincrement"], False) def test_alter_column_autoincrement_pk_implicit_true(self): m1 = MetaData() m2 = MetaData() Table("a", m1, Column("x", Integer, primary_key=True)) Table("a", m2, Column("x", BigInteger, primary_key=True)) ops = self._fixture(m1, m2, return_ops=True) is_(ops.ops[0].ops[0].kw["autoincrement"], True) def test_alter_column_autoincrement_pk_explicit_true(self): m1 = MetaData() m2 = MetaData() Table( "a", m1, Column("x", Integer, primary_key=True, autoincrement=True) ) Table( "a", m2, Column("x", BigInteger, primary_key=True, autoincrement=True), ) ops = self._fixture(m1, m2, return_ops=True) is_(ops.ops[0].ops[0].kw["autoincrement"], True) def test_alter_column_autoincrement_nonpk_false(self): m1 = MetaData() m2 = MetaData() Table( "a", m1, Column("id", Integer, primary_key=True), Column("x", Integer, autoincrement=False), ) Table( "a", m2, Column("id", Integer, primary_key=True), Column("x", BigInteger, autoincrement=False), ) ops = self._fixture(m1, m2, return_ops=True) is_(ops.ops[0].ops[0].kw["autoincrement"], False) def test_alter_column_autoincrement_nonpk_implicit_false(self): m1 = MetaData() m2 = MetaData() Table( "a", m1, Column("id", Integer, primary_key=True), Column("x", Integer), ) Table( "a", m2, Column("id", Integer, primary_key=True), Column("x", BigInteger), ) ops = self._fixture(m1, m2, return_ops=True) assert "autoincrement" not in ops.ops[0].ops[0].kw def test_alter_column_autoincrement_nonpk_explicit_true(self): m1 = MetaData() m2 = MetaData() Table( "a", m1, Column("id", Integer, primary_key=True, autoincrement=False), Column("x", Integer, autoincrement=True), ) Table( "a", m2, Column("id", Integer, primary_key=True, autoincrement=False), Column("x", BigInteger, autoincrement=True), ) ops = self._fixture(m1, m2, return_ops=True) is_(ops.ops[0].ops[0].kw["autoincrement"], True) def test_alter_column_autoincrement_compositepk_false(self): m1 = MetaData() m2 = MetaData() Table( "a", m1, Column("id", Integer, primary_key=True), Column("x", Integer, primary_key=True, autoincrement=False), ) Table( "a", m2, Column("id", Integer, primary_key=True), Column("x", BigInteger, primary_key=True, autoincrement=False), ) ops = self._fixture(m1, m2, return_ops=True) is_(ops.ops[0].ops[0].kw["autoincrement"], False) def test_alter_column_autoincrement_compositepk_implicit_false(self): m1 = MetaData() m2 = MetaData() Table( "a", m1, Column("id", Integer, primary_key=True), Column("x", Integer, primary_key=True), ) Table( "a", m2, Column("id", Integer, primary_key=True), Column("x", BigInteger, primary_key=True), ) ops = self._fixture(m1, m2, return_ops=True) assert "autoincrement" not in 
ops.ops[0].ops[0].kw @config.requirements.autoincrement_on_composite_pk def test_alter_column_autoincrement_compositepk_explicit_true(self): m1 = MetaData() m2 = MetaData() Table( "a", m1, Column("id", Integer, primary_key=True, autoincrement=False), Column("x", Integer, primary_key=True, autoincrement=True), # on SQLA 1.0 and earlier, this being present # trips the "add KEY for the primary key" so that the # AUTO_INCREMENT keyword is accepted by MySQL. SQLA 1.1 and # greater the columns are just reorganized. mysql_engine="InnoDB", ) Table( "a", m2, Column("id", Integer, primary_key=True, autoincrement=False), Column("x", BigInteger, primary_key=True, autoincrement=True), ) ops = self._fixture(m1, m2, return_ops=True) is_(ops.ops[0].ops[0].kw["autoincrement"], True) alembic-rel_1_7_6/alembic/testing/suite/test_autogen_fks.py000066400000000000000000001002371417624537100242620ustar00rootroot00000000000000from sqlalchemy import Column from sqlalchemy import ForeignKeyConstraint from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import String from sqlalchemy import Table from ._autogen_fixtures import AutogenFixtureTest from ...testing import combinations from ...testing import config from ...testing import eq_ from ...testing import mock from ...testing import TestBase class AutogenerateForeignKeysTest(AutogenFixtureTest, TestBase): __backend__ = True __requires__ = ("foreign_key_constraint_reflection",) def test_remove_fk(self): m1 = MetaData() m2 = MetaData() Table( "some_table", m1, Column("test", String(10), primary_key=True), ) Table( "user", m1, Column("id", Integer, primary_key=True), Column("name", String(50), nullable=False), Column("a1", String(10), server_default="x"), Column("test2", String(10)), ForeignKeyConstraint(["test2"], ["some_table.test"]), ) Table( "some_table", m2, Column("test", String(10), primary_key=True), ) Table( "user", m2, Column("id", Integer, primary_key=True), Column("name", String(50), nullable=False), Column("a1", String(10), server_default="x"), Column("test2", String(10)), ) diffs = self._fixture(m1, m2) self._assert_fk_diff( diffs[0], "remove_fk", "user", ["test2"], "some_table", ["test"], conditional_name="servergenerated", ) def test_add_fk(self): m1 = MetaData() m2 = MetaData() Table( "some_table", m1, Column("id", Integer, primary_key=True), Column("test", String(10)), ) Table( "user", m1, Column("id", Integer, primary_key=True), Column("name", String(50), nullable=False), Column("a1", String(10), server_default="x"), Column("test2", String(10)), ) Table( "some_table", m2, Column("id", Integer, primary_key=True), Column("test", String(10)), ) Table( "user", m2, Column("id", Integer, primary_key=True), Column("name", String(50), nullable=False), Column("a1", String(10), server_default="x"), Column("test2", String(10)), ForeignKeyConstraint(["test2"], ["some_table.test"]), ) diffs = self._fixture(m1, m2) self._assert_fk_diff( diffs[0], "add_fk", "user", ["test2"], "some_table", ["test"] ) def test_no_change(self): m1 = MetaData() m2 = MetaData() Table( "some_table", m1, Column("id", Integer, primary_key=True), Column("test", String(10)), ) Table( "user", m1, Column("id", Integer, primary_key=True), Column("name", String(50), nullable=False), Column("a1", String(10), server_default="x"), Column("test2", Integer), ForeignKeyConstraint(["test2"], ["some_table.id"]), ) Table( "some_table", m2, Column("id", Integer, primary_key=True), Column("test", String(10)), ) Table( "user", m2, Column("id", Integer, primary_key=True), 
Column("name", String(50), nullable=False), Column("a1", String(10), server_default="x"), Column("test2", Integer), ForeignKeyConstraint(["test2"], ["some_table.id"]), ) diffs = self._fixture(m1, m2) eq_(diffs, []) def test_no_change_composite_fk(self): m1 = MetaData() m2 = MetaData() Table( "some_table", m1, Column("id_1", String(10), primary_key=True), Column("id_2", String(10), primary_key=True), ) Table( "user", m1, Column("id", Integer, primary_key=True), Column("name", String(50), nullable=False), Column("a1", String(10), server_default="x"), Column("other_id_1", String(10)), Column("other_id_2", String(10)), ForeignKeyConstraint( ["other_id_1", "other_id_2"], ["some_table.id_1", "some_table.id_2"], ), ) Table( "some_table", m2, Column("id_1", String(10), primary_key=True), Column("id_2", String(10), primary_key=True), ) Table( "user", m2, Column("id", Integer, primary_key=True), Column("name", String(50), nullable=False), Column("a1", String(10), server_default="x"), Column("other_id_1", String(10)), Column("other_id_2", String(10)), ForeignKeyConstraint( ["other_id_1", "other_id_2"], ["some_table.id_1", "some_table.id_2"], ), ) diffs = self._fixture(m1, m2) eq_(diffs, []) def test_casing_convention_changed_so_put_drops_first(self): m1 = MetaData() m2 = MetaData() Table( "some_table", m1, Column("test", String(10), primary_key=True), ) Table( "user", m1, Column("id", Integer, primary_key=True), Column("name", String(50), nullable=False), Column("a1", String(10), server_default="x"), Column("test2", String(10)), ForeignKeyConstraint(["test2"], ["some_table.test"], name="MyFK"), ) Table( "some_table", m2, Column("test", String(10), primary_key=True), ) # foreign key autogen currently does not take "name" into account, # so change the def just for the purposes of testing the # add/drop order for now. 
Table( "user", m2, Column("id", Integer, primary_key=True), Column("name", String(50), nullable=False), Column("a1", String(10), server_default="x"), Column("test2", String(10)), ForeignKeyConstraint(["a1"], ["some_table.test"], name="myfk"), ) diffs = self._fixture(m1, m2) self._assert_fk_diff( diffs[0], "remove_fk", "user", ["test2"], "some_table", ["test"], name="MyFK" if config.requirements.fk_names.enabled else None, ) self._assert_fk_diff( diffs[1], "add_fk", "user", ["a1"], "some_table", ["test"], name="myfk", ) def test_add_composite_fk_with_name(self): m1 = MetaData() m2 = MetaData() Table( "some_table", m1, Column("id_1", String(10), primary_key=True), Column("id_2", String(10), primary_key=True), ) Table( "user", m1, Column("id", Integer, primary_key=True), Column("name", String(50), nullable=False), Column("a1", String(10), server_default="x"), Column("other_id_1", String(10)), Column("other_id_2", String(10)), ) Table( "some_table", m2, Column("id_1", String(10), primary_key=True), Column("id_2", String(10), primary_key=True), ) Table( "user", m2, Column("id", Integer, primary_key=True), Column("name", String(50), nullable=False), Column("a1", String(10), server_default="x"), Column("other_id_1", String(10)), Column("other_id_2", String(10)), ForeignKeyConstraint( ["other_id_1", "other_id_2"], ["some_table.id_1", "some_table.id_2"], name="fk_test_name", ), ) diffs = self._fixture(m1, m2) self._assert_fk_diff( diffs[0], "add_fk", "user", ["other_id_1", "other_id_2"], "some_table", ["id_1", "id_2"], name="fk_test_name", ) @config.requirements.no_name_normalize def test_remove_composite_fk(self): m1 = MetaData() m2 = MetaData() Table( "some_table", m1, Column("id_1", String(10), primary_key=True), Column("id_2", String(10), primary_key=True), ) Table( "user", m1, Column("id", Integer, primary_key=True), Column("name", String(50), nullable=False), Column("a1", String(10), server_default="x"), Column("other_id_1", String(10)), Column("other_id_2", String(10)), ForeignKeyConstraint( ["other_id_1", "other_id_2"], ["some_table.id_1", "some_table.id_2"], name="fk_test_name", ), ) Table( "some_table", m2, Column("id_1", String(10), primary_key=True), Column("id_2", String(10), primary_key=True), ) Table( "user", m2, Column("id", Integer, primary_key=True), Column("name", String(50), nullable=False), Column("a1", String(10), server_default="x"), Column("other_id_1", String(10)), Column("other_id_2", String(10)), ) diffs = self._fixture(m1, m2) self._assert_fk_diff( diffs[0], "remove_fk", "user", ["other_id_1", "other_id_2"], "some_table", ["id_1", "id_2"], conditional_name="fk_test_name", ) def test_add_fk_colkeys(self): m1 = MetaData() m2 = MetaData() Table( "some_table", m1, Column("id_1", String(10), primary_key=True), Column("id_2", String(10), primary_key=True), ) Table( "user", m1, Column("id", Integer, primary_key=True), Column("other_id_1", String(10)), Column("other_id_2", String(10)), ) Table( "some_table", m2, Column("id_1", String(10), key="tid1", primary_key=True), Column("id_2", String(10), key="tid2", primary_key=True), ) Table( "user", m2, Column("id", Integer, primary_key=True), Column("other_id_1", String(10), key="oid1"), Column("other_id_2", String(10), key="oid2"), ForeignKeyConstraint( ["oid1", "oid2"], ["some_table.tid1", "some_table.tid2"], name="fk_test_name", ), ) diffs = self._fixture(m1, m2) self._assert_fk_diff( diffs[0], "add_fk", "user", ["other_id_1", "other_id_2"], "some_table", ["id_1", "id_2"], name="fk_test_name", ) def test_no_change_colkeys(self): m1 
= MetaData() m2 = MetaData() Table( "some_table", m1, Column("id_1", String(10), primary_key=True), Column("id_2", String(10), primary_key=True), ) Table( "user", m1, Column("id", Integer, primary_key=True), Column("other_id_1", String(10)), Column("other_id_2", String(10)), ForeignKeyConstraint( ["other_id_1", "other_id_2"], ["some_table.id_1", "some_table.id_2"], ), ) Table( "some_table", m2, Column("id_1", String(10), key="tid1", primary_key=True), Column("id_2", String(10), key="tid2", primary_key=True), ) Table( "user", m2, Column("id", Integer, primary_key=True), Column("other_id_1", String(10), key="oid1"), Column("other_id_2", String(10), key="oid2"), ForeignKeyConstraint( ["oid1", "oid2"], ["some_table.tid1", "some_table.tid2"] ), ) diffs = self._fixture(m1, m2) eq_(diffs, []) class IncludeHooksTest(AutogenFixtureTest, TestBase): __backend__ = True __requires__ = ("fk_names",) @combinations(("object",), ("name",)) @config.requirements.no_name_normalize def test_remove_connection_fk(self, hook_type): m1 = MetaData() m2 = MetaData() ref = Table( "ref", m1, Column("id", Integer, primary_key=True), ) t1 = Table( "t", m1, Column("x", Integer), Column("y", Integer), ) t1.append_constraint( ForeignKeyConstraint([t1.c.x], [ref.c.id], name="fk1") ) t1.append_constraint( ForeignKeyConstraint([t1.c.y], [ref.c.id], name="fk2") ) ref = Table( "ref", m2, Column("id", Integer, primary_key=True), ) Table( "t", m2, Column("x", Integer), Column("y", Integer), ) if hook_type == "object": def include_object(object_, name, type_, reflected, compare_to): return not ( isinstance(object_, ForeignKeyConstraint) and type_ == "foreign_key_constraint" and reflected and name == "fk1" ) diffs = self._fixture(m1, m2, object_filters=include_object) elif hook_type == "name": def include_name(name, type_, parent_names): if name == "fk1": if type_ == "index": # MariaDB thing return True eq_(type_, "foreign_key_constraint") eq_( parent_names, { "schema_name": None, "table_name": "t", "schema_qualified_table_name": "t", }, ) return False else: return True diffs = self._fixture(m1, m2, name_filters=include_name) self._assert_fk_diff( diffs[0], "remove_fk", "t", ["y"], "ref", ["id"], conditional_name="fk2", ) eq_(len(diffs), 1) def test_add_metadata_fk(self): m1 = MetaData() m2 = MetaData() Table( "ref", m1, Column("id", Integer, primary_key=True), ) Table( "t", m1, Column("x", Integer), Column("y", Integer), ) ref = Table( "ref", m2, Column("id", Integer, primary_key=True), ) t2 = Table( "t", m2, Column("x", Integer), Column("y", Integer), ) t2.append_constraint( ForeignKeyConstraint([t2.c.x], [ref.c.id], name="fk1") ) t2.append_constraint( ForeignKeyConstraint([t2.c.y], [ref.c.id], name="fk2") ) def include_object(object_, name, type_, reflected, compare_to): return not ( isinstance(object_, ForeignKeyConstraint) and type_ == "foreign_key_constraint" and not reflected and name == "fk1" ) diffs = self._fixture(m1, m2, object_filters=include_object) self._assert_fk_diff( diffs[0], "add_fk", "t", ["y"], "ref", ["id"], name="fk2" ) eq_(len(diffs), 1) @combinations(("object",), ("name",)) @config.requirements.no_name_normalize def test_change_fk(self, hook_type): m1 = MetaData() m2 = MetaData() r1a = Table( "ref_a", m1, Column("a", Integer, primary_key=True), ) Table( "ref_b", m1, Column("a", Integer, primary_key=True), Column("b", Integer, primary_key=True), ) t1 = Table( "t", m1, Column("x", Integer), Column("y", Integer), Column("z", Integer), ) t1.append_constraint( ForeignKeyConstraint([t1.c.x], [r1a.c.a], 
name="fk1") ) t1.append_constraint( ForeignKeyConstraint([t1.c.y], [r1a.c.a], name="fk2") ) Table( "ref_a", m2, Column("a", Integer, primary_key=True), ) r2b = Table( "ref_b", m2, Column("a", Integer, primary_key=True), Column("b", Integer, primary_key=True), ) t2 = Table( "t", m2, Column("x", Integer), Column("y", Integer), Column("z", Integer), ) t2.append_constraint( ForeignKeyConstraint( [t2.c.x, t2.c.z], [r2b.c.a, r2b.c.b], name="fk1" ) ) t2.append_constraint( ForeignKeyConstraint( [t2.c.y, t2.c.z], [r2b.c.a, r2b.c.b], name="fk2" ) ) if hook_type == "object": def include_object(object_, name, type_, reflected, compare_to): return not ( isinstance(object_, ForeignKeyConstraint) and type_ == "foreign_key_constraint" and name == "fk1" ) diffs = self._fixture(m1, m2, object_filters=include_object) elif hook_type == "name": def include_name(name, type_, parent_names): if type_ == "index": return True # MariaDB thing if name == "fk1": eq_(type_, "foreign_key_constraint") eq_( parent_names, { "schema_name": None, "table_name": "t", "schema_qualified_table_name": "t", }, ) return False else: return True diffs = self._fixture(m1, m2, name_filters=include_name) if hook_type == "object": self._assert_fk_diff( diffs[0], "remove_fk", "t", ["y"], "ref_a", ["a"], name="fk2" ) self._assert_fk_diff( diffs[1], "add_fk", "t", ["y", "z"], "ref_b", ["a", "b"], name="fk2", ) eq_(len(diffs), 2) elif hook_type == "name": eq_( {(d[0], d[1].name) for d in diffs}, {("add_fk", "fk2"), ("add_fk", "fk1"), ("remove_fk", "fk2")}, ) class AutogenerateFKOptionsTest(AutogenFixtureTest, TestBase): __backend__ = True def _fk_opts_fixture(self, old_opts, new_opts): m1 = MetaData() m2 = MetaData() Table( "some_table", m1, Column("id", Integer, primary_key=True), Column("test", String(10)), ) Table( "user", m1, Column("id", Integer, primary_key=True), Column("name", String(50), nullable=False), Column("tid", Integer), ForeignKeyConstraint(["tid"], ["some_table.id"], **old_opts), ) Table( "some_table", m2, Column("id", Integer, primary_key=True), Column("test", String(10)), ) Table( "user", m2, Column("id", Integer, primary_key=True), Column("name", String(50), nullable=False), Column("tid", Integer), ForeignKeyConstraint(["tid"], ["some_table.id"], **new_opts), ) return self._fixture(m1, m2) @config.requirements.fk_ondelete_is_reflected def test_add_ondelete(self): diffs = self._fk_opts_fixture({}, {"ondelete": "cascade"}) self._assert_fk_diff( diffs[0], "remove_fk", "user", ["tid"], "some_table", ["id"], ondelete=None, conditional_name="servergenerated", ) self._assert_fk_diff( diffs[1], "add_fk", "user", ["tid"], "some_table", ["id"], ondelete="cascade", ) @config.requirements.fk_ondelete_is_reflected def test_remove_ondelete(self): diffs = self._fk_opts_fixture({"ondelete": "CASCADE"}, {}) self._assert_fk_diff( diffs[0], "remove_fk", "user", ["tid"], "some_table", ["id"], ondelete="CASCADE", conditional_name="servergenerated", ) self._assert_fk_diff( diffs[1], "add_fk", "user", ["tid"], "some_table", ["id"], ondelete=None, ) def test_nochange_ondelete(self): """test case sensitivity""" diffs = self._fk_opts_fixture( {"ondelete": "caSCAde"}, {"ondelete": "CasCade"} ) eq_(diffs, []) @config.requirements.fk_onupdate_is_reflected def test_add_onupdate(self): diffs = self._fk_opts_fixture({}, {"onupdate": "cascade"}) self._assert_fk_diff( diffs[0], "remove_fk", "user", ["tid"], "some_table", ["id"], onupdate=None, conditional_name="servergenerated", ) self._assert_fk_diff( diffs[1], "add_fk", "user", ["tid"], "some_table", 
["id"], onupdate="cascade", ) @config.requirements.fk_onupdate_is_reflected def test_remove_onupdate(self): diffs = self._fk_opts_fixture({"onupdate": "CASCADE"}, {}) self._assert_fk_diff( diffs[0], "remove_fk", "user", ["tid"], "some_table", ["id"], onupdate="CASCADE", conditional_name="servergenerated", ) self._assert_fk_diff( diffs[1], "add_fk", "user", ["tid"], "some_table", ["id"], onupdate=None, ) @config.requirements.fk_onupdate def test_nochange_onupdate(self): """test case sensitivity""" diffs = self._fk_opts_fixture( {"onupdate": "caSCAde"}, {"onupdate": "CasCade"} ) eq_(diffs, []) @config.requirements.fk_ondelete_restrict def test_nochange_ondelete_restrict(self): """test the RESTRICT option which MySQL doesn't report on""" diffs = self._fk_opts_fixture( {"ondelete": "restrict"}, {"ondelete": "restrict"} ) eq_(diffs, []) @config.requirements.fk_onupdate_restrict def test_nochange_onupdate_restrict(self): """test the RESTRICT option which MySQL doesn't report on""" diffs = self._fk_opts_fixture( {"onupdate": "restrict"}, {"onupdate": "restrict"} ) eq_(diffs, []) @config.requirements.fk_ondelete_noaction def test_nochange_ondelete_noaction(self): """test the NO ACTION option which generally comes back as None""" diffs = self._fk_opts_fixture( {"ondelete": "no action"}, {"ondelete": "no action"} ) eq_(diffs, []) @config.requirements.fk_onupdate def test_nochange_onupdate_noaction(self): """test the NO ACTION option which generally comes back as None""" diffs = self._fk_opts_fixture( {"onupdate": "no action"}, {"onupdate": "no action"} ) eq_(diffs, []) @config.requirements.fk_ondelete_restrict def test_change_ondelete_from_restrict(self): """test the RESTRICT option which MySQL doesn't report on""" # note that this is impossible to detect if we change # from RESTRICT to NO ACTION on MySQL. diffs = self._fk_opts_fixture( {"ondelete": "restrict"}, {"ondelete": "cascade"} ) self._assert_fk_diff( diffs[0], "remove_fk", "user", ["tid"], "some_table", ["id"], onupdate=None, ondelete=mock.ANY, # MySQL reports None, PG reports RESTRICT conditional_name="servergenerated", ) self._assert_fk_diff( diffs[1], "add_fk", "user", ["tid"], "some_table", ["id"], onupdate=None, ondelete="cascade", ) @config.requirements.fk_ondelete_restrict def test_change_onupdate_from_restrict(self): """test the RESTRICT option which MySQL doesn't report on""" # note that this is impossible to detect if we change # from RESTRICT to NO ACTION on MySQL. 
diffs = self._fk_opts_fixture( {"onupdate": "restrict"}, {"onupdate": "cascade"} ) self._assert_fk_diff( diffs[0], "remove_fk", "user", ["tid"], "some_table", ["id"], onupdate=mock.ANY, # MySQL reports None, PG reports RESTRICT ondelete=None, conditional_name="servergenerated", ) self._assert_fk_diff( diffs[1], "add_fk", "user", ["tid"], "some_table", ["id"], onupdate="cascade", ondelete=None, ) @config.requirements.fk_ondelete_is_reflected @config.requirements.fk_onupdate_is_reflected def test_ondelete_onupdate_combo(self): diffs = self._fk_opts_fixture( {"onupdate": "CASCADE", "ondelete": "SET NULL"}, {"onupdate": "RESTRICT", "ondelete": "RESTRICT"}, ) self._assert_fk_diff( diffs[0], "remove_fk", "user", ["tid"], "some_table", ["id"], onupdate="CASCADE", ondelete="SET NULL", conditional_name="servergenerated", ) self._assert_fk_diff( diffs[1], "add_fk", "user", ["tid"], "some_table", ["id"], onupdate="RESTRICT", ondelete="RESTRICT", ) @config.requirements.fk_initially def test_add_initially_deferred(self): diffs = self._fk_opts_fixture({}, {"initially": "deferred"}) self._assert_fk_diff( diffs[0], "remove_fk", "user", ["tid"], "some_table", ["id"], initially=None, conditional_name="servergenerated", ) self._assert_fk_diff( diffs[1], "add_fk", "user", ["tid"], "some_table", ["id"], initially="deferred", ) @config.requirements.fk_initially def test_remove_initially_deferred(self): diffs = self._fk_opts_fixture({"initially": "deferred"}, {}) self._assert_fk_diff( diffs[0], "remove_fk", "user", ["tid"], "some_table", ["id"], initially="DEFERRED", deferrable=True, conditional_name="servergenerated", ) self._assert_fk_diff( diffs[1], "add_fk", "user", ["tid"], "some_table", ["id"], initially=None, ) @config.requirements.fk_deferrable @config.requirements.fk_initially def test_add_initially_immediate_plus_deferrable(self): diffs = self._fk_opts_fixture( {}, {"initially": "immediate", "deferrable": True} ) self._assert_fk_diff( diffs[0], "remove_fk", "user", ["tid"], "some_table", ["id"], initially=None, conditional_name="servergenerated", ) self._assert_fk_diff( diffs[1], "add_fk", "user", ["tid"], "some_table", ["id"], initially="immediate", deferrable=True, ) @config.requirements.fk_deferrable @config.requirements.fk_initially def test_remove_initially_immediate_plus_deferrable(self): diffs = self._fk_opts_fixture( {"initially": "immediate", "deferrable": True}, {} ) self._assert_fk_diff( diffs[0], "remove_fk", "user", ["tid"], "some_table", ["id"], initially=None, # immediate is the default deferrable=True, conditional_name="servergenerated", ) self._assert_fk_diff( diffs[1], "add_fk", "user", ["tid"], "some_table", ["id"], initially=None, deferrable=None, ) @config.requirements.fk_initially @config.requirements.fk_deferrable def test_add_initially_deferrable_nochange_one(self): diffs = self._fk_opts_fixture( {"deferrable": True, "initially": "immediate"}, {"deferrable": True, "initially": "immediate"}, ) eq_(diffs, []) @config.requirements.fk_initially @config.requirements.fk_deferrable def test_add_initially_deferrable_nochange_two(self): diffs = self._fk_opts_fixture( {"deferrable": True, "initially": "deferred"}, {"deferrable": True, "initially": "deferred"}, ) eq_(diffs, []) @config.requirements.fk_initially @config.requirements.fk_deferrable def test_add_initially_deferrable_nochange_three(self): diffs = self._fk_opts_fixture( {"deferrable": None, "initially": "deferred"}, {"deferrable": None, "initially": "deferred"}, ) eq_(diffs, []) @config.requirements.fk_deferrable def 
test_add_deferrable(self): diffs = self._fk_opts_fixture({}, {"deferrable": True}) self._assert_fk_diff( diffs[0], "remove_fk", "user", ["tid"], "some_table", ["id"], deferrable=None, conditional_name="servergenerated", ) self._assert_fk_diff( diffs[1], "add_fk", "user", ["tid"], "some_table", ["id"], deferrable=True, ) @config.requirements.fk_deferrable_is_reflected def test_remove_deferrable(self): diffs = self._fk_opts_fixture({"deferrable": True}, {}) self._assert_fk_diff( diffs[0], "remove_fk", "user", ["tid"], "some_table", ["id"], deferrable=True, conditional_name="servergenerated", ) self._assert_fk_diff( diffs[1], "add_fk", "user", ["tid"], "some_table", ["id"], deferrable=None, ) alembic-rel_1_7_6/alembic/testing/suite/test_autogen_identity.py000066400000000000000000000137101417624537100253270ustar00rootroot00000000000000import sqlalchemy as sa from sqlalchemy import Column from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import Table from ._autogen_fixtures import AutogenFixtureTest from ... import testing from ...testing import config from ...testing import eq_ from ...testing import is_true from ...testing import TestBase class AutogenerateIdentityTest(AutogenFixtureTest, TestBase): __requires__ = ("identity_columns",) __backend__ = True def test_add_identity_column(self): m1 = MetaData() m2 = MetaData() Table("user", m1, Column("other", sa.Text)) Table( "user", m2, Column("other", sa.Text), Column( "id", Integer, sa.Identity(start=5, increment=7), primary_key=True, ), ) diffs = self._fixture(m1, m2) eq_(diffs[0][0], "add_column") eq_(diffs[0][2], "user") eq_(diffs[0][3].name, "id") i = diffs[0][3].identity is_true(isinstance(i, sa.Identity)) eq_(i.start, 5) eq_(i.increment, 7) def test_remove_identity_column(self): m1 = MetaData() m2 = MetaData() Table( "user", m1, Column( "id", Integer, sa.Identity(start=2, increment=3), primary_key=True, ), ) Table("user", m2) diffs = self._fixture(m1, m2) eq_(diffs[0][0], "remove_column") eq_(diffs[0][2], "user") c = diffs[0][3] eq_(c.name, "id") is_true(isinstance(c.identity, sa.Identity)) eq_(c.identity.start, 2) eq_(c.identity.increment, 3) def test_no_change_identity_column(self): m1 = MetaData() m2 = MetaData() for m in (m1, m2): Table( "user", m, Column("id", Integer, sa.Identity(start=2)), ) diffs = self._fixture(m1, m2) eq_(diffs, []) @testing.combinations( (None, dict(start=2)), (dict(start=2), None), (dict(start=2), dict(start=2, increment=7)), (dict(always=False), dict(always=True)), ( dict(start=1, minvalue=0, maxvalue=100, cycle=True), dict(start=1, minvalue=0, maxvalue=100, cycle=False), ), ( dict(start=10, increment=3, maxvalue=9999), dict(start=10, increment=1, maxvalue=3333), ), ) @config.requirements.identity_columns_alter def test_change_identity(self, before, after): arg_before = (sa.Identity(**before),) if before else () arg_after = (sa.Identity(**after),) if after else () m1 = MetaData() m2 = MetaData() Table( "user", m1, Column("id", Integer, *arg_before), Column("other", sa.Text), ) Table( "user", m2, Column("id", Integer, *arg_after), Column("other", sa.Text), ) diffs = self._fixture(m1, m2) eq_(len(diffs[0]), 1) diffs = diffs[0][0] eq_(diffs[0], "modify_default") eq_(diffs[2], "user") eq_(diffs[3], "id") old = diffs[5] new = diffs[6] def check(kw, idt): if kw: is_true(isinstance(idt, sa.Identity)) for k, v in kw.items(): eq_(getattr(idt, k), v) else: is_true(idt in (None, False)) check(before, old) check(after, new) def test_add_identity_to_column(self): m1 = MetaData() m2 = 
MetaData() Table( "user", m1, Column("id", Integer), Column("other", sa.Text), ) Table( "user", m2, Column("id", Integer, sa.Identity(start=2, maxvalue=1000)), Column("other", sa.Text), ) diffs = self._fixture(m1, m2) eq_(len(diffs[0]), 1) diffs = diffs[0][0] eq_(diffs[0], "modify_default") eq_(diffs[2], "user") eq_(diffs[3], "id") eq_(diffs[5], None) added = diffs[6] is_true(isinstance(added, sa.Identity)) eq_(added.start, 2) eq_(added.maxvalue, 1000) def test_remove_identity_from_column(self): m1 = MetaData() m2 = MetaData() Table( "user", m1, Column("id", Integer, sa.Identity(start=2, maxvalue=1000)), Column("other", sa.Text), ) Table( "user", m2, Column("id", Integer), Column("other", sa.Text), ) diffs = self._fixture(m1, m2) eq_(len(diffs[0]), 1) diffs = diffs[0][0] eq_(diffs[0], "modify_default") eq_(diffs[2], "user") eq_(diffs[3], "id") eq_(diffs[6], None) removed = diffs[5] is_true(isinstance(removed, sa.Identity)) def test_identity_on_null(self): m1 = MetaData() m2 = MetaData() Table( "user", m1, Column("id", Integer, sa.Identity(start=2, on_null=True)), Column("other", sa.Text), ) Table( "user", m2, Column("id", Integer, sa.Identity(start=2, on_null=False)), Column("other", sa.Text), ) diffs = self._fixture(m1, m2) if not config.requirements.supports_identity_on_null.enabled: eq_(diffs, []) else: eq_(len(diffs[0]), 1) diffs = diffs[0][0] eq_(diffs[0], "modify_default") eq_(diffs[2], "user") eq_(diffs[3], "id") old = diffs[5] new = diffs[6] is_true(isinstance(old, sa.Identity)) is_true(isinstance(new, sa.Identity)) alembic-rel_1_7_6/alembic/testing/suite/test_environment.py000066400000000000000000000271361417624537100243270ustar00rootroot00000000000000import io from ...migration import MigrationContext from ...testing import assert_raises from ...testing import config from ...testing import eq_ from ...testing import is_ from ...testing import is_false from ...testing import is_not_ from ...testing import is_true from ...testing import ne_ from ...testing.fixtures import TestBase class MigrationTransactionTest(TestBase): __backend__ = True conn = None def _fixture(self, opts): self.conn = conn = config.db.connect() if opts.get("as_sql", False): self.context = MigrationContext.configure( dialect=conn.dialect, opts=opts ) self.context.output_buffer = ( self.context.impl.output_buffer ) = io.StringIO() else: self.context = MigrationContext.configure( connection=conn, opts=opts ) return self.context def teardown(self): if self.conn: self.conn.close() def test_proxy_transaction_rollback(self): context = self._fixture( {"transaction_per_migration": True, "transactional_ddl": True} ) is_false(self.conn.in_transaction()) proxy = context.begin_transaction(_per_migration=True) is_true(self.conn.in_transaction()) proxy.rollback() is_false(self.conn.in_transaction()) def test_proxy_transaction_commit(self): context = self._fixture( {"transaction_per_migration": True, "transactional_ddl": True} ) proxy = context.begin_transaction(_per_migration=True) is_true(self.conn.in_transaction()) proxy.commit() is_false(self.conn.in_transaction()) def test_proxy_transaction_contextmanager_commit(self): context = self._fixture( {"transaction_per_migration": True, "transactional_ddl": True} ) proxy = context.begin_transaction(_per_migration=True) is_true(self.conn.in_transaction()) with proxy: pass is_false(self.conn.in_transaction()) def test_proxy_transaction_contextmanager_rollback(self): context = self._fixture( {"transaction_per_migration": True, "transactional_ddl": True} ) proxy = 
context.begin_transaction(_per_migration=True) is_true(self.conn.in_transaction()) def go(): with proxy: raise Exception("hi") assert_raises(Exception, go) is_false(self.conn.in_transaction()) def test_proxy_transaction_contextmanager_explicit_rollback(self): context = self._fixture( {"transaction_per_migration": True, "transactional_ddl": True} ) proxy = context.begin_transaction(_per_migration=True) is_true(self.conn.in_transaction()) with proxy: is_true(self.conn.in_transaction()) proxy.rollback() is_false(self.conn.in_transaction()) is_false(self.conn.in_transaction()) def test_proxy_transaction_contextmanager_explicit_commit(self): context = self._fixture( {"transaction_per_migration": True, "transactional_ddl": True} ) proxy = context.begin_transaction(_per_migration=True) is_true(self.conn.in_transaction()) with proxy: is_true(self.conn.in_transaction()) proxy.commit() is_false(self.conn.in_transaction()) is_false(self.conn.in_transaction()) def test_transaction_per_migration_transactional_ddl(self): context = self._fixture( {"transaction_per_migration": True, "transactional_ddl": True} ) is_false(self.conn.in_transaction()) with context.begin_transaction(): is_false(self.conn.in_transaction()) with context.begin_transaction(_per_migration=True): is_true(self.conn.in_transaction()) is_false(self.conn.in_transaction()) is_false(self.conn.in_transaction()) def test_transaction_per_migration_non_transactional_ddl(self): context = self._fixture( {"transaction_per_migration": True, "transactional_ddl": False} ) is_false(self.conn.in_transaction()) with context.begin_transaction(): is_false(self.conn.in_transaction()) with context.begin_transaction(_per_migration=True): is_true(self.conn.in_transaction()) is_false(self.conn.in_transaction()) is_false(self.conn.in_transaction()) def test_transaction_per_all_transactional_ddl(self): context = self._fixture({"transactional_ddl": True}) is_false(self.conn.in_transaction()) with context.begin_transaction(): is_true(self.conn.in_transaction()) with context.begin_transaction(_per_migration=True): is_true(self.conn.in_transaction()) is_true(self.conn.in_transaction()) is_false(self.conn.in_transaction()) def test_transaction_per_all_non_transactional_ddl(self): context = self._fixture({"transactional_ddl": False}) is_false(self.conn.in_transaction()) with context.begin_transaction(): is_false(self.conn.in_transaction()) with context.begin_transaction(_per_migration=True): is_true(self.conn.in_transaction()) is_false(self.conn.in_transaction()) is_false(self.conn.in_transaction()) def test_transaction_per_all_sqlmode(self): context = self._fixture({"as_sql": True}) context.execute("step 1") with context.begin_transaction(): context.execute("step 2") with context.begin_transaction(_per_migration=True): context.execute("step 3") context.execute("step 4") context.execute("step 5") if context.impl.transactional_ddl: self._assert_impl_steps( "step 1", "BEGIN", "step 2", "step 3", "step 4", "COMMIT", "step 5", ) else: self._assert_impl_steps( "step 1", "step 2", "step 3", "step 4", "step 5" ) def test_transaction_per_migration_sqlmode(self): context = self._fixture( {"as_sql": True, "transaction_per_migration": True} ) context.execute("step 1") with context.begin_transaction(): context.execute("step 2") with context.begin_transaction(_per_migration=True): context.execute("step 3") context.execute("step 4") context.execute("step 5") if context.impl.transactional_ddl: self._assert_impl_steps( "step 1", "step 2", "BEGIN", "step 3", "COMMIT", "step 4", 
"step 5", ) else: self._assert_impl_steps( "step 1", "step 2", "step 3", "step 4", "step 5" ) @config.requirements.autocommit_isolation def test_autocommit_block(self): context = self._fixture({"transaction_per_migration": True}) is_false(self.conn.in_transaction()) with context.begin_transaction(): is_false(self.conn.in_transaction()) with context.begin_transaction(_per_migration=True): is_true(self.conn.in_transaction()) with context.autocommit_block(): # in 1.x, self.conn is separate due to the # execution_options call. however for future they are the # same connection and there is a "transaction" block # despite autocommit if self.is_sqlalchemy_future: is_(context.connection, self.conn) else: is_not_(context.connection, self.conn) is_false(self.conn.in_transaction()) eq_( context.connection._execution_options[ "isolation_level" ], "AUTOCOMMIT", ) ne_( context.connection._execution_options.get( "isolation_level", None ), "AUTOCOMMIT", ) is_true(self.conn.in_transaction()) is_false(self.conn.in_transaction()) is_false(self.conn.in_transaction()) @config.requirements.autocommit_isolation def test_autocommit_block_no_transaction(self): context = self._fixture({"transaction_per_migration": True}) is_false(self.conn.in_transaction()) with context.autocommit_block(): is_true(context.connection.in_transaction()) # in 1.x, self.conn is separate due to the execution_options # call. however for future they are the same connection and there # is a "transaction" block despite autocommit if self.is_sqlalchemy_future: is_(context.connection, self.conn) else: is_not_(context.connection, self.conn) is_false(self.conn.in_transaction()) eq_( context.connection._execution_options["isolation_level"], "AUTOCOMMIT", ) ne_( context.connection._execution_options.get("isolation_level", None), "AUTOCOMMIT", ) is_false(self.conn.in_transaction()) def test_autocommit_block_transactional_ddl_sqlmode(self): context = self._fixture( { "transaction_per_migration": True, "transactional_ddl": True, "as_sql": True, } ) with context.begin_transaction(): context.execute("step 1") with context.begin_transaction(_per_migration=True): context.execute("step 2") with context.autocommit_block(): context.execute("step 3") context.execute("step 4") context.execute("step 5") self._assert_impl_steps( "step 1", "BEGIN", "step 2", "COMMIT", "step 3", "BEGIN", "step 4", "COMMIT", "step 5", ) def test_autocommit_block_nontransactional_ddl_sqlmode(self): context = self._fixture( { "transaction_per_migration": True, "transactional_ddl": False, "as_sql": True, } ) with context.begin_transaction(): context.execute("step 1") with context.begin_transaction(_per_migration=True): context.execute("step 2") with context.autocommit_block(): context.execute("step 3") context.execute("step 4") context.execute("step 5") self._assert_impl_steps( "step 1", "step 2", "step 3", "step 4", "step 5" ) def _assert_impl_steps(self, *steps): to_check = self.context.output_buffer.getvalue() self.context.impl.output_buffer = buf = io.StringIO() for step in steps: if step == "BEGIN": self.context.impl.emit_begin() elif step == "COMMIT": self.context.impl.emit_commit() else: self.context.impl._exec(step) eq_(to_check, buf.getvalue()) alembic-rel_1_7_6/alembic/testing/suite/test_op.py000066400000000000000000000024771417624537100224020ustar00rootroot00000000000000"""Test against the builders in the op.* module.""" from sqlalchemy import Column from sqlalchemy import event from sqlalchemy import Integer from sqlalchemy import String from sqlalchemy import Table from 
sqlalchemy.sql import text from ...testing.fixtures import AlterColRoundTripFixture from ...testing.fixtures import TestBase @event.listens_for(Table, "after_parent_attach") def _add_cols(table, metadata): if table.name == "tbl_with_auto_appended_column": table.append_column(Column("bat", Integer)) class BackendAlterColumnTest(AlterColRoundTripFixture, TestBase): __backend__ = True def test_rename_column(self): self._run_alter_col({}, {"name": "newname"}) def test_modify_type_int_str(self): self._run_alter_col({"type": Integer()}, {"type": String(50)}) def test_add_server_default_int(self): self._run_alter_col({"type": Integer}, {"server_default": text("5")}) def test_modify_server_default_int(self): self._run_alter_col( {"type": Integer, "server_default": text("2")}, {"server_default": text("5")}, ) def test_modify_nullable_to_non(self): self._run_alter_col({}, {"nullable": False}) def test_modify_non_nullable_to_nullable(self): self._run_alter_col({"nullable": False}, {"nullable": True}) alembic-rel_1_7_6/alembic/testing/util.py000066400000000000000000000063271417624537100205470ustar00rootroot00000000000000# testing/util.py # Copyright (C) 2005-2019 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import re import types from typing import Union def flag_combinations(*combinations): """A facade around @testing.combinations() oriented towards boolean keyword-based arguments. Basically generates a nice looking identifier based on the keywords and also sets up the argument names. E.g.:: @testing.flag_combinations( dict(lazy=False, passive=False), dict(lazy=True, passive=False), dict(lazy=False, passive=True), dict(lazy=False, passive=True, raiseload=True), ) would result in:: @testing.combinations( ('', False, False, False), ('lazy', True, False, False), ('lazy_passive', True, True, False), ('lazy_passive', True, True, True), id_='iaaa', argnames='lazy,passive,raiseload' ) """ from sqlalchemy.testing import config keys = set() for d in combinations: keys.update(d) keys = sorted(keys) return config.combinations( *[ ("_".join(k for k in keys if d.get(k, False)),) + tuple(d.get(k, False) for k in keys) for d in combinations ], id_="i" + ("a" * len(keys)), argnames=",".join(keys) ) def resolve_lambda(__fn, **kw): """Given a no-arg lambda and a namespace, return a new lambda that has all the values filled in. This is used so that we can have module-level fixtures that refer to instance-level variables using lambdas. """ glb = dict(__fn.__globals__) glb.update(kw) new_fn = types.FunctionType(__fn.__code__, glb) return new_fn() def metadata_fixture(ddl="function"): """Provide MetaData for a pytest fixture.""" from sqlalchemy.testing import config from . 
import fixture_functions def decorate(fn): def run_ddl(self): from sqlalchemy import schema metadata = self.metadata = schema.MetaData() try: result = fn(self, metadata) metadata.create_all(config.db) # TODO: # somehow get a per-function dml erase fixture here yield result finally: metadata.drop_all(config.db) return fixture_functions.fixture(scope=ddl)(run_ddl) return decorate def _safe_int(value: str) -> Union[int, str]: try: return int(value) except: return value def testing_engine(url=None, options=None, future=False): from sqlalchemy.testing import config from sqlalchemy.testing.engines import testing_engine from sqlalchemy import __version__ _vers = tuple( [_safe_int(x) for x in re.findall(r"(\d+|[abc]\d)", __version__)] ) sqla_1x = _vers < (2,) if not future: future = getattr(config._current.options, "future_engine", False) if sqla_1x: kw = {"future": future} if future else {} else: kw = {} return testing_engine(url, options, **kw) alembic-rel_1_7_6/alembic/testing/warnings.py000066400000000000000000000015461417624537100214200ustar00rootroot00000000000000# testing/warnings.py # Copyright (C) 2005-2021 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from __future__ import absolute_import import warnings from sqlalchemy import exc as sa_exc def setup_filters(): """Set global warning behavior for the test suite.""" warnings.resetwarnings() warnings.filterwarnings("error", category=sa_exc.SADeprecationWarning) warnings.filterwarnings("error", category=sa_exc.SAWarning) # some selected deprecations... warnings.filterwarnings("error", category=DeprecationWarning) try: import pytest except ImportError: pass else: warnings.filterwarnings( "once", category=pytest.PytestDeprecationWarning ) alembic-rel_1_7_6/alembic/util/000077500000000000000000000000001417624537100165105ustar00rootroot00000000000000alembic-rel_1_7_6/alembic/util/__init__.py000066400000000000000000000021021417624537100206140ustar00rootroot00000000000000from .editor import open_in_editor from .exc import CommandError from .langhelpers import _with_legacy_names from .langhelpers import asbool from .langhelpers import dedupe_tuple from .langhelpers import Dispatcher from .langhelpers import immutabledict from .langhelpers import memoized_property from .langhelpers import ModuleClsProxy from .langhelpers import rev_id from .langhelpers import to_list from .langhelpers import to_tuple from .langhelpers import unique_list from .messaging import err from .messaging import format_as_comma from .messaging import msg from .messaging import obfuscate_url_pw from .messaging import status from .messaging import warn from .messaging import write_outstream from .pyfiles import coerce_resource_to_filename from .pyfiles import load_python_file from .pyfiles import pyc_file_from_path from .pyfiles import template_to_file from .sqla_compat import has_computed from .sqla_compat import sqla_13 from .sqla_compat import sqla_14 from .sqla_compat import sqla_1x if not sqla_13: raise CommandError("SQLAlchemy 1.3.0 or greater is required.") alembic-rel_1_7_6/alembic/util/compat.py000066400000000000000000000032171417624537100203500ustar00rootroot00000000000000import io import os import sys from typing import Tuple from sqlalchemy.util import inspect_getfullargspec # noqa from sqlalchemy.util.compat import inspect_formatargspec # noqa is_posix = os.name == "posix" py39 = sys.version_info >= (3, 9) py38 = sys.version_info >= (3, 8) 
py37 = sys.version_info >= (3, 7) # produce a wrapper that allows encoded text to stream # into a given buffer, but doesn't close it. # not sure of a more idiomatic approach to this. class EncodedIO(io.TextIOWrapper): def close(self) -> None: pass if py39: from importlib import resources as importlib_resources from importlib import metadata as importlib_metadata from importlib.metadata import EntryPoint else: import importlib_resources # type:ignore[no-redef] # noqa import importlib_metadata # type:ignore[no-redef] # noqa from importlib_metadata import EntryPoint # type:ignore # noqa def importlib_metadata_get(group: str) -> Tuple[EntryPoint, ...]: ep = importlib_metadata.entry_points() if hasattr(ep, "select"): return ep.select(group=group) # type:ignore[attr-defined] else: return ep.get(group, ()) def formatannotation_fwdref(annotation, base_module=None): """the python 3.7 _formatannotation with an extra repr() for 3rd party modules""" if getattr(annotation, "__module__", None) == "typing": return repr(annotation).replace("typing.", "") if isinstance(annotation, type): if annotation.__module__ in ("builtins", base_module): return annotation.__qualname__ return repr(annotation.__module__ + "." + annotation.__qualname__) return repr(annotation) alembic-rel_1_7_6/alembic/util/editor.py000066400000000000000000000047161417624537100203600ustar00rootroot00000000000000import os from os.path import exists from os.path import join from os.path import splitext from subprocess import check_call from typing import Dict from typing import List from typing import Mapping from typing import Optional from .compat import is_posix from .exc import CommandError def open_in_editor( filename: str, environ: Optional[Dict[str, str]] = None ) -> None: """ Opens the given file in a text editor. If the environment variable ``EDITOR`` is set, this is taken as preference. Otherwise, a list of commonly installed editors is tried. If no editor matches, an :py:exc:`OSError` is raised. :param filename: The filename to open. Will be passed verbatim to the editor command. :param environ: An optional drop-in replacement for ``os.environ``. Used mainly for testing. """ env = os.environ if environ is None else environ try: editor = _find_editor(env) check_call([editor, filename]) except Exception as exc: raise CommandError("Error executing editor (%s)" % (exc,)) from exc def _find_editor(environ: Mapping[str, str]) -> str: candidates = _default_editors() for i, var in enumerate(("EDITOR", "VISUAL")): if var in environ: user_choice = environ[var] if exists(user_choice): return user_choice if os.sep not in user_choice: candidates.insert(i, user_choice) for candidate in candidates: path = _find_executable(candidate, environ) if path is not None: return path raise OSError( "No suitable editor found. Please set the " '"EDITOR" or "VISUAL" environment variables' ) def _find_executable( candidate: str, environ: Mapping[str, str] ) -> Optional[str]: # Assuming this is on the PATH, we need to determine it's absolute # location. Otherwise, ``check_call`` will fail if not is_posix and splitext(candidate)[1] != ".exe": candidate += ".exe" for path in environ.get("PATH", "").split(os.pathsep): value = join(path, candidate) if exists(value): return value return None def _default_editors() -> List[str]: # Look for an editor. 
Prefer the user's choice by env-var, fall back to # most commonly installed editor (nano/vim) if is_posix: return ["sensible-editor", "editor", "nano", "vim", "code"] else: return ["code.exe", "notepad++.exe", "notepad.exe"] alembic-rel_1_7_6/alembic/util/exc.py000066400000000000000000000000501417624537100176340ustar00rootroot00000000000000class CommandError(Exception): pass alembic-rel_1_7_6/alembic/util/langhelpers.py000066400000000000000000000203231417624537100213660ustar00rootroot00000000000000import collections from collections.abc import Iterable import textwrap from typing import Any from typing import Callable from typing import Dict from typing import List from typing import Optional from typing import overload from typing import Sequence from typing import Tuple from typing import TypeVar from typing import Union import uuid import warnings from sqlalchemy.util import asbool # noqa from sqlalchemy.util import immutabledict # noqa from sqlalchemy.util import memoized_property # noqa from sqlalchemy.util import to_list # noqa from sqlalchemy.util import unique_list # noqa from .compat import inspect_getfullargspec _T = TypeVar("_T") class _ModuleClsMeta(type): def __setattr__(cls, key: str, value: Callable) -> None: super(_ModuleClsMeta, cls).__setattr__(key, value) cls._update_module_proxies(key) # type: ignore class ModuleClsProxy(metaclass=_ModuleClsMeta): """Create module level proxy functions for the methods on a given class. The functions will have a compatible signature as the methods. """ _setups: Dict[type, Tuple[set, list]] = collections.defaultdict( lambda: (set(), []) ) @classmethod def _update_module_proxies(cls, name: str) -> None: attr_names, modules = cls._setups[cls] for globals_, locals_ in modules: cls._add_proxied_attribute(name, globals_, locals_, attr_names) def _install_proxy(self) -> None: attr_names, modules = self._setups[self.__class__] for globals_, locals_ in modules: globals_["_proxy"] = self for attr_name in attr_names: globals_[attr_name] = getattr(self, attr_name) def _remove_proxy(self) -> None: attr_names, modules = self._setups[self.__class__] for globals_, locals_ in modules: globals_["_proxy"] = None for attr_name in attr_names: del globals_[attr_name] @classmethod def create_module_class_proxy(cls, globals_, locals_): attr_names, modules = cls._setups[cls] modules.append((globals_, locals_)) cls._setup_proxy(globals_, locals_, attr_names) @classmethod def _setup_proxy(cls, globals_, locals_, attr_names): for methname in dir(cls): cls._add_proxied_attribute(methname, globals_, locals_, attr_names) @classmethod def _add_proxied_attribute(cls, methname, globals_, locals_, attr_names): if not methname.startswith("_"): meth = getattr(cls, methname) if callable(meth): locals_[methname] = cls._create_method_proxy( methname, globals_, locals_ ) else: attr_names.add(methname) @classmethod def _create_method_proxy(cls, name, globals_, locals_): fn = getattr(cls, name) def _name_error(name, from_): raise NameError( "Can't invoke function '%s', as the proxy object has " "not yet been " "established for the Alembic '%s' class. " "Try placing this code inside a callable." 
% (name, cls.__name__) ) from from_ globals_["_name_error"] = _name_error translations = getattr(fn, "_legacy_translations", []) if translations: spec = inspect_getfullargspec(fn) if spec[0] and spec[0][0] == "self": spec[0].pop(0) outer_args = inner_args = "*args, **kw" translate_str = "args, kw = _translate(%r, %r, %r, args, kw)" % ( fn.__name__, tuple(spec), translations, ) def translate(fn_name, spec, translations, args, kw): return_kw = {} return_args = [] for oldname, newname in translations: if oldname in kw: warnings.warn( "Argument %r is now named %r " "for method %s()." % (oldname, newname, fn_name) ) return_kw[newname] = kw.pop(oldname) return_kw.update(kw) args = list(args) if spec[3]: pos_only = spec[0][: -len(spec[3])] else: pos_only = spec[0] for arg in pos_only: if arg not in return_kw: try: return_args.append(args.pop(0)) except IndexError: raise TypeError( "missing required positional argument: %s" % arg ) return_args.extend(args) return return_args, return_kw globals_["_translate"] = translate else: outer_args = "*args, **kw" inner_args = "*args, **kw" translate_str = "" func_text = textwrap.dedent( """\ def %(name)s(%(args)s): %(doc)r %(translate)s try: p = _proxy except NameError as ne: _name_error('%(name)s', ne) return _proxy.%(name)s(%(apply_kw)s) e """ % { "name": name, "translate": translate_str, "args": outer_args, "apply_kw": inner_args, "doc": fn.__doc__, } ) lcl = {} exec(func_text, globals_, lcl) return lcl[name] def _with_legacy_names(translations): def decorate(fn): fn._legacy_translations = translations return fn return decorate def rev_id() -> str: return uuid.uuid4().hex[-12:] @overload def to_tuple(x: Any, default: tuple) -> tuple: ... @overload def to_tuple(x: None, default: _T = None) -> _T: ... @overload def to_tuple(x: Any, default: Optional[tuple] = None) -> tuple: ... 
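# ---------------------------------------------------------------------------
# Editorial annotation, not part of the original Alembic source: the three
# @overload stubs above only describe typing; the runtime behavior is the
# plain implementation that follows.  A minimal usage sketch (assuming the
# module is imported as alembic.util.langhelpers, its location in this tree):
#
#     from alembic.util.langhelpers import to_tuple
#
#     to_tuple(None)        # returns the supplied default (None by default)
#     to_tuple("abc")       # ("abc",)  -- strings are wrapped, not iterated
#     to_tuple(["a", "b"])  # ("a", "b")
#     to_tuple(5)           # (5,)
# ---------------------------------------------------------------------------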
def to_tuple(x, default=None): if x is None: return default elif isinstance(x, str): return (x,) elif isinstance(x, Iterable): return tuple(x) else: return (x,) def dedupe_tuple(tup: Tuple[str, ...]) -> Tuple[str, ...]: return tuple(unique_list(tup)) class Dispatcher: def __init__(self, uselist: bool = False) -> None: self._registry: Dict[tuple, Any] = {} self.uselist = uselist def dispatch_for( self, target: Any, qualifier: str = "default" ) -> Callable: def decorate(fn): if self.uselist: self._registry.setdefault((target, qualifier), []).append(fn) else: assert (target, qualifier) not in self._registry self._registry[(target, qualifier)] = fn return fn return decorate def dispatch(self, obj: Any, qualifier: str = "default") -> Any: if isinstance(obj, str): targets: Sequence = [obj] elif isinstance(obj, type): targets = obj.__mro__ else: targets = type(obj).__mro__ for spcls in targets: if qualifier != "default" and (spcls, qualifier) in self._registry: return self._fn_or_list(self._registry[(spcls, qualifier)]) elif (spcls, "default") in self._registry: return self._fn_or_list(self._registry[(spcls, "default")]) else: raise ValueError("no dispatch function for object: %s" % obj) def _fn_or_list( self, fn_or_list: Union[List[Callable], Callable] ) -> Callable: if self.uselist: def go(*arg, **kw): for fn in fn_or_list: fn(*arg, **kw) return go else: return fn_or_list # type: ignore def branch(self) -> "Dispatcher": """Return a copy of this dispatcher that is independently writable.""" d = Dispatcher() if self.uselist: d._registry.update( (k, [fn for fn in self._registry[k]]) for k in self._registry ) else: d._registry.update(self._registry) return d alembic-rel_1_7_6/alembic/util/messaging.py000066400000000000000000000055621417624537100210470ustar00rootroot00000000000000from collections.abc import Iterable import logging import sys import textwrap from typing import Any from typing import Callable from typing import Optional from typing import TextIO from typing import Union import warnings from sqlalchemy.engine import url from . import sqla_compat log = logging.getLogger(__name__) # disable "no handler found" errors logging.getLogger("alembic").addHandler(logging.NullHandler()) try: import fcntl import termios import struct ioctl = fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack("HHHH", 0, 0, 0, 0)) _h, TERMWIDTH, _hp, _wp = struct.unpack("HHHH", ioctl) if TERMWIDTH <= 0: # can occur if running in emacs pseudo-tty TERMWIDTH = None except (ImportError, IOError): TERMWIDTH = None def write_outstream(stream: TextIO, *text) -> None: encoding = getattr(stream, "encoding", "ascii") or "ascii" for t in text: if not isinstance(t, bytes): t = t.encode(encoding, "replace") t = t.decode(encoding) try: stream.write(t) except IOError: # suppress "broken pipe" errors. # no known way to handle this on Python 3 however # as the exception is "ignored" (noisily) in TextIOWrapper. 
break def status(_statmsg: str, fn: Callable, *arg, **kw) -> Any: newline = kw.pop("newline", False) msg(_statmsg + " ...", newline, True) try: ret = fn(*arg, **kw) write_outstream(sys.stdout, " done\n") return ret except: write_outstream(sys.stdout, " FAILED\n") raise def err(message: str): log.error(message) msg("FAILED: %s" % message) sys.exit(-1) def obfuscate_url_pw(input_url: str) -> str: u = url.make_url(input_url) if u.password: if sqla_compat.sqla_14: u = u.set(password="XXXXX") else: u.password = "XXXXX" # type: ignore[misc] return str(u) def warn(msg: str, stacklevel: int = 2) -> None: warnings.warn(msg, UserWarning, stacklevel=stacklevel) def msg(msg: str, newline: bool = True, flush: bool = False) -> None: if TERMWIDTH is None: write_outstream(sys.stdout, msg) if newline: write_outstream(sys.stdout, "\n") else: # left indent output lines lines = textwrap.wrap(msg, TERMWIDTH) if len(lines) > 1: for line in lines[0:-1]: write_outstream(sys.stdout, " ", line, "\n") write_outstream(sys.stdout, " ", lines[-1], ("\n" if newline else "")) if flush: sys.stdout.flush() def format_as_comma(value: Optional[Union[str, "Iterable[str]"]]) -> str: if value is None: return "" elif isinstance(value, str): return value elif isinstance(value, Iterable): return ", ".join(value) else: raise ValueError("Don't know how to comma-format %r" % value) alembic-rel_1_7_6/alembic/util/pyfiles.py000066400000000000000000000064121417624537100205400ustar00rootroot00000000000000import atexit from contextlib import ExitStack import importlib import importlib.machinery import importlib.util import os import re import tempfile from typing import Optional from mako import exceptions from mako.template import Template from . import compat from .exc import CommandError def template_to_file( template_file: str, dest: str, output_encoding: str, **kw ) -> None: template = Template(filename=template_file) try: output = template.render_unicode(**kw).encode(output_encoding) except: with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as ntf: ntf.write( exceptions.text_error_template() .render_unicode() .encode(output_encoding) ) fname = ntf.name raise CommandError( "Template rendering failed; see %s for a " "template-oriented traceback." % fname ) else: with open(dest, "wb") as f: f.write(output) def coerce_resource_to_filename(fname: str) -> str: """Interpret a filename as either a filesystem location or as a package resource. Names that are non absolute paths and contain a colon are interpreted as resources and coerced to a file location. 
""" if not os.path.isabs(fname) and ":" in fname: tokens = fname.split(":") # from https://importlib-resources.readthedocs.io/en/latest/migration.html#pkg-resources-resource-filename # noqa E501 file_manager = ExitStack() atexit.register(file_manager.close) ref = compat.importlib_resources.files(tokens[0]) for tok in tokens[1:]: ref = ref / tok fname = file_manager.enter_context( # type: ignore[assignment] compat.importlib_resources.as_file(ref) ) return fname def pyc_file_from_path(path: str) -> Optional[str]: """Given a python source path, locate the .pyc.""" candidate = importlib.util.cache_from_source(path) if os.path.exists(candidate): return candidate # even for pep3147, fall back to the old way of finding .pyc files, # to support sourceless operation filepath, ext = os.path.splitext(path) for ext in importlib.machinery.BYTECODE_SUFFIXES: if os.path.exists(filepath + ext): return filepath + ext else: return None def load_python_file(dir_: str, filename: str): """Load a file from the given path as a Python module.""" module_id = re.sub(r"\W", "_", filename) path = os.path.join(dir_, filename) _, ext = os.path.splitext(filename) if ext == ".py": if os.path.exists(path): module = load_module_py(module_id, path) else: pyc_path = pyc_file_from_path(path) if pyc_path is None: raise ImportError("Can't find Python file %s" % path) else: module = load_module_py(module_id, pyc_path) elif ext in (".pyc", ".pyo"): module = load_module_py(module_id, path) return module def load_module_py(module_id: str, path: str): spec = importlib.util.spec_from_file_location(module_id, path) assert spec module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) # type: ignore return module alembic-rel_1_7_6/alembic/util/sqla_compat.py000066400000000000000000000360521417624537100213730ustar00rootroot00000000000000import contextlib import re from typing import Iterator from typing import Mapping from typing import Optional from typing import TYPE_CHECKING from typing import TypeVar from typing import Union from sqlalchemy import __version__ from sqlalchemy import inspect from sqlalchemy import schema from sqlalchemy import sql from sqlalchemy import types as sqltypes from sqlalchemy.engine import url from sqlalchemy.ext.compiler import compiles from sqlalchemy.schema import CheckConstraint from sqlalchemy.schema import Column from sqlalchemy.schema import ForeignKeyConstraint from sqlalchemy.sql import visitors from sqlalchemy.sql.elements import BindParameter from sqlalchemy.sql.elements import quoted_name from sqlalchemy.sql.elements import TextClause from sqlalchemy.sql.visitors import traverse if TYPE_CHECKING: from sqlalchemy import Index from sqlalchemy import Table from sqlalchemy.engine import Connection from sqlalchemy.engine import Dialect from sqlalchemy.engine import Transaction from sqlalchemy.engine.reflection import Inspector from sqlalchemy.sql.base import ColumnCollection from sqlalchemy.sql.compiler import SQLCompiler from sqlalchemy.sql.dml import Insert from sqlalchemy.sql.elements import ColumnClause from sqlalchemy.sql.elements import ColumnElement from sqlalchemy.sql.schema import Constraint from sqlalchemy.sql.schema import SchemaItem from sqlalchemy.sql.selectable import Select from sqlalchemy.sql.selectable import TableClause _CE = TypeVar("_CE", bound=Union["ColumnElement", "SchemaItem"]) def _safe_int(value: str) -> Union[int, str]: try: return int(value) except: return value _vers = tuple( [_safe_int(x) for x in re.findall(r"(\d+|[abc]\d)", __version__)] ) sqla_13 
= _vers >= (1, 3) sqla_14 = _vers >= (1, 4) sqla_14_26 = _vers >= (1, 4, 26) if sqla_14: # when future engine merges, this can be again based on version string from sqlalchemy.engine import Connection as legacy_connection sqla_1x = not hasattr(legacy_connection, "commit") else: sqla_1x = True try: from sqlalchemy import Computed # noqa except ImportError: Computed = type(None) # type: ignore has_computed = False has_computed_reflection = False else: has_computed = True has_computed_reflection = _vers >= (1, 3, 16) try: from sqlalchemy import Identity # noqa except ImportError: Identity = type(None) # type: ignore has_identity = False else: # attributes common to Indentity and Sequence _identity_options_attrs = ( "start", "increment", "minvalue", "maxvalue", "nominvalue", "nomaxvalue", "cycle", "cache", "order", ) # attributes of Indentity _identity_attrs = _identity_options_attrs + ("on_null",) has_identity = True AUTOINCREMENT_DEFAULT = "auto" @contextlib.contextmanager def _ensure_scope_for_ddl( connection: Optional["Connection"], ) -> Iterator[None]: try: in_transaction = connection.in_transaction # type: ignore[union-attr] except AttributeError: # catch for MockConnection, None yield else: if not in_transaction(): assert connection is not None with connection.begin(): yield else: yield def _safe_begin_connection_transaction( connection: "Connection", ) -> "Transaction": transaction = _get_connection_transaction(connection) if transaction: return transaction else: return connection.begin() def _safe_commit_connection_transaction( connection: "Connection", ) -> None: transaction = _get_connection_transaction(connection) if transaction: transaction.commit() def _safe_rollback_connection_transaction( connection: "Connection", ) -> None: transaction = _get_connection_transaction(connection) if transaction: transaction.rollback() def _get_connection_in_transaction(connection: Optional["Connection"]) -> bool: try: in_transaction = connection.in_transaction # type: ignore except AttributeError: # catch for MockConnection return False else: return in_transaction() def _copy(schema_item: _CE, **kw) -> _CE: if hasattr(schema_item, "_copy"): return schema_item._copy(**kw) # type: ignore[union-attr] else: return schema_item.copy(**kw) # type: ignore[union-attr] def _get_connection_transaction( connection: "Connection", ) -> Optional["Transaction"]: if sqla_14: return connection.get_transaction() else: r = connection._root # type: ignore[attr-defined] return r._Connection__transaction def _create_url(*arg, **kw) -> url.URL: if hasattr(url.URL, "create"): return url.URL.create(*arg, **kw) else: return url.URL(*arg, **kw) def _connectable_has_table( connectable: "Connection", tablename: str, schemaname: Union[str, None] ) -> bool: if sqla_14: return inspect(connectable).has_table(tablename, schemaname) else: return connectable.dialect.has_table( connectable, tablename, schemaname ) def _exec_on_inspector(inspector, statement, **params): if sqla_14: with inspector._operation_context() as conn: return conn.execute(statement, params) else: return inspector.bind.execute(statement, params) def _nullability_might_be_unset(metadata_column): if not sqla_14: return metadata_column.nullable else: from sqlalchemy.sql import schema return ( metadata_column._user_defined_nullable is schema.NULL_UNSPECIFIED ) def _server_default_is_computed(*server_default) -> bool: if not has_computed: return False else: return any(isinstance(sd, Computed) for sd in server_default) def _server_default_is_identity(*server_default) 
-> bool: if not sqla_14: return False else: return any(isinstance(sd, Identity) for sd in server_default) def _table_for_constraint(constraint: "Constraint") -> "Table": if isinstance(constraint, ForeignKeyConstraint): table = constraint.parent assert table is not None return table else: return constraint.table def _columns_for_constraint(constraint): if isinstance(constraint, ForeignKeyConstraint): return [fk.parent for fk in constraint.elements] elif isinstance(constraint, CheckConstraint): return _find_columns(constraint.sqltext) else: return list(constraint.columns) def _reflect_table( inspector: "Inspector", table: "Table", include_cols: None ) -> None: if sqla_14: return inspector.reflect_table(table, None) else: return inspector.reflecttable(table, None) def _resolve_for_variant(type_, dialect): if _type_has_variants(type_): base_type, mapping = _get_variant_mapping(type_) return mapping.get(dialect.name, base_type) else: return type_ if hasattr(sqltypes.TypeEngine, "_variant_mapping"): def _type_has_variants(type_): return bool(type_._variant_mapping) def _get_variant_mapping(type_): return type_, type_._variant_mapping else: def _type_has_variants(type_): return type(type_) is sqltypes.Variant def _get_variant_mapping(type_): return type_.impl, type_.mapping def _fk_spec(constraint): source_columns = [ constraint.columns[key].name for key in constraint.column_keys ] source_table = constraint.parent.name source_schema = constraint.parent.schema target_schema = constraint.elements[0].column.table.schema target_table = constraint.elements[0].column.table.name target_columns = [element.column.name for element in constraint.elements] ondelete = constraint.ondelete onupdate = constraint.onupdate deferrable = constraint.deferrable initially = constraint.initially return ( source_schema, source_table, source_columns, target_schema, target_table, target_columns, onupdate, ondelete, deferrable, initially, ) def _fk_is_self_referential(constraint: "ForeignKeyConstraint") -> bool: spec = constraint.elements[0]._get_colspec() # type: ignore[attr-defined] tokens = spec.split(".") tokens.pop(-1) # colname tablekey = ".".join(tokens) assert constraint.parent is not None return tablekey == constraint.parent.key def _is_type_bound(constraint: "Constraint") -> bool: # this deals with SQLAlchemy #3260, don't copy CHECK constraints # that will be generated by the type. 
# new feature added for #3260 return constraint._type_bound # type: ignore[attr-defined] def _find_columns(clause): """locate Column objects within the given expression.""" cols = set() traverse(clause, {}, {"column": cols.add}) return cols def _remove_column_from_collection( collection: "ColumnCollection", column: Union["Column", "ColumnClause"] ) -> None: """remove a column from a ColumnCollection.""" # workaround for older SQLAlchemy, remove the # same object that's present assert column.key is not None to_remove = collection[column.key] collection.remove(to_remove) def _textual_index_column( table: "Table", text_: Union[str, "TextClause", "ColumnElement"] ) -> Union["ColumnElement", "Column"]: """a workaround for the Index construct's severe lack of flexibility""" if isinstance(text_, str): c = Column(text_, sqltypes.NULLTYPE) table.append_column(c) return c elif isinstance(text_, TextClause): return _textual_index_element(table, text_) elif isinstance(text_, sql.ColumnElement): return _copy_expression(text_, table) else: raise ValueError("String or text() construct expected") def _copy_expression(expression: _CE, target_table: "Table") -> _CE: def replace(col): if ( isinstance(col, Column) and col.table is not None and col.table is not target_table ): if col.name in target_table.c: return target_table.c[col.name] else: c = _copy(col) target_table.append_column(c) return c else: return None return visitors.replacement_traverse(expression, {}, replace) class _textual_index_element(sql.ColumnElement): """Wrap around a sqlalchemy text() construct in such a way that we appear like a column-oriented SQL expression to an Index construct. The issue here is that currently the Postgresql dialect, the biggest recipient of functional indexes, keys all the index expressions to the corresponding column expressions when rendering CREATE INDEX, so the Index we create here needs to have a .columns collection that is the same length as the .expressions collection. Ultimately SQLAlchemy should support text() expressions in indexes. See SQLAlchemy issue 3174. 
""" __visit_name__ = "_textual_idx_element" def __init__(self, table: "Table", text: "TextClause") -> None: self.table = table self.text = text self.key = text.text self.fake_column = schema.Column(self.text.text, sqltypes.NULLTYPE) table.append_column(self.fake_column) def get_children(self): return [self.fake_column] @compiles(_textual_index_element) def _render_textual_index_column( element: _textual_index_element, compiler: "SQLCompiler", **kw ) -> str: return compiler.process(element.text, **kw) class _literal_bindparam(BindParameter): pass @compiles(_literal_bindparam) def _render_literal_bindparam( element: _literal_bindparam, compiler: "SQLCompiler", **kw ) -> str: return compiler.render_literal_bindparam(element, **kw) def _get_index_expressions(idx): return list(idx.expressions) def _get_index_column_names(idx): return [getattr(exp, "name", None) for exp in _get_index_expressions(idx)] def _column_kwargs(col: "Column") -> Mapping: if sqla_13: return col.kwargs else: return {} def _get_constraint_final_name( constraint: Union["Index", "Constraint"], dialect: Optional["Dialect"] ) -> Optional[str]: if constraint.name is None: return None assert dialect is not None if sqla_14: # for SQLAlchemy 1.4 we would like to have the option to expand # the use of "deferred" names for constraints as well as to have # some flexibility with "None" name and similar; make use of new # SQLAlchemy API to return what would be the final compiled form of # the name for this dialect. return dialect.identifier_preparer.format_constraint( constraint, _alembic_quote=False ) else: # prior to SQLAlchemy 1.4, work around quoting logic to get at the # final compiled name without quotes. if hasattr(constraint.name, "quote"): # might be quoted_name, might be truncated_name, keep it the # same quoted_name_cls: type = type(constraint.name) else: quoted_name_cls = quoted_name new_name = quoted_name_cls(str(constraint.name), quote=False) constraint = constraint.__class__(name=new_name) if isinstance(constraint, schema.Index): # name should not be quoted. d = dialect.ddl_compiler(dialect, None) return d._prepared_index_name( # type: ignore[attr-defined] constraint ) else: # name should not be quoted. 
return dialect.identifier_preparer.format_constraint(constraint) def _constraint_is_named( constraint: Union["Constraint", "Index"], dialect: Optional["Dialect"] ) -> bool: if sqla_14: if constraint.name is None: return False assert dialect is not None name = dialect.identifier_preparer.format_constraint( constraint, _alembic_quote=False ) return name is not None else: return constraint.name is not None def _is_mariadb(mysql_dialect: "Dialect") -> bool: if sqla_14: return mysql_dialect.is_mariadb # type: ignore[attr-defined] else: return bool( mysql_dialect.server_version_info and mysql_dialect._is_mariadb # type: ignore[attr-defined] ) def _mariadb_normalized_version_info(mysql_dialect): return mysql_dialect._mariadb_normalized_version_info def _insert_inline(table: Union["TableClause", "Table"]) -> "Insert": if sqla_14: return table.insert().inline() else: return table.insert(inline=True) if sqla_14: from sqlalchemy import create_mock_engine from sqlalchemy import select as _select else: from sqlalchemy import create_engine def create_mock_engine(url, executor, **kw): # type: ignore[misc] return create_engine( "postgresql://", strategy="mock", executor=executor ) def _select(*columns, **kw) -> "Select": return sql.select(list(columns), **kw) alembic-rel_1_7_6/docs/000077500000000000000000000000001417624537100150675ustar00rootroot00000000000000alembic-rel_1_7_6/docs/build/000077500000000000000000000000001417624537100161665ustar00rootroot00000000000000alembic-rel_1_7_6/docs/build/Makefile000066400000000000000000000064551417624537100176400ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = output # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dist-html same as html, but places files in /doc" @echo " dirhtml to make HTML files named index.html in directories" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dist-html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html cp -R $(BUILDDIR)/html/* ../ rm -fr $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in ../." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." 
htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Alembic.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Alembic.qhc" latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ "run these through (pdf)latex." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt."
alembic-rel_1_7_6/docs/build/_static/000077500000000000000000000000001417624537100176145ustar00rootroot00000000000000
alembic-rel_1_7_6/docs/build/_static/nature_override.css000066400000000000000000000005271417624537100235270ustar00rootroot00000000000000@import url("nature.css"); @import url("site_custom_css.css"); .versionadded, .versionchanged, .deprecated { background-color: #FFFFCC; border: 1px solid #FFFF66; margin-bottom: 10px; margin-top: 10px; padding: 7px; } .versionadded > p > span, .versionchanged > p > span, .deprecated > p > span{ font-style: italic; }
alembic-rel_1_7_6/docs/build/_static/site_custom_css.css000066400000000000000000000000001417624537100235220ustar00rootroot00000000000000
alembic-rel_1_7_6/docs/build/_templates/000077500000000000000000000000001417624537100203235ustar00rootroot00000000000000
alembic-rel_1_7_6/docs/build/_templates/site_custom_sidebars.html000066400000000000000000000000001417624537100254110ustar00rootroot00000000000000
alembic-rel_1_7_6/docs/build/api/000077500000000000000000000000001417624537100167375ustar00rootroot00000000000000
alembic-rel_1_7_6/docs/build/api/api_overview.png [binary PNG image data omitted]
alembic-rel_1_7_6/docs/build/api/autogenerate.rst000066400000000000000000000606401417624537100221610ustar00rootroot00000000000000.. _alembic.autogenerate.toplevel: ============== Autogeneration ============== .. note:: this section discusses the **internal API of Alembic** as regards the autogeneration feature of the ``alembic revision`` command. This section is only useful for developers who wish to extend the capabilities of Alembic. For general documentation on the autogenerate feature, please see :doc:`/autogenerate`. The autogeneration system has a wide degree of public API, including the following areas: 1. The ability to do a "diff" of a :class:`~sqlalchemy.schema.MetaData` object against a database, and receive a data structure back. This structure is available either as a rudimentary list of changes, or as a :class:`.MigrateOperation` structure. 2. The ability to alter how the ``alembic revision`` command generates revision scripts, including support for multiple revision scripts generated in one pass. 3. The ability to add new operation directives to autogeneration, including custom schema/model comparison functions and revision script rendering. Getting Diffs ============== The simplest API autogenerate provides is the "schema comparison" API; these are simple functions that will run all registered "comparison" functions between a :class:`~sqlalchemy.schema.MetaData` object and a database backend to produce a structure showing how they differ. The two functions provided are :func:`.compare_metadata`, which is more of the "legacy" function that produces diff tuples, and :func:`.produce_migrations`, which produces a structure consisting of operation directives detailed in :ref:`alembic.operations.toplevel`. .. autofunction:: alembic.autogenerate.compare_metadata .. autofunction:: alembic.autogenerate.produce_migrations ..
_customizing_revision: Customizing Revision Generation ========================================== The ``alembic revision`` command, also available programmatically via :func:`.command.revision`, essentially produces a single migration script after being run. Whether or not the ``--autogenerate`` option was specified basically determines if this script is a blank revision script with empty ``upgrade()`` and ``downgrade()`` functions, or was produced with alembic operation directives as the result of autogenerate. In either case, the system creates a full plan of what is to be done in the form of a :class:`.MigrateOperation` structure, which is then used to produce the script. For example, suppose we ran ``alembic revision --autogenerate``, and the end result was that it produced a new revision ``'eced083f5df'`` with the following contents:: """create the organization table.""" # revision identifiers, used by Alembic. revision = 'eced083f5df' down_revision = 'beafc7d709f' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'organization', sa.Column('id', sa.Integer(), primary_key=True), sa.Column('name', sa.String(50), nullable=False) ) op.add_column( 'user', sa.Column('organization_id', sa.Integer()) ) op.create_foreign_key( 'org_fk', 'user', 'organization', ['organization_id'], ['id'] ) def downgrade(): op.drop_constraint('org_fk', 'user') op.drop_column('user', 'organization_id') op.drop_table('organization') The above script is generated by a :class:`.MigrateOperation` structure that looks like this:: from alembic.operations import ops import sqlalchemy as sa migration_script = ops.MigrationScript( 'eced083f5df', ops.UpgradeOps( ops=[ ops.CreateTableOp( 'organization', [ sa.Column('id', sa.Integer(), primary_key=True), sa.Column('name', sa.String(50), nullable=False) ] ), ops.ModifyTableOps( 'user', ops=[ ops.AddColumnOp( 'user', sa.Column('organization_id', sa.Integer()) ), ops.CreateForeignKeyOp( 'org_fk', 'user', 'organization', ['organization_id'], ['id'] ) ] ) ] ), ops.DowngradeOps( ops=[ ops.ModifyTableOps( 'user', ops=[ ops.DropConstraintOp('org_fk', 'user'), ops.DropColumnOp('user', 'organization_id') ] ), ops.DropTableOp('organization') ] ), message='create the organization table.' ) When we deal with a :class:`.MigrationScript` structure, we can render the upgrade/downgrade sections into strings for debugging purposes using the :func:`.render_python_code` helper function:: from alembic.autogenerate import render_python_code print(render_python_code(migration_script.upgrade_ops)) Renders:: ### commands auto generated by Alembic - please adjust! ### op.create_table('organization', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(length=50), nullable=False), sa.PrimaryKeyConstraint('id') ) op.add_column('user', sa.Column('organization_id', sa.Integer(), nullable=True)) op.create_foreign_key('org_fk', 'user', 'organization', ['organization_id'], ['id']) ### end Alembic commands ### Given that structures like the above are used to generate new revision files, and that we'd like to be able to alter these as they are created, we then need a system to access this structure when the :func:`.command.revision` command is used. The :paramref:`.EnvironmentContext.configure.process_revision_directives` parameter gives us a way to alter this. This is a function that is passed the above structure as generated by Alembic, giving us a chance to alter it. 
For example, if we wanted to put all the "upgrade" operations into a certain branch, and we wanted our script to not have any "downgrade" operations at all, we could build an extension as follows, illustrated within an ``env.py`` script:: def process_revision_directives(context, revision, directives): script = directives[0] # set specific branch script.head = "mybranch@head" # erase downgrade operations script.downgrade_ops.ops[:] = [] # ... def run_migrations_online(): # ... with engine.connect() as connection: context.configure( connection=connection, target_metadata=target_metadata, process_revision_directives=process_revision_directives) with context.begin_transaction(): context.run_migrations() Above, the ``directives`` argument is a Python list. We may alter the given structure within this list in-place, or replace it with a new structure consisting of zero or more :class:`.MigrationScript` directives. The :func:`.command.revision` command will then produce scripts corresponding to whatever is in this list. .. seealso:: More examples of using :paramref:`.EnvironmentContext.configure.process_revision_directives` :ref:`cookbook_no_empty_migrations` :ref:`cookbook_dont_emit_drop_index` :ref:`cookbook_custom_sorting_create_table` .. autofunction:: alembic.autogenerate.render_python_code .. _autogen_rewriter: Fine-Grained Autogenerate Generation with Rewriters --------------------------------------------------- The preceding example illustrated how we can make a simple change to the structure of the operation directives to produce new autogenerate output. For the case where we want to affect very specific parts of the autogenerate stream, we can make a function for :paramref:`.EnvironmentContext.configure.process_revision_directives` which traverses through the whole :class:`.MigrationScript` structure, locates the elements we care about and modifies them in-place as needed. However, to reduce the boilerplate associated with this task, we can use the :class:`.Rewriter` object to make this easier. :class:`.Rewriter` gives us an object that we can pass directly to :paramref:`.EnvironmentContext.configure.process_revision_directives` which we can also attach handler functions onto, keyed to specific types of constructs. Below is an example where we rewrite :class:`.ops.AddColumnOp` directives; based on whether or not the new column is "nullable", we either return the existing directive, or we return the existing directive with the nullable flag changed, inside of a list with a second directive to alter the nullable flag in a second step:: # ... fragmented env.py script .... from alembic.autogenerate import rewriter from alembic.operations import ops writer = rewriter.Rewriter() @writer.rewrites(ops.AddColumnOp) def add_column(context, revision, op): if op.column.nullable: return op else: op.column.nullable = True return [ op, ops.AlterColumnOp( op.table_name, op.column.name, modify_nullable=False, existing_type=op.column.type, ) ] # ... later ... def run_migrations_online(): # ... with connectable.connect() as connection: context.configure( connection=connection, target_metadata=target_metadata, process_revision_directives=writer ) with context.begin_transaction(): context.run_migrations() Above, in a full :class:`.ops.MigrationScript` structure, the :class:`.AddColumn` directives would be present within the paths ``MigrationScript->UpgradeOps->ModifyTableOps`` and ``MigrationScript->DowngradeOps->ModifyTableOps``. 
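For comparison, the following is a rough, hypothetical sketch of performing the same rewrite by hand, without :class:`.Rewriter`, by traversing the structure ourselves; the traversal is illustrative only, handles just the "upgrade" side, and assumes a single :class:`.UpgradeOps` collection is present::

    # illustrative only - a hand-rolled equivalent of the add_column()
    # handler above, traversing MigrationScript->UpgradeOps->ModifyTableOps
    from alembic.operations import ops

    def process_revision_directives(context, revision, directives):
        script = directives[0]

        for table_ops in script.upgrade_ops.ops:
            if not isinstance(table_ops, ops.ModifyTableOps):
                continue

            new_ops = []
            for op_ in table_ops.ops:
                new_ops.append(op_)
                if isinstance(op_, ops.AddColumnOp) and not op_.column.nullable:
                    # same transformation performed by the Rewriter example
                    op_.column.nullable = True
                    new_ops.append(
                        ops.AlterColumnOp(
                            op_.table_name,
                            op_.column.name,
                            modify_nullable=False,
                            existing_type=op_.column.type,
                        )
                    )
            table_ops.ops[:] = new_ops
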
The :class:`.Rewriter` handles traversing into these structures as well as rewriting them as needed so that we only need to code for the specific object we care about. .. autoclass:: alembic.autogenerate.rewriter.Rewriter :members: .. _autogen_customizing_multiengine_revision: Revision Generation with Multiple Engines / ``run_migrations()`` calls ---------------------------------------------------------------------- A lesser-used technique which allows autogenerated migrations to run against multiple database backends at once, generating changes into a single migration script, is illustrated in the provided ``multidb`` template. This template features a special ``env.py`` which iterates through multiple :class:`~sqlalchemy.engine.Engine` instances and calls upon :meth:`.MigrationContext.run_migrations` for each:: for name, rec in engines.items(): logger.info("Migrating database %s" % name) context.configure( connection=rec['connection'], upgrade_token="%s_upgrades" % name, downgrade_token="%s_downgrades" % name, target_metadata=target_metadata.get(name) ) context.run_migrations(engine_name=name) Above, :meth:`.MigrationContext.run_migrations` is run multiple times, once for each engine. Within the context of autogeneration, each time the method is called the :paramref:`~.EnvironmentContext.configure.upgrade_token` and :paramref:`~.EnvironmentContext.configure.downgrade_token` parameters are changed, so that the collection of template variables gains distinct entries for each engine, which are then referred to explicitly within ``script.py.mako``. In terms of the :paramref:`.EnvironmentContext.configure.process_revision_directives` hook, the behavior here is that the ``process_revision_directives`` hook is invoked **multiple times, once for each call to context.run_migrations()**. This means that if a multi-``run_migrations()`` approach is to be combined with the ``process_revision_directives`` hook, care must be taken to use the hook appropriately. The first point to note is that when a **second** call to ``run_migrations()`` occurs, the ``.upgrade_ops`` and ``.downgrade_ops`` attributes are **converted into Python lists**, and new :class:`.UpgradeOps` and :class:`.DowngradeOps` objects are appended to these lists. Each :class:`.UpgradeOps` and :class:`.DowngradeOps` object maintains an ``.upgrade_token`` and a ``.downgrade_token`` attribute respectively, which serves to render their contents into the appropriate template token. For example, a multi-engine run that has the engine names ``engine1`` and ``engine2`` will generate tokens of ``engine1_upgrades``, ``engine1_downgrades``, ``engine2_upgrades`` and ``engine2_downgrades`` as it runs. 
The resulting migration structure would look like this:: from alembic.operations import ops import sqlalchemy as sa migration_script = ops.MigrationScript( 'eced083f5df', [ ops.UpgradeOps( ops=[ # upgrade operations for "engine1" ], upgrade_token="engine1_upgrades" ), ops.UpgradeOps( ops=[ # upgrade operations for "engine2" ], upgrade_token="engine2_upgrades" ), ], [ ops.DowngradeOps( ops=[ # downgrade operations for "engine1" ], downgrade_token="engine1_downgrades" ), ops.DowngradeOps( ops=[ # downgrade operations for "engine2" ], downgrade_token="engine2_downgrades" ) ], message='migration message' ) Given the above, the following guidelines should be considered when the ``env.py`` script calls upon :meth:`.MigrationContext.run_migrations` multiple times when running autogenerate: * If the ``process_revision_directives`` hook aims to **add elements based on inspection of the current database / connection**, it should do its operation **on each iteration**. This is so that each time the hook runs, the database is available. * Alternatively, if the ``process_revision_directives`` hook aims to **modify the list of migration directives in place**, this should be called **only on the last iteration**. This is so that the hook isn't being given an ever-growing structure each time which it has already modified previously. * The :class:`.Rewriter` object, if used, should be called **only on the last iteration**, because it will always deliver all directives every time, so again to avoid double/triple/etc. processing of directives it should be called only when the structure is complete. * The :attr:`.MigrationScript.upgrade_ops_list` and :attr:`.MigrationScript.downgrade_ops_list` attributes should be consulted when referring to the collection of :class:`.UpgradeOps` and :class:`.DowngradeOps` objects. .. _autogen_custom_ops: Autogenerating Custom Operation Directives ========================================== In the section :ref:`operation_plugins`, we talked about adding new subclasses of :class:`.MigrateOperation` in order to add new ``op.`` directives. In the preceding section :ref:`customizing_revision`, we also learned that these same :class:`.MigrateOperation` structures are at the base of how the autogenerate system knows what Python code to render. Using this knowledge, we can create additional functions that plug into the autogenerate system so that our new operations can be generated into migration scripts when ``alembic revision --autogenerate`` is run. The following sections will detail an example of this using the ``CreateSequenceOp`` and ``DropSequenceOp`` directives we created in :ref:`operation_plugins`, which correspond to the SQLAlchemy :class:`~sqlalchemy.schema.Sequence` construct. Tracking our Object with the Model ---------------------------------- The basic job of an autogenerate comparison function is to inspect a series of objects in the database and compare them against a series of objects defined in our model. By "in our model", we mean anything defined in Python code that we want to track; however, most commonly we're talking about a series of :class:`~sqlalchemy.schema.Table` objects present in a :class:`~sqlalchemy.schema.MetaData` collection. Let's propose a simple way of seeing what :class:`~sqlalchemy.schema.Sequence` objects we want to ensure exist in the database when autogenerate runs.
While these objects do have some integrations with :class:`~sqlalchemy.schema.Table` and :class:`~sqlalchemy.schema.MetaData` already, let's assume they don't, as the example here intends to illustrate how we would do this for most any kind of custom construct. We associate the object with the :attr:`~sqlalchemy.schema.MetaData.info` collection of :class:`~sqlalchemy.schema.MetaData`, which is a dictionary we can use for anything, which we also know will be passed to the autogenerate process:: from sqlalchemy.schema import Sequence def add_sequence_to_model(sequence, metadata): metadata.info.setdefault("sequences", set()).add( (sequence.schema, sequence.name) ) my_seq = Sequence("my_sequence") add_sequence_to_model(my_seq, model_metadata) The :attr:`~sqlalchemy.schema.MetaData.info` dictionary is a good place to put things that we want our autogeneration routines to be able to locate, which can include any object such as custom DDL objects representing views, triggers, special constraints, or anything else we want to support. Registering a Comparison Function --------------------------------- We now need to register a comparison hook, which will be used to compare the database to our model and produce ``CreateSequenceOp`` and ``DropSequenceOp`` directives to be included in our migration script. Note that we are assuming a Postgresql backend:: from alembic.autogenerate import comparators @comparators.dispatch_for("schema") def compare_sequences(autogen_context, upgrade_ops, schemas): all_conn_sequences = set() for sch in schemas: all_conn_sequences.update([ (sch, row[0]) for row in autogen_context.connection.execute( "SELECT relname FROM pg_class c join " "pg_namespace n on n.oid=c.relnamespace where " "relkind='S' and n.nspname=%(nspname)s", # note that we consider a schema of 'None' in our # model to be the "default" name in the PG database; # this usually is the name 'public' nspname=autogen_context.dialect.default_schema_name if sch is None else sch ) ]) # get the collection of Sequence objects we're storing with # our MetaData metadata_sequences = autogen_context.metadata.info.setdefault( "sequences", set()) # for new names, produce CreateSequenceOp directives for sch, name in metadata_sequences.difference(all_conn_sequences): upgrade_ops.ops.append( CreateSequenceOp(name, schema=sch) ) # for names that are going away, produce DropSequenceOp # directives for sch, name in all_conn_sequences.difference(metadata_sequences): upgrade_ops.ops.append( DropSequenceOp(name, schema=sch) ) Above, we've built a new function ``compare_sequences()`` and registered it as a "schema" level comparison function with autogenerate. The job that it performs is that it compares the list of sequence names present in each database schema with that of a list of sequence names that we are maintaining in our :class:`~sqlalchemy.schema.MetaData` object. When autogenerate completes, it will have a series of ``CreateSequenceOp`` and ``DropSequenceOp`` directives in the list of "upgrade" operations; the list of "downgrade" operations is generated directly from these using the ``CreateSequenceOp.reverse()`` and ``DropSequenceOp.reverse()`` methods that we've implemented on these objects. The registration of our function at the scope of "schema" means our autogenerate comparison function is called outside of the context of any specific table or column. 
The three available scopes are "schema", "table", and "column", summarized as follows: * **Schema level** - these hooks are passed a :class:`.AutogenContext`, an :class:`.UpgradeOps` collection, and a collection of string schema names to be operated upon. If the :class:`.UpgradeOps` collection contains changes after all hooks are run, it is included in the migration script: :: @comparators.dispatch_for("schema") def compare_schema_level(autogen_context, upgrade_ops, schemas): pass * **Table level** - these hooks are passed a :class:`.AutogenContext`, a :class:`.ModifyTableOps` collection, a schema name, table name, a :class:`~sqlalchemy.schema.Table` reflected from the database if any or ``None``, and a :class:`~sqlalchemy.schema.Table` present in the local :class:`~sqlalchemy.schema.MetaData`. If the :class:`.ModifyTableOps` collection contains changes after all hooks are run, it is included in the migration script: :: @comparators.dispatch_for("table") def compare_table_level(autogen_context, modify_ops, schemaname, tablename, conn_table, metadata_table): pass * **Column level** - these hooks are passed a :class:`.AutogenContext`, an :class:`.AlterColumnOp` object, a schema name, table name, column name, a :class:`~sqlalchemy.schema.Column` reflected from the database and a :class:`~sqlalchemy.schema.Column` present in the local table. If the :class:`.AlterColumnOp` contains changes after all hooks are run, it is included in the migration script; a "change" is considered to be present if any of the ``modify_`` attributes are set to a non-default value, or there are any keys in the ``.kw`` collection with the prefix ``"modify_"``: :: @comparators.dispatch_for("column") def compare_column_level(autogen_context, alter_column_op, schemaname, tname, cname, conn_col, metadata_col): pass The :class:`.AutogenContext` passed to these hooks is documented below. .. autoclass:: alembic.autogenerate.api.AutogenContext :members: Creating a Render Function -------------------------- The second autogenerate integration hook is to provide a "render" function; since the autogenerate system renders Python code, we need to build a function that renders the correct "op" instructions for our directive:: from alembic.autogenerate import renderers @renderers.dispatch_for(CreateSequenceOp) def render_create_sequence(autogen_context, op): return "op.create_sequence(%r, **%r)" % ( op.sequence_name, {"schema": op.schema} ) @renderers.dispatch_for(DropSequenceOp) def render_drop_sequence(autogen_context, op): return "op.drop_sequence(%r, **%r)" % ( op.sequence_name, {"schema": op.schema} ) The above functions will render Python code corresponding to the presence of ``CreateSequenceOp`` and ``DropSequenceOp`` instructions in the list that our comparison function generates. Running It ---------- All the above code can be organized however the developer sees fit; the only thing that needs to make it work is that when the Alembic environment ``env.py`` is invoked, it either imports modules which contain all the above routines, or they are locally present, or some combination thereof. If we then have code in our model (which of course also needs to be invoked when ``env.py`` runs!) like this:: from sqlalchemy.schema import Sequence my_seq_1 = Sequence("my_sequence_1") add_sequence_to_model(my_seq_1, target_metadata) When we first run ``alembic revision --autogenerate``, we'll see this in our migration file:: def upgrade(): ### commands auto generated by Alembic - please adjust! 
### op.create_sequence('my_sequence_1', **{'schema': None}) ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_sequence('my_sequence_1', **{'schema': None}) ### end Alembic commands ### These are our custom directives that will invoke when ``alembic upgrade`` or ``alembic downgrade`` is run. alembic-rel_1_7_6/docs/build/api/commands.rst000066400000000000000000000032711417624537100212750ustar00rootroot00000000000000.. _alembic.command.toplevel: ========= Commands ========= .. note:: this section discusses the **internal API of Alembic** as regards its command invocation system. This section is only useful for developers who wish to extend the capabilities of Alembic. For documentation on using Alembic commands, please see :doc:`/tutorial`. Alembic commands are all represented by functions in the :ref:`alembic.command.toplevel` package. They all accept the same style of usage, being sent the :class:`.Config` object as the first argument. Commands can be run programmatically, by first constructing a :class:`.Config` object, as in:: from alembic.config import Config from alembic import command alembic_cfg = Config("/path/to/yourapp/alembic.ini") command.upgrade(alembic_cfg, "head") In many cases, and perhaps more often than not, an application will wish to call upon a series of Alembic commands and/or other features. It is usually a good idea to link multiple commands along a single connection and transaction, if feasible. This can be achieved using the :attr:`.Config.attributes` dictionary in order to share a connection:: with engine.begin() as connection: alembic_cfg.attributes['connection'] = connection command.upgrade(alembic_cfg, "head") This recipe requires that ``env.py`` consumes this connection argument; see the example in :ref:`connection_sharing` for details. To write small API functions that make direct use of database and script directory information, rather than just running one of the built-in commands, use the :class:`.ScriptDirectory` and :class:`.MigrationContext` classes directly. .. automodule:: alembic.command :members: alembic-rel_1_7_6/docs/build/api/config.rst000066400000000000000000000023201417624537100207330ustar00rootroot00000000000000.. _alembic.config.toplevel: ============== Configuration ============== .. note:: this section discusses the **internal API of Alembic** as regards internal configuration constructs. This section is only useful for developers who wish to extend the capabilities of Alembic. For documentation on configuration of an Alembic environment, please see :doc:`/tutorial`. The :class:`.Config` object represents the configuration passed to the Alembic environment. From an API usage perspective, it is needed for the following use cases: * to create a :class:`.ScriptDirectory`, which allows you to work with the actual script files in a migration environment * to create an :class:`.EnvironmentContext`, which allows you to actually run the ``env.py`` module within the migration environment * to programmatically run any of the commands in the :ref:`alembic.command.toplevel` module. The :class:`.Config` is *not* needed for these cases: * to instantiate a :class:`.MigrationContext` directly - this object only needs a SQLAlchemy connection or dialect name. * to instantiate a :class:`.Operations` object - this object only needs a :class:`.MigrationContext`. .. 
automodule:: alembic.config :members: alembic-rel_1_7_6/docs/build/api/ddl.rst000066400000000000000000000021021417624537100202270ustar00rootroot00000000000000.. _alembic.ddl.toplevel: ============= DDL Internals ============= These are some of the constructs used to generate migration instructions. The APIs here build off of the :class:`sqlalchemy.schema.DDLElement` and :ref:`sqlalchemy.ext.compiler_toplevel` systems. For programmatic usage of Alembic's migration directives, the easiest route is to use the higher level functions given by :ref:`alembic.operations.toplevel`. .. automodule:: alembic.ddl :members: :undoc-members: .. automodule:: alembic.ddl.base :members: :undoc-members: .. automodule:: alembic.ddl.impl :members: :undoc-members: MySQL ============= .. automodule:: alembic.ddl.mysql :members: :undoc-members: :show-inheritance: MS-SQL ============= .. automodule:: alembic.ddl.mssql :members: :undoc-members: :show-inheritance: Postgresql ============= .. automodule:: alembic.ddl.postgresql :members: :undoc-members: :show-inheritance: SQLite ============= .. automodule:: alembic.ddl.sqlite :members: :undoc-members: :show-inheritance: alembic-rel_1_7_6/docs/build/api/index.rst000066400000000000000000000017651417624537100206110ustar00rootroot00000000000000.. _api: =========== API Details =========== Alembic's internal API has many public integration points that can be used to extend Alembic's functionality as well as to re-use its functionality in new ways. As the project has grown, more APIs are created and exposed for this purpose. Direct use of the vast majority of API details discussed here is not needed for rudimentary use of Alembic; the only API that is used normally by end users is the methods provided by the :class:`.Operations` class, which is discussed outside of this subsection, and the parameters that can be passed to the :meth:`.EnvironmentContext.configure` method, used when configuring one's ``env.py`` environment. However, real-world applications will usually end up using more of the internal API, in particular being able to run commands programmatically, as discussed in the section :doc:`/api/commands`. .. toctree:: :maxdepth: 2 overview runtime config commands operations autogenerate script ddl alembic-rel_1_7_6/docs/build/api/operations.rst000066400000000000000000000144151417624537100216610ustar00rootroot00000000000000.. _alembic.operations.toplevel: ===================== Operation Directives ===================== .. note:: this section discusses the **internal API of Alembic** as regards the internal system of defining migration operation directives. This section is only useful for developers who wish to extend the capabilities of Alembic. For end-user guidance on Alembic migration operations, please see :ref:`ops`. Within migration scripts, actual database migration operations are handled via an instance of :class:`.Operations`. The :class:`.Operations` class lists out available migration operations that are linked to a :class:`.MigrationContext`, which communicates instructions originated by the :class:`.Operations` object into SQL that is sent to a database or SQL output stream. Most methods on the :class:`.Operations` class are generated dynamically using a "plugin" system, described in the next section :ref:`operation_plugins`. Additionally, when Alembic migration scripts actually run, the methods on the current :class:`.Operations` object are proxied out to the ``alembic.op`` module, so that they are available using module-style access. 
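As a brief illustration of these two access styles, the following is a minimal sketch; the ``engine`` variable, the ``account`` table and the ``nickname`` column are hypothetical placeholders used only for this example::

    import sqlalchemy as sa

    # within a migration script, the module-level "op" proxy is used;
    # Alembic binds it to the current Operations object at runtime
    from alembic import op

    def upgrade():
        op.add_column("account", sa.Column("nickname", sa.String(50)))

    # outside of a migration script, an Operations object may instead be
    # constructed explicitly against a MigrationContext
    from alembic.migration import MigrationContext
    from alembic.operations import Operations

    with engine.connect() as connection:
        ctx = MigrationContext.configure(connection)
        operations = Operations(ctx)
        operations.add_column("account", sa.Column("nickname", sa.String(50)))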
For an overview of how to use an :class:`.Operations` object directly in programs, as well as for reference to the standard operation methods as well as "batch" methods, see :ref:`ops`. .. _operation_plugins: Operation Plugins ===================== The Operations object is extensible using a plugin system. This system allows one to add new ``op.`` methods at runtime. The steps to use this system are to first create a subclass of :class:`.MigrateOperation`, register it using the :meth:`.Operations.register_operation` class decorator, then build a default "implementation" function which is established using the :meth:`.Operations.implementation_for` decorator. Below we illustrate a very simple operation ``CreateSequenceOp`` which will implement a new method ``op.create_sequence()`` for use in migration scripts:: from alembic.operations import Operations, MigrateOperation @Operations.register_operation("create_sequence") class CreateSequenceOp(MigrateOperation): """Create a SEQUENCE.""" def __init__(self, sequence_name, schema=None): self.sequence_name = sequence_name self.schema = schema @classmethod def create_sequence(cls, operations, sequence_name, **kw): """Issue a "CREATE SEQUENCE" instruction.""" op = CreateSequenceOp(sequence_name, **kw) return operations.invoke(op) def reverse(self): # only needed to support autogenerate return DropSequenceOp(self.sequence_name, schema=self.schema) @Operations.register_operation("drop_sequence") class DropSequenceOp(MigrateOperation): """Drop a SEQUENCE.""" def __init__(self, sequence_name, schema=None): self.sequence_name = sequence_name self.schema = schema @classmethod def drop_sequence(cls, operations, sequence_name, **kw): """Issue a "DROP SEQUENCE" instruction.""" op = DropSequenceOp(sequence_name, **kw) return operations.invoke(op) def reverse(self): # only needed to support autogenerate return CreateSequenceOp(self.sequence_name, schema=self.schema) Above, the ``CreateSequenceOp`` and ``DropSequenceOp`` classes represent new operations that will be available as ``op.create_sequence()`` and ``op.drop_sequence()``. The reason the operations are represented as stateful classes is so that an operation and a specific set of arguments can be represented generically; the state can then correspond to different kinds of operations, such as invoking the instruction against a database, or autogenerating Python code for the operation into a script. In order to establish the migrate-script behavior of the new operations, we use the :meth:`.Operations.implementation_for` decorator:: @Operations.implementation_for(CreateSequenceOp) def create_sequence(operations, operation): if operation.schema is not None: name = "%s.%s" % (operation.schema, operation.sequence_name) else: name = operation.sequence_name operations.execute("CREATE SEQUENCE %s" % name) @Operations.implementation_for(DropSequenceOp) def drop_sequence(operations, operation): if operation.schema is not None: name = "%s.%s" % (operation.schema, operation.sequence_name) else: name = operation.sequence_name operations.execute("DROP SEQUENCE %s" % name) Above, we use the simplest possible technique of invoking our DDL, which is just to call :meth:`.Operations.execute` with literal SQL. If this is all a custom operation needs, then this is fine. However, options for more comprehensive support include building out a custom SQL construct, as documented at :ref:`sqlalchemy.ext.compiler_toplevel`. 
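As an illustrative, intentionally minimal sketch of that route, the DDL may be wrapped in a :class:`~sqlalchemy.schema.DDLElement` subclass compiled per-dialect via ``@compiles``, with the construct then passed to :meth:`.Operations.execute`. The ``CreateSequenceDDL`` name below is made up for this example, and the snippet assumes the imports and ``CreateSequenceOp`` class from the previous examples as well as SQLAlchemy 1.3/1.4-era APIs::

    from sqlalchemy.ext.compiler import compiles
    from sqlalchemy.schema import DDLElement

    class CreateSequenceDDL(DDLElement):
        """DDL construct emitting CREATE SEQUENCE for a given name/schema."""

        def __init__(self, sequence_name, schema=None):
            self.sequence_name = sequence_name
            self.schema = schema

    @compiles(CreateSequenceDDL)
    def _render_create_sequence(element, compiler, **kw):
        # generic rendering; dialect-specific variants may be registered
        # with @compiles(CreateSequenceDDL, "postgresql"), etc.
        name = element.sequence_name
        if element.schema is not None:
            name = "%s.%s" % (element.schema, name)
        return "CREATE SEQUENCE %s" % name

    @Operations.implementation_for(CreateSequenceOp)
    def create_sequence(operations, operation):
        operations.execute(
            CreateSequenceDDL(operation.sequence_name, operation.schema)
        )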
With the above two steps, a migration script can now use new methods ``op.create_sequence()`` and ``op.drop_sequence()`` that will proxy to our object as a classmethod:: def upgrade(): op.create_sequence("my_sequence") def downgrade(): op.drop_sequence("my_sequence") The registration of new operations only needs to occur in time for the ``env.py`` script to invoke :meth:`.MigrationContext.run_migrations`; within the module level of the ``env.py`` script is sufficient. .. seealso:: :ref:`autogen_custom_ops` - how to add autogenerate support to custom operations. .. _operation_objects: .. _alembic.operations.ops.toplevel: Built-in Operation Objects ============================== The migration operations present on :class:`.Operations` are themselves delivered via operation objects that represent an operation and its arguments. All operations descend from the :class:`.MigrateOperation` class, and are registered with the :class:`.Operations` class using the :meth:`.Operations.register_operation` class decorator. The :class:`.MigrateOperation` objects also serve as the basis for how the autogenerate system renders new migration scripts. .. seealso:: :ref:`operation_plugins` :ref:`customizing_revision` The built-in operation objects are listed below. .. automodule:: alembic.operations.ops :members: alembic-rel_1_7_6/docs/build/api/overview.rst000066400000000000000000000065211417624537100213430ustar00rootroot00000000000000======== Overview ======== .. note:: this section is a technical overview of the **internal API of Alembic**. This section is only useful for developers who wish to extend the capabilities of Alembic; for regular users, reading this section is **not necessary**. A visualization of the primary features of Alembic's internals is presented in the following figure. The module and class boxes do not list out all the operations provided by each unit; only a small set of representative elements intended to convey the primary purpose of each system. .. image:: api_overview.png The script runner for Alembic is present in the :ref:`alembic.config.toplevel` module. This module produces a :class:`.Config` object and passes it to the appropriate function in :ref:`alembic.command.toplevel`. Functions within :ref:`alembic.command.toplevel` will typically instantiate an :class:`.ScriptDirectory` instance, which represents the collection of version files, and an :class:`.EnvironmentContext`, which is a configurational facade passed to the environment's ``env.py`` script. The :class:`.EnvironmentContext` object is the primary object used within the ``env.py`` script, whose main purpose is that of a facade for creating and using a :class:`.MigrationContext` object, which is the actual migration engine that refers to a database implementation. The primary method called on this object within an ``env.py`` script is the :meth:`.EnvironmentContext.configure` method, which sets up the :class:`.MigrationContext` with database connectivity and behavioral configuration. It also supplies methods for transaction demarcation and migration running, but these methods ultimately call upon the :class:`.MigrationContext` that's been configured. :class:`.MigrationContext` is the gateway to the database for other parts of the application, and produces a :class:`.DefaultImpl` object which does the actual database communication, and knows how to create the specific SQL text of the various DDL directives such as ALTER TABLE; :class:`.DefaultImpl` has subclasses that are per-database-backend. In "offline" mode (e.g. 
``--sql``), the :class:`.MigrationContext` will produce SQL to a file output stream instead of a database. During an upgrade or downgrade operation, a specific series of migration scripts are invoked starting with the :class:`.MigrationContext` in conjunction with the :class:`.ScriptDirectory`; the actual scripts themselves make use of the :class:`.Operations` object, which provide the end-user interface to specific database operations. The :class:`.Operations` object is generated based on a series of "operation directive" objects that are user-extensible, and start out in the :ref:`alembic.operations.ops.toplevel` module. Another prominent feature of Alembic is the "autogenerate" feature, which produces new migration scripts that contain Python code. The autogenerate feature starts in :ref:`alembic.autogenerate.toplevel`, and is used exclusively by the :func:`.alembic.command.revision` command when the ``--autogenerate`` flag is passed. Autogenerate refers to the :class:`.MigrationContext` and :class:`.DefaultImpl` in order to access database connectivity and access per-backend rules for autogenerate comparisons. It also makes use of :ref:`alembic.operations.ops.toplevel` in order to represent the operations that it will render into scripts. alembic-rel_1_7_6/docs/build/api/runtime.rst000066400000000000000000000027401417624537100211570ustar00rootroot00000000000000.. _alembic.runtime.environment.toplevel: ======================= Runtime Objects ======================= The "runtime" of Alembic involves the :class:`.EnvironmentContext` and :class:`.MigrationContext` objects. These are the objects that are in play once the ``env.py`` script is loaded up by a command and a migration operation proceeds. The Environment Context ======================= The :class:`.EnvironmentContext` class provides most of the API used within an ``env.py`` script. Within ``env.py``, the instantated :class:`.EnvironmentContext` is made available via a special *proxy module* called ``alembic.context``. That is, you can import ``alembic.context`` like a regular Python module, and each name you call upon it is ultimately routed towards the current :class:`.EnvironmentContext` in use. In particular, the key method used within ``env.py`` is :meth:`.EnvironmentContext.configure`, which establishes all the details about how the database will be accessed. .. automodule:: alembic.runtime.environment :members: EnvironmentContext .. _alembic.runtime.migration.toplevel: The Migration Context ===================== The :class:`.MigrationContext` handles the actual work to be performed against a database backend as migration operations proceed. It is generally not exposed to the end-user, except when the :paramref:`~.EnvironmentContext.configure.on_version_apply` callback hook is used. .. automodule:: alembic.runtime.migration :members: MigrationContext alembic-rel_1_7_6/docs/build/api/script.rst000066400000000000000000000010251417624537100207730ustar00rootroot00000000000000.. _alembic.script.toplevel: ================ Script Directory ================ The :class:`.ScriptDirectory` object provides programmatic access to the Alembic version files present in the filesystem. .. automodule:: alembic.script :members: Revision ======== The :class:`.RevisionMap` object serves as the basis for revision management, used exclusively by :class:`.ScriptDirectory`. .. automodule:: alembic.script.revision :members: Write Hooks =========== .. 
automodule:: alembic.script.write_hooks :members: alembic-rel_1_7_6/docs/build/assets/000077500000000000000000000000001417624537100174705ustar00rootroot00000000000000alembic-rel_1_7_6/docs/build/assets/api_overview.graffle000066400000000000000000002072571417624537100235340ustar00rootroot00000000000000 ActiveLayerIndex 0 ApplicationVersion com.omnigroup.OmniGrafflePro 139.18.0.187838 AutoAdjust BackgroundGraphic Bounds {{0, 0}, {1176, 768}} Class SolidGraphic ID 2 Style shadow Draws NO stroke Draws NO BaseZoom 0 CanvasOrigin {0, 0} ColumnAlign 1 ColumnSpacing 36 CreationDate 2012-01-24 21:51:07 +0000 Creator classic DisplayScale 1 0/72 in = 1.0000 in GraphDocumentVersion 8 GraphicsList Bounds {{601.74580087231288, 420}, {84, 12}} Class ShapedGraphic FitText YES Flow Resize ID 2140 Shape Rectangle Style shadow Draws NO stroke Draws NO Text Align 0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 \f0\fs20 \cf0 <<instantiates>>} VerticalPad 0 Wrap NO Class TableGroup Graphics Bounds {{191, 107.40116119384766}, {102.9071044921875, 14}} Class ShapedGraphic FitText Vertical Flow Resize ID 2132 Shape Rectangle Style fill GradientCenter {-0.29411799999999999, -0.264706} Text Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc \f0\b\fs24 \cf0 PostgresqlImpl} VerticalPad 0 TextPlacement 0 GroupConnect YES ID 2131 Class TableGroup Graphics Bounds {{230.9169921875, 132.80233001708984}, {102.9071044921875, 14}} Class ShapedGraphic FitText Vertical Flow Resize ID 2130 Shape Rectangle Style fill GradientCenter {-0.29411799999999999, -0.264706} Text Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc \f0\b\fs24 \cf0 MSSQLImpl} VerticalPad 0 TextPlacement 0 GroupConnect YES ID 2129 Class TableGroup Graphics Bounds {{226, 82}, {102.9071044921875, 14}} Class ShapedGraphic FitText Vertical Flow Resize ID 2127 Shape Rectangle Style fill GradientCenter {-0.29411799999999999, -0.264706} Text Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc \f0\b\fs24 \cf0 MySQLImpl} VerticalPad 0 TextPlacement 0 GroupConnect YES ID 2126 Class LineGraphic Head ID 2055 ID 2135 Points {280.22809604806071, 146.80233001708984} {272.46503226582109, 172.16651000976572} Style stroke HeadArrow UMLInheritance Legacy TailArrow 0 Tail ID 2129 Class LineGraphic Head ID 2055 ID 2134 Points {243.64926792598939, 121.40116119384763} {252.32082843664148, 172.16651000976572} Style stroke HeadArrow UMLInheritance Legacy TailArrow 0 Tail ID 2131 Class LineGraphic Head ID 2055 ID 2133 Points {276.4518773872507, 95.999999999999986} {265.55272336402226, 172.16651000976572} Style stroke HeadArrow UMLInheritance Legacy TailArrow 0 Tail ID 2126 Bounds {{504, 310}, {84, 12}} Class ShapedGraphic FitText YES Flow Resize ID 2125 Shape Rectangle Style shadow Draws NO stroke Draws NO Text Align 
0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 \f0\fs20 \cf0 <<instantiates>>} VerticalPad 0 Wrap NO Class LineGraphic Head ID 33 ID 2124 OrthogonalBarAutomatic OrthogonalBarPoint {0, 0} OrthogonalBarPosition 16 Points {563, 340.34042553191489} {497.13201904296875, 327.88251038766401} Style stroke HeadArrow StickArrow Legacy LineType 2 Pattern 1 TailArrow 0 Tail ID 2072 Bounds {{494.00001409542369, 415.9000186920166}, {55, 12}} Class ShapedGraphic FitText YES Flow Resize ID 2123 Line ID 2139 Position 0.37128287553787231 RotationType 0 Shape Rectangle Style shadow Draws NO stroke Draws NO Text Align 0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 \f0\fs20 \cf0 <<uses>>} VerticalPad 0 Wrap NO Bounds {{713.35945466160774, 356.11699358749399}, {55, 12}} Class ShapedGraphic FitText YES Flow Resize ID 2122 Line ID 2121 Position 0.49189183115959167 RotationType 0 Shape Rectangle Style shadow Draws NO stroke Draws NO Text Align 0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 \f0\fs20 \cf0 <<uses>>} VerticalPad 0 Wrap NO Class LineGraphic Head ID 2081 Info 5 ID 2121 Points {702, 363.10150901307452} {781, 361.10002136230463} Style stroke HeadArrow StickArrow HopLines HopType 102 Legacy Pattern 1 TailArrow 0 Tail ID 2072 Class LineGraphic Head ID 2059 ID 2120 OrthogonalBarAutomatic OrthogonalBarPoint {0, 0} OrthogonalBarPosition -1 Points {637, 406} {565.78369522094727, 454.05202861384231} Style stroke HeadArrow StickArrow Legacy LineType 2 Pattern 1 TailArrow 0 Tail ID 2072 Bounds {{717, 400}, {68, 12}} Class ShapedGraphic FitText YES Flow Resize ID 2119 Shape Rectangle Style shadow Draws NO stroke Draws NO Text Align 0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 \f0\fs20 \cf0 <<invokes>>} VerticalPad 0 Wrap NO Class LineGraphic Head ID 2072 Info 5 ID 2118 OrthogonalBarAutomatic OrthogonalBarPoint {0, 0} OrthogonalBarPosition -1 Points {759.34192925872742, 429.89997863769531} {702, 384.99999999999994} Style stroke HeadArrow StickArrow Legacy LineType 2 Pattern 1 TailArrow 0 Tail ID 2048 Info 3 Bounds {{603.74580087231288, 470.3107529903566}, {80, 12}} Class ShapedGraphic FitText YES Flow Resize ID 2117 Line ID 2116 Position 0.47171458601951599 RotationType 0 Shape Rectangle Style shadow Draws NO stroke Draws NO Text Align 0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 \f0\fs20 \cf0 <<configures>>} VerticalPad 0 Wrap NO Class LineGraphic Head ID 2059 ID 2116 Points {713.35941696166992, 476.88540101271974} {565.78369522094727, 475.66718967115884} Style stroke HeadArrow StickArrow HopLines HopType 102 Legacy Pattern 1 TailArrow 
0 Tail ID 2048 Bounds {{816, 258.37493918977634}, {69, 24}} Class ShapedGraphic FitText YES Flow Resize ID 2113 Line ID 2109 Position 0.46421170234680176 RotationType 0 Shape Rectangle Style shadow Draws NO stroke Draws NO Text Align 0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 \f0\fs20 \cf0 <<generates,\ renders>>} VerticalPad 0 Wrap NO Bounds {{705.05227716905051, 191.22492316822797}, {69, 24}} Class ShapedGraphic FitText YES Flow Resize ID 2112 Line ID 2108 Position 0.46593526005744934 RotationType 0 Shape Rectangle Style shadow Draws NO stroke Draws NO Text Align 0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 \f0\fs20 \cf0 <<provides\ operations>>} VerticalPad 0 Wrap NO Class LineGraphic Head ID 2098 ID 2109 Points {850.5, 298.10002136230469} {850.50001322861976, 238.37493896484375} Style stroke HeadArrow StickArrow HopLines HopType 102 Legacy Pattern 1 TailArrow 0 Tail ID 2081 Class LineGraphic Head ID 38 ID 2108 Points {781.00002098083496, 203.28096591495026} {692.04400634765625, 203.16068579982147} Style stroke HeadArrow StickArrow Legacy Pattern 1 TailArrow 0 Tail ID 2098 Bounds {{623.48996514081955, 291.09998092651369}, {55, 12}} Class ShapedGraphic FitText YES Flow Resize ID 2107 Line ID 2105 Position 0.43473681807518005 RotationType 0 Shape Rectangle Style shadow Draws NO stroke Draws NO Text Align 0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 \f0\fs20 \cf0 <<uses>>} VerticalPad 0 Wrap NO Bounds {{513.14304282962803, 197.37493856351756}, {55, 12}} Class ShapedGraphic FitText YES Flow Resize ID 2106 Line ID 2104 Position 0.3995765745639801 RotationType 0 Shape Rectangle Style shadow Draws NO stroke Draws NO Text Align 0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 \f0\fs20 \cf0 <<uses>>} VerticalPad 0 Wrap NO Class LineGraphic Head ID 41 Info 4 ID 2105 OrthogonalBarAutomatic OrthogonalBarPoint {0, 0} OrthogonalBarPosition 5.1000003814697266 Points {781, 339.20153037537921} {747, 331} {744, 297.09998092651369} {533, 272.33299255371094} {526, 233} {491.30664526513783, 232.60000610351562} Style stroke HeadArrow StickArrow Legacy LineType 2 Pattern 1 TailArrow 0 Tail ID 2081 Info 2 Class LineGraphic Head ID 41 ID 2104 Points {572.95599365234375, 203} {492.0880126953125, 203.93833970103648} Style stroke HeadArrow StickArrow HopLines HopType 102 Legacy Pattern 1 TailArrow 0 Tail ID 38 Bounds {{392.47411627278478, 268.53371033283503}, {84, 12}} Class ShapedGraphic FitText YES Flow Resize ID 2103 Line ID 2102 Offset 1 Position 0.46998947858810425 RotationType 0 Shape Rectangle Style shadow Draws NO stroke Draws NO Text Align 0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} 
\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 \f0\fs20 \cf0 <<instantiates>>} VerticalPad 0 Wrap NO Class LineGraphic Head ID 41 ID 2102 Points {435.00741612193735, 298.09998092651369} {436.00000000000011, 248} Style stroke HeadArrow StickArrow HopLines HopType 102 Legacy Pattern 1 TailArrow 0 Tail ID 33 Bounds {{320.83625227212906, 209.28763384458864}, {55, 12}} Class ShapedGraphic FitText YES Flow Resize ID 2101 Line ID 2040 Position 0.39780238270759583 RotationType 0 Shape Rectangle Style shadow Draws NO stroke Draws NO Text Align 0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 \f0\fs20 \cf0 <<uses>>} VerticalPad 0 Wrap NO Class TableGroup Graphics Bounds {{781.00002098083496, 168.37493896484375}, {139, 14}} Class ShapedGraphic FitText Vertical Flow Resize ID 2099 Shape Rectangle Style fill GradientCenter {-0.29411799999999999, -0.264706} Text Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc \f0\b\fs24 \cf0 alembic.operations.op} VerticalPad 0 TextPlacement 0 Bounds {{781.00002098083496, 182.37493896484375}, {139, 56}} Class ShapedGraphic FitText Vertical Flow Resize ID 2100 Shape Rectangle Style fill GradientCenter {-0.29411799999999999, -0.264706} Text Align 0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 \f0\fs24 \cf0 CreateTableOp\ AlterColumnOp\ AddColumnOp\ DropColumnOp} VerticalPad 0 TextPlacement 0 GridH 2099 2100 GroupConnect YES ID 2098 Bounds {{333.24926419826539, 462.28131709379346}, {78, 12}} Class ShapedGraphic FitText YES Flow Resize ID 2090 Line ID 2068 Position 0.44118145108222961 RotationType 0 Shape Rectangle Style shadow Draws NO stroke Draws NO Text Align 0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 \f0\fs20 \cf0 <<read/write>>} VerticalPad 0 Wrap NO Class TableGroup Graphics Bounds {{781, 298.10002136230469}, {139, 14}} Class ShapedGraphic FitText Vertical Flow Resize ID 2082 Shape Rectangle Style fill GradientCenter {-0.29411799999999999, -0.264706} Text Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc \f0\b\fs24 \cf0 alembic.autogenerate} VerticalPad 0 TextPlacement 0 Bounds {{781, 312.10002136230469}, {139, 70}} Class ShapedGraphic FitText Vertical Flow Resize ID 2083 Shape Rectangle Style fill GradientCenter {-0.29411799999999999, -0.264706} Text Align 0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 \f0\fs24 \cf0 compare_metadata()\ 
produce_migrations()\ compare\ render\ generate} VerticalPad 0 TextPlacement 0 GridH 2082 2083 GroupConnect YES ID 2081 Magnets {0.032374100719424703, 0.5} {-0.5071942446043165, -0.010850225176129769} {0.52163523392711664, 0} {0, -0.5} {-0.5, 0.24999999999999911} Class TableGroup Graphics Bounds {{563, 322}, {139, 14}} Class ShapedGraphic FitText Vertical Flow Resize ID 2073 Shape Rectangle Style fill GradientCenter {-0.29411799999999999, -0.264706} Text Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc \f0\b\fs24 \cf0 alembic.command} VerticalPad 0 TextPlacement 0 Bounds {{563, 336}, {139, 70}} Class ShapedGraphic FitText Vertical Flow Resize ID 2074 Shape Rectangle Style fill GradientCenter {-0.29411799999999999, -0.264706} Text Align 0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 \f0\fs24 \cf0 init()\ revision()\ upgrade()\ downgrade()\ history()} VerticalPad 0 TextPlacement 0 GridH 2073 2074 GroupConnect YES ID 2072 Magnets {0.032374100719424703, 0.5} {-0.5071942446043165, -0.010850225176129769} {0.26978417266187105, 0.50105453672863209} {0.16675024238421798, -0.51583989461263036} {0.5, 0.24999999999999911} {0.50000000000000089, -0.010696321272922305} {-0.50719424460431561, -0.28571428571428559} Class LineGraphic Head ID 2067 ID 2068 Points {426.78369522094727, 467.79283450278251} {303.17371368408192, 468.90004920959467} Style stroke HeadArrow StickArrow HopLines HopType 102 Legacy Pattern 1 TailArrow 0 Tail ID 2059 Class Group Graphics Bounds {{218.92971038818359, 448.71651649475098}, {74.487998962402344, 46}} Class ShapedGraphic FitText Vertical Flow Resize FontInfo Font Helvetica Size 10 ID 2066 Shape Rectangle Style Text Align 0 Pad 1 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural \f0\fs20 \cf0 \expnd0\expndtw0\kerning0 /versions/a.py\ /versions/b.py\ /versions/...} Bounds {{209.17371368408203, 424.9000186920166}, {94, 84}} Class ShapedGraphic ID 2067 Magnets {0.49999999999999911, -0.30952344621930905} {0.49999999999999911, 0.023809887114024875} Shape Rectangle Style Text Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc \f0\fs24 \cf0 filesystem} TextPlacement 0 ID 2065 Class TableGroup Graphics Bounds {{426.78369522094727, 442.76912879943848}, {139, 14}} Class ShapedGraphic FitText Vertical Flow Resize ID 2060 Shape Rectangle Style fill GradientCenter {-0.29411799999999999, -0.264706} Text Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc \f0\b\fs24 \cf0 ScriptDirectory} VerticalPad 0 TextPlacement 0 Bounds {{426.78369522094727, 456.76912879943848}, {139, 42}} Class ShapedGraphic 
FitText Vertical Flow Resize ID 2061 Shape Rectangle Style fill GradientCenter {-0.29411799999999999, -0.264706} Text Align 0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 \f0\fs24 \cf0 walk_revisions()\ get_revision()\ generate_revision()} VerticalPad 0 TextPlacement 0 GridH 2060 2061 GroupConnect YES ID 2059 Magnets {0.51040606996823534, 0.089285714285713524} {0.25000000000000044, -0.50000000000000089} {-0.50398241924039766, -0.053571428571430602} {-0.00038529693823985411, 0.5357142857142847} {0.5015561494895886, -0.29944872856140314} Class LineGraphic Head ID 2038 ID 2058 Points {259.5464429157899, 256.16651000976572} {259.5464429157899, 299.49998778426624} Style stroke HeadArrow StickArrow Legacy Pattern 1 TailArrow 0 Tail ID 2055 Class TableGroup Graphics Bounds {{208.09290313720703, 172.16651000976572}, {102.90709686279297, 14}} Class ShapedGraphic FitText Vertical Flow Resize ID 2056 Shape Rectangle Style fill GradientCenter {-0.29411799999999999, -0.264706} Text Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc \f0\b\fs24 \cf0 DefaultImpl} VerticalPad 0 TextPlacement 0 Bounds {{208.09290313720703, 186.16651000976572}, {102.90709686279297, 70}} Class ShapedGraphic FitText Vertical Flow Resize ID 2057 Shape Rectangle Style fill GradientCenter {-0.29411799999999999, -0.264706} Text Align 0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 \f0\fs24 \cf0 execute()\ create_table()\ alter_column()\ add_column()\ drop_column()} VerticalPad 0 TextPlacement 0 GridH 2056 2057 GroupConnect YES ID 2055 Class TableGroup Graphics Bounds {{713.35941696166992, 429.89997863769531}, {119.0880126953125, 14}} Class ShapedGraphic FitText Vertical Flow Resize ID 2049 Shape Rectangle Style fill GradientCenter {-0.29411799999999999, -0.264706} Text Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc \f0\b\fs24 \cf0 alembic.config} VerticalPad 0 TextPlacement 0 Bounds {{713.35941696166992, 443.89997863769531}, {119.0880126953125, 42}} Class ShapedGraphic FitText Vertical Flow Resize ID 2050 Shape Rectangle Style fill GradientCenter {-0.29411799999999999, -0.264706} Text Align 0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 \f0\fs24 \cf0 Config\ Command\ main()} VerticalPad 0 TextPlacement 0 GridH 2049 2050 GroupConnect YES ID 2048 Magnets {0.5, -4.4408920985006262e-16} {-0.5, -0.25000000000000178} {-0.1138779104937786, -0.5} {-0.49999999999999911, 0.33902539955400712} Class LineGraphic Head ID 2055 ID 2040 Points {373, 215.59905413254651} {311, 214.81620239134219} Style stroke HeadArrow StickArrow Legacy Pattern 1 TailArrow 0 Tail ID 41 Bounds 
{{216.45355606079102, 299.9999877929688}, {86.1858, 84}} Class ShapedGraphic ID 2038 Shape Cylinder Style Text Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc \f0\fs24 \cf0 database} VerticalPad 0 Class TableGroup Graphics Bounds {{373, 180.20000610351565}, {119.0880126953125, 14}} Class ShapedGraphic FitText Vertical Flow Resize ID 42 Shape Rectangle Style fill GradientCenter {-0.29411799999999999, -0.264706} Text Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc \f0\b\fs24 \cf0 MigrationContext} VerticalPad 0 TextPlacement 0 Bounds {{373, 194.20000610351565}, {119.0880126953125, 56}} Class ShapedGraphic FitText Vertical Flow Resize ID 44 Shape Rectangle Style fill GradientCenter {-0.29411799999999999, -0.264706} Text Align 0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural \f0\fs24 \cf0 connection\ run_migrations()\ execute()\ stamp()} VerticalPad 0 TextPlacement 0 GridH 42 44 GroupConnect YES ID 41 Magnets {0.5, -0.16088094860684521} {0.0042301604752394972, -0.5514285714285716} {-0.49936690654431892, 0.0057142857142853387} {0.49343873986566722, 0.24857142857142822} {0.029020499831381219, 0.46857134137834766} Class TableGroup Graphics Bounds {{572.95599365234375, 175.59130477905273}, {119.0880126953125, 14}} Class ShapedGraphic FitText Vertical Flow Resize ID 39 Shape Rectangle Style fill GradientCenter {-0.29411799999999999, -0.264706} Text Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc \f0\b\fs24 \cf0 Operations} VerticalPad 0 TextPlacement 0 Bounds {{572.95599365234375, 189.59130477905273}, {119.0880126953125, 70}} Class ShapedGraphic FitText Vertical Flow Resize ID 40 Shape Rectangle Style fill GradientCenter {-0.29411799999999999, -0.264706} Text Align 0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 \f0\fs24 \cf0 migration_context\ create_table()\ alter_column()\ add_column()\ drop_column()} VerticalPad 0 TextPlacement 0 GridH 39 40 GroupConnect YES ID 38 Magnets {-0.49999999999999911, -0.17370600927443736} Class TableGroup Graphics Bounds {{367.95599365234375, 298.09998092651369}, {129.176025390625, 14.000003814697266}} Class ShapedGraphic FitText Vertical Flow Resize ID 34 Shape Rectangle Style fill GradientCenter {-0.29411799999999999, -0.264706} Text Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc \f0\b\fs24 \cf0 EnvironmentContext} VerticalPad 0 TextPlacement 0 Bounds 
{{367.95599365234375, 312.09998855590823}, {129.176025390625, 70.000015258789062}} Class ShapedGraphic FitText Vertical Flow Resize ID 35 Shape Rectangle Style fill GradientCenter {-0.29411799999999999, -0.264706} Text Align 0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 \f0\fs24 \cf0 migration_context\ configure()\ run_migrations()\ begin_transaction()\ is_offline_mode()} VerticalPad 0 TextPlacement 0 GridH 34 35 GroupConnect YES ID 33 Magnets {0.5, -0.14544617445169949} {0.019251798561151112, 0.50476190476190474} {0.019070177820008194, -0.49999999999999956} Bounds {{350, 148.9999938964844}, {164.82400000000001, 255.60000610351562}} Class ShapedGraphic ID 2036 Shape Rectangle Style fill Draws NO shadow Draws NO Fuzziness 0.0 stroke Color b 0.191506 g 0.389204 r 0.744565 CornerRadius 5 Pattern 1 Text Align 0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural \f0\fs24 \cf0 env.py script} VerticalPad 0 TextPlacement 0 Wrap NO Bounds {{552, 149}, {169, 130.33299255371094}} Class ShapedGraphic ID 2032 Magnets {-0.43313956596913394, 0.50000000000000044} {0.014211640211639676, 0.49587157857074082} Shape Rectangle Style fill Draws NO shadow Draws NO Fuzziness 0.0 stroke Color b 0.191506 g 0.389204 r 0.744565 CornerRadius 5 Pattern 1 Text Align 0 Text {\rtf1\ansi\ansicpg1252\cocoartf1347\cocoasubrtf570 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;} \pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural \f0\fs24 \cf0 migration script} VerticalPad 0 TextPlacement 0 Wrap NO Class LineGraphic Head ID 2048 ID 2139 OrthogonalBarAutomatic OrthogonalBarPoint {0, 0} OrthogonalBarPosition -1 Points {435.00741612193735, 382.10000381469729} {548, 421.9000186920166} {601.38076234099412, 436} {713.35941696166992, 443.8999786376952} Style stroke HeadArrow StickArrow Legacy LineType 2 Pattern 1 TailArrow 0 Tail ID 33 Info 2 GridInfo GuidesLocked NO GuidesVisible YES HPages 2 ImageCounter 1 KeepToScale Layers Lock NO Name Layer 1 Print YES View YES LayoutInfo Animate NO circoMinDist 18 circoSeparation 0.0 layoutEngine dot neatoSeparation 0.0 twopiSeparation 0.0 LinksVisible NO MagnetsVisible NO MasterSheets ModificationDate 2015-07-02 23:12:07 +0000 Modifier classic NotesVisible NO Orientation 2 OriginVisible NO OutlineStyle Basic PageBreaks NO PrintInfo NSBottomMargin float 12 NSHorizonalPagination coded BAtzdHJlYW10eXBlZIHoA4QBQISEhAhOU051bWJlcgCEhAdOU1ZhbHVlAISECE5TT2JqZWN0AIWEASqEhAFxlwCG NSLeftMargin float 12 NSPaperSize size {612, 792} NSPrintReverseOrientation int 0 NSRightMargin float 12 NSTopMargin float 12 PrintOnePage ReadOnly NO RowAlign 1 RowSpacing 36 SheetTitle Canvas 1 SmartAlignmentGuidesActive YES SmartDistanceGuidesActive YES UniqueID 1 UseEntirePage VPages 1 WindowInfo CurrentSheet 0 ExpandedCanvases Frame {{130, 128}, {1193, 852}} ListView OutlineWidth 142 RightSidebar Sidebar SidebarWidth 138 VisibleRegion {{-8, 1}, {1193, 755}} Zoom 1 ZoomValues Canvas 1 1 1 alembic-rel_1_7_6/docs/build/autogenerate.rst000066400000000000000000001100361417624537100214040ustar00rootroot00000000000000Auto Generating 
Migrations =========================== Alembic can view the status of the database and compare against the table metadata in the application, generating the "obvious" migrations based on a comparison. This is achieved using the ``--autogenerate`` option to the ``alembic revision`` command, which places so-called *candidate* migrations into our new migrations file. We review and modify these by hand as needed, then proceed normally. To use autogenerate, we first need to modify our ``env.py`` so that it gets access to a table metadata object that contains the target. Suppose our application has a :ref:`declarative base ` in ``myapp.mymodel``. This base contains a :class:`~sqlalchemy.schema.MetaData` object which contains :class:`~sqlalchemy.schema.Table` objects defining our database. We make sure this is loaded in ``env.py`` and then passed to :meth:`.EnvironmentContext.configure` via the ``target_metadata`` argument. The ``env.py`` sample script used in the generic template already has a variable declaration near the top for our convenience, where we replace ``None`` with our :class:`~sqlalchemy.schema.MetaData`. Starting with:: # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel # target_metadata = mymodel.Base.metadata target_metadata = None we change to:: from myapp.mymodel import Base target_metadata = Base.metadata .. note:: The above example refers to the **generic alembic env.py template**, e.g. the one created by default when calling upon ``alembic init``, and not the special-use templates such as ``multidb``. Please consult the source code and comments within the ``env.py`` script directly for specific guidance on where and how the autogenerate metadata is established. If we look later in the script, down in ``run_migrations_online()``, we can see the directive passed to :meth:`.EnvironmentContext.configure`:: def run_migrations_online(): engine = engine_from_config( config.get_section(config.config_ini_section), prefix='sqlalchemy.') with engine.connect() as connection: context.configure( connection=connection, target_metadata=target_metadata ) with context.begin_transaction(): context.run_migrations() We can then use the ``alembic revision`` command in conjunction with the ``--autogenerate`` option. Suppose our :class:`~sqlalchemy.schema.MetaData` contained a definition for the ``account`` table, and the database did not. We'd get output like:: $ alembic revision --autogenerate -m "Added account table" INFO [alembic.context] Detected added table 'account' Generating /path/to/foo/alembic/versions/27c6a30d7c24.py...done We can then view our file ``27c6a30d7c24.py`` and see that a rudimentary migration is already present:: """empty message Revision ID: 27c6a30d7c24 Revises: None Create Date: 2011-11-08 11:40:27.089406 """ # revision identifiers, used by Alembic. revision = '27c6a30d7c24' down_revision = None from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.create_table( 'account', sa.Column('id', sa.Integer()), sa.Column('name', sa.String(length=50), nullable=False), sa.Column('description', sa.VARCHAR(200)), sa.Column('last_transaction_date', sa.DateTime()), sa.PrimaryKeyConstraint('id') ) ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_table("account") ### end Alembic commands ### The migration hasn't actually run yet, of course. We do that via the usual ``upgrade`` command. 
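For example, a typical invocation might look like the following (the output shown here is approximate and will vary with the backend and logging configuration)::

    $ alembic upgrade head
    INFO  [alembic.runtime.migration] Running upgrade  -> 27c6a30d7c24, Added account table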
We should also go into our migration file and alter it as needed, including adjustments to the directives as well as the addition of other directives which these may be dependent on - specifically data changes in between creates/alters/drops. .. _autogenerate_detects: What does Autogenerate Detect (and what does it *not* detect?) -------------------------------------------------------------- The vast majority of user issues with Alembic centers on the topic of what kinds of changes autogenerate can and cannot detect reliably, as well as how it renders Python code for what it does detect. It is critical to note that **autogenerate is not intended to be perfect**. It is *always* necessary to manually review and correct the **candidate migrations** that autogenerate produces. The feature is getting more and more comprehensive and error-free as releases continue, but one should take note of the current limitations. Autogenerate **will detect**: * Table additions, removals. * Column additions, removals. * Change of nullable status on columns. * Basic changes in indexes and explicitly-named unique constraints * Basic changes in foreign key constraints Autogenerate can **optionally detect**: * Change of column type. This will occur if you set the :paramref:`.EnvironmentContext.configure.compare_type` parameter to ``True``. The default implementation will reliably detect major changes, such as between :class:`.Numeric` and :class:`.String`, as well as accommodate for the types generated by SQLAlchemy's "generic" types such as :class:`.Boolean`. Arguments that are shared between both types, such as length and precision values, will also be compared. If either the metadata type or database type has **additional** arguments beyond that of the other type, these are **not** compared, such as if one numeric type featured a "scale" and other type did not, this would be seen as the backing database not supporting the value, or reporting on a default that the metadata did not specify. The type comparison logic is fully extensible as well; see :ref:`compare_types` for details. .. versionchanged:: 1.4 type comparison code has been reworked such that column types are compared based on their rendered DDL, which should allow the functionality enabled by :paramref:`.EnvironmentContext.configure.compare_type` to be much more accurate, correctly accounting for the behavior of SQLAlchemy "generic" types as well as major arguments specified within types. * Change of server default. This will occur if you set the :paramref:`.EnvironmentContext.configure.compare_server_default` parameter to ``True``, or to a custom callable function. This feature works well for simple cases but cannot always produce accurate results. The Postgresql backend will actually invoke the "detected" and "metadata" values against the database to determine equivalence. The feature is off by default so that it can be tested on the target schema first. Like type comparison, it can also be customized by passing a callable; see the function's documentation for details. Autogenerate **can not detect**: * Changes of table name. These will come out as an add/drop of two different tables, and should be hand-edited into a name change instead. * Changes of column name. Like table name changes, these are detected as a column add/drop pair, which is not at all the same as a name change. * Anonymously named constraints. Give your constraints a name, e.g. ``UniqueConstraint('col1', 'col2', name="my_name")``. 
See the section :doc:`naming` for background on how to configure automatic naming schemes for constraints. * Special SQLAlchemy types such as :class:`~sqlalchemy.types.Enum` when generated on a backend which doesn't support ENUM directly - this because the representation of such a type in the non-supporting database, i.e. a CHAR+ CHECK constraint, could be any kind of CHAR+CHECK. For SQLAlchemy to determine that this is actually an ENUM would only be a guess, something that's generally a bad idea. To implement your own "guessing" function here, use the :meth:`sqlalchemy.events.DDLEvents.column_reflect` event to detect when a CHAR (or whatever the target type is) is reflected, and change it to an ENUM (or whatever type is desired) if it is known that that's the intent of the type. The :meth:`sqlalchemy.events.DDLEvents.after_parent_attach` can be used within the autogenerate process to intercept and un-attach unwanted CHECK constraints. Autogenerate can't currently, but **will eventually detect**: * Some free-standing constraint additions and removals may not be supported, including PRIMARY KEY, EXCLUDE, CHECK; these are not necessarily implemented within the autogenerate detection system and also may not be supported by the supporting SQLAlchemy dialect. * Sequence additions, removals - not yet implemented. Autogenerating Multiple MetaData collections -------------------------------------------- The ``target_metadata`` collection may also be defined as a sequence if an application has multiple :class:`~sqlalchemy.schema.MetaData` collections involved:: from myapp.mymodel1 import Model1Base from myapp.mymodel2 import Model2Base target_metadata = [Model1Base.metadata, Model2Base.metadata] The sequence of :class:`~sqlalchemy.schema.MetaData` collections will be consulted in order during the autogenerate process. Note that each :class:`~sqlalchemy.schema.MetaData` must contain **unique** table keys (e.g. the "key" is the combination of the table's name and schema); if two :class:`~sqlalchemy.schema.MetaData` objects contain a table with the same schema/name combination, an error is raised. .. _autogenerate_include_hooks: Controlling What to be Autogenerated ------------------------------------ The autogenerate process scans across all table objects within the database that is referred towards by the current database connection in use. The list of objects that are scanned in the target database connection include: * The "default" schema currently referred towards by the database connection. * If the :paramref:`.EnvironmentContext.configure.include_schemas` is set to ``True``, all non-default "schemas", which are those names returned by the :meth:`~sqlalchemy.engine.reflection.Inspector.get_schema_names` method of :class:`~sqlalchemy.engine.reflection.Inspector`. The SQLAlchemy document :ref:`sqla:schema_table_schema_name` discusses the concept of a "schema" in detail. * Within each "schema", all tables present are scanned using the :meth:`~sqlalchemy.engine.reflection.Inspector.get_table_names` method of :class:`~sqlalchemy.engine.reflection.Inspector`. * Within each "table", most sub-objects of the each :class:`~sqlalchemy.schema.Table` construct are scanned, including columns and some forms of constraints. 
This process ultimately involves the use of methods on :class:`~sqlalchemy.engine.reflection.Inspector` including :meth:`~sqlalchemy.engine.reflection.Inspector.get_columns`, :meth:`~sqlalchemy.engine.reflection.Inspector.get_indexes`, :meth:`~sqlalchemy.engine.reflection.Inspector.get_unique_constraints`, :meth:`~sqlalchemy.engine.reflection.Inspector.get_foreign_keys` (as of this writing, CHECK constraints and primary key constraints are not yet included). Omitting Schema Names from the Autogenerate Process ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ As the above set of database objects are typically to be compared to the contents of a single :class:`~sqlalchemy.schema.MetaData` object, particularly when the :paramref:`.EnvironmentContext.configure.include_schemas` flag is enabled there is an important need to filter out unwanted "schemas", which for some database backends might be the list of all the databases present. This filtering is best performed using the :paramref:`.EnvironmentContext.configure.include_name` hook, which provides for a callable that may return a boolean true/false indicating if a particular schema name should be included:: def include_name(name, type_, parent_names): if type_ == "schema": # note this will not include the default schema return name in ["schema_one", "schema_two"] else: return True context.configure( # ... include_schemas = True, include_name = include_name ) Above, when the list of schema names is first retrieved, the names will be filtered through the above ``include_name`` function so that only schemas named ``"schema_one"`` and ``"schema_two"`` will be considered by the autogenerate process. In order to include **the default schema**, that is, the schema that is referred towards by the database connection **without** any explicit schema being specified, the name passed to the hook is ``None``. To alter our above example to also include the default schema, we compare to ``None`` as well:: def include_name(name, type_, parent_names): if type_ == "schema": # this **will* include the default schema return name in [None, "schema_one", "schema_two"] else: return True context.configure( # ... include_schemas = True, include_name = include_name ) Omitting Table Names from the Autogenerate Process ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The :paramref:`.EnvironmentContext.configure.include_name` hook is also most appropriate to limit the names of tables in the target database to be considered. If a target database has many tables that are not part of the :class:`~sqlalchemy.schema.MetaData`, the autogenerate process will normally assume these are extraneous tables in the database to be dropped, and it will generate a :meth:`.Operations.drop_table` operation for each. To prevent this, the :paramref:`.EnvironmentContext.configure.include_name` hook may be used to search for each name within the :attr:`~sqlalchemy.schema.MetaData.tables` collection of the :class:`~sqlalchemy.schema.MetaData` object and ensure names which aren't present are not included:: target_metadata = MyModel.metadata def include_name(name, type_, parent_names): if type_ == "table": return name in target_metadata.tables else: return True context.configure( # ... target_metadata = target_metadata, include_name = include_name, include_schemas = False ) The above example is limited to table names present in the default schema only. 
In order to search within a :class:`~sqlalchemy.schema.MetaData` collection for schema-qualified table names as well, a table present in the non default schema will be present under a name of the form ``.``. The :paramref:`.EnvironmentContext.configure.include_name` hook will present this schema name on a per-tablename basis in the ``parent_names`` dictionary, using the key ``"schema_name"`` that refers to the name of the schema currently being considered, or ``None`` if the schema is the default schema of the database connection:: # example fragment if parent_names["schema_name"] is None: return name in target_metadata.tables else: # build out schema-qualified name explicitly... return ( "%s.%s" % (parent_names["schema_name"], name) in target_metadata.tables ) However more simply, the ``parent_names`` dictionary will also include the dot-concatenated name already constructed under the key ``"schema_qualified_table_name"``, which will also be suitably formatted for tables in the default schema as well with the dot omitted. So the full example of omitting tables with schema support may look like:: target_metadata = MyModel.metadata def include_name(name, type_, parent_names): if type_ == "schema": return name in [None, "schema_one", "schema_two"] elif type_ == "table": # use schema_qualified_table_name directly return ( parent_names["schema_qualified_table_name"] in target_metadata.tables ) else: return True context.configure( # ... target_metadata = target_metadata, include_name = include_name, include_schemas = True ) The ``parent_names`` dictionary will also include the key ``"table_name"`` when the name being considered is that of a column or constraint object local to a particular table. The :paramref:`.EnvironmentContext.configure.include_name` hook only refers to **reflected** objects, and not those located within the target :class:`~sqlalchemy.schema.MetaData` collection. For more fine-grained rules that include both :class:`~sqlalchemy.schema.MetaData` and reflected object, the :paramref:`.EnvironmentContext.configure.include_object` hook discussed in the next section is more appropriate. .. versionadded:: 1.5 added the :paramref:`.EnvironmentContext.configure.include_name` hook. Omitting Based on Object ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The :paramref:`.EnvironmentContext.configure.include_object` hook provides for object-level inclusion/exclusion rules based on the :class:`~sqlalchemy.schema.Table` object being reflected as well as the elements within it. This hook can be used to limit objects both from the local :class:`~sqlalchemy.schema.MetaData` collection as well as from the target database. The limitation is that when it reports on objects in the database, it will have fully reflected that object, which can be expensive if a large number of objects will be omitted. The example below refers to a fine-grained rule that will skip changes on :class:`~sqlalchemy.schema.Column` objects that have a user-defined flag ``skip_autogenerate`` placed into the :attr:`~sqlalchemy.schema.Column.info` dictionary:: def include_object(object, name, type_, reflected, compare_to): if (type_ == "column" and not reflected and object.info.get("skip_autogenerate", False)): return False else: return True context.configure( # ... 
include_object = include_object ) Comparing and Rendering Types ------------------------------ The area of autogenerate's behavior of comparing and rendering Python-based type objects in migration scripts presents a challenge, in that there's a very wide variety of types to be rendered in scripts, including those part of SQLAlchemy as well as user-defined types. A few options are given to help out with this task. .. _autogen_module_prefix: Controlling the Module Prefix ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ When types are rendered, they are generated with a **module prefix**, so that they are available based on a relatively small number of imports. The rules for what the prefix is is based on the kind of datatype as well as configurational settings. For example, when Alembic renders SQLAlchemy types, it will by default prefix the type name with the prefix ``sa.``:: Column("my_column", sa.Integer()) The use of the ``sa.`` prefix is controllable by altering the value of :paramref:`.EnvironmentContext.configure.sqlalchemy_module_prefix`:: def run_migrations_online(): # ... context.configure( connection=connection, target_metadata=target_metadata, sqlalchemy_module_prefix="sqla.", # ... ) # ... In either case, the ``sa.`` prefix, or whatever prefix is desired, should also be included in the imports section of ``script.py.mako``; it also defaults to ``import sqlalchemy as sa``. For user-defined types, that is, any custom type that is not within the ``sqlalchemy.`` module namespace, by default Alembic will use the **value of __module__ for the custom type**:: Column("my_column", myapp.models.utils.types.MyCustomType()) The imports for the above type again must be made present within the migration, either manually, or by adding it to ``script.py.mako``. The above custom type has a long and cumbersome name based on the use of ``__module__`` directly, which also implies that lots of imports would be needed in order to accommodate lots of types. For this reason, it is recommended that user-defined types used in migration scripts be made available from a single module. Suppose we call it ``myapp.migration_types``:: # myapp/migration_types.py from myapp.models.utils.types import MyCustomType We can first add an import for ``migration_types`` to our ``script.py.mako``:: from alembic import op import sqlalchemy as sa import myapp.migration_types ${imports if imports else ""} We then override Alembic's use of ``__module__`` by providing a fixed prefix, using the :paramref:`.EnvironmentContext.configure.user_module_prefix` option:: def run_migrations_online(): # ... context.configure( connection=connection, target_metadata=target_metadata, user_module_prefix="myapp.migration_types.", # ... ) # ... Above, we now would get a migration like:: Column("my_column", myapp.migration_types.MyCustomType()) Now, when we inevitably refactor our application to move ``MyCustomType`` somewhere else, we only need modify the ``myapp.migration_types`` module, instead of searching and replacing all instances within our migration scripts. .. _autogen_render_types: Affecting the Rendering of Types Themselves ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The methodology Alembic uses to generate SQLAlchemy and user-defined type constructs as Python code is plain old ``__repr__()``. 
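For example, what gets rendered for a built-in type is essentially the value of ``repr()`` for that type object; a quick sketch of this at the Python prompt (output shown here is illustrative and may vary slightly across SQLAlchemy versions)::

    >>> from sqlalchemy import Numeric, String
    >>> repr(Numeric(10, 2))
    'Numeric(precision=10, scale=2)'
    >>> repr(String(50))
    'String(length=50)'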
SQLAlchemy's built-in types for the most part have a ``__repr__()`` that faithfully renders a Python-compatible constructor call, but there are some exceptions, particularly in those cases when a constructor accepts arguments that aren't compatible with ``__repr__()``, such as a pickling function. When building a custom type that will be rendered into a migration script, it is often necessary to explicitly give the type a ``__repr__()`` that will faithfully reproduce the constructor for that type. This, in combination with :paramref:`.EnvironmentContext.configure.user_module_prefix`, is usually enough. However, if additional behaviors are needed, a more comprehensive hook is the :paramref:`.EnvironmentContext.configure.render_item` option. This hook allows one to provide a callable function within ``env.py`` that will fully take over how a type is rendered, including its module prefix:: def render_item(type_, obj, autogen_context): """Apply custom rendering for selected items.""" if type_ == 'type' and isinstance(obj, MySpecialType): return "mypackage.%r" % obj # default rendering for other objects return False def run_migrations_online(): # ... context.configure( connection=connection, target_metadata=target_metadata, render_item=render_item, # ... ) # ... In the above example, we'd ensure our ``MySpecialType`` includes an appropriate ``__repr__()`` method, which is invoked when we call it against ``"%r"``. The callable we use for :paramref:`.EnvironmentContext.configure.render_item` can also add imports to our migration script. The :class:`.AutogenContext` passed in contains a datamember called :attr:`.AutogenContext.imports`, which is a Python ``set()`` for which we can add new imports. For example, if ``MySpecialType`` were in a module called ``mymodel.types``, we can add the import for it as we encounter the type:: def render_item(type_, obj, autogen_context): """Apply custom rendering for selected items.""" if type_ == 'type' and isinstance(obj, MySpecialType): # add import for this type autogen_context.imports.add("from mymodel import types") return "types.%r" % obj # default rendering for other objects return False The finished migration script will include our imports where the ``${imports}`` expression is used, producing output such as:: from alembic import op import sqlalchemy as sa from mymodel import types def upgrade(): op.add_column('sometable', Column('mycolumn', types.MySpecialType())) .. _compare_types: Comparing Types ^^^^^^^^^^^^^^^^ The default type comparison logic will work for SQLAlchemy built in types as well as basic user defined types. This logic is only enabled if the :paramref:`.EnvironmentContext.configure.compare_type` parameter is set to True:: context.configure( # ... compare_type = True ) .. note:: The default type comparison logic (which is end-user extensible) currently (as of Alembic version 1.4.0) works by comparing the generated SQL for a column. It does this in two steps- * First, it compares the outer type of each column such as ``VARCHAR`` or ``TEXT``. Dialect implementations can have synonyms that are considered equivalent- this is because some databases support types by converting them to another type. 
For example, NUMERIC and DECIMAL are considered equivalent on all backends, while on the Oracle backend the additional synonyms BIGINT, INTEGER, NUMBER, SMALLINT are added to this list of equivalents. * Next, the arguments within the type, such as the lengths of strings, precision values for numerics, and the elements inside of an enumeration, are compared. If BOTH columns have arguments AND they are different, a change will be detected. If one column is just set to the default and the other has arguments, Alembic will pass on attempting to compare these. The rationale is that it is difficult to detect what a database backend sets as a default value without generating false positives. .. versionchanged:: 1.4.0 Added the text and keyword comparison for column types. Alternatively, the :paramref:`.EnvironmentContext.configure.compare_type` parameter accepts a callable function which may be used to implement custom type comparison logic, for cases such as where special user defined types are being used:: def my_compare_type(context, inspected_column, metadata_column, inspected_type, metadata_type): # return False if the metadata_type is the same as the inspected_type # or None to allow the default implementation to compare these # types. a return value of True means the two types do not # match and should result in a type change operation. return None context.configure( # ... compare_type = my_compare_type ) Above, ``inspected_column`` is a :class:`sqlalchemy.schema.Column` as returned by :meth:`sqlalchemy.engine.reflection.Inspector.reflect_table`, whereas ``metadata_column`` is a :class:`sqlalchemy.schema.Column` from the local model environment. A return value of ``None`` indicates that default type comparison should proceed. Additionally, custom types that are part of imported or third party packages which have special behaviors such as per-dialect behavior should implement a method called ``compare_against_backend()`` on their SQLAlchemy type. If this method is present, it will be called where it can also return True or False to specify the types compare as equivalent or not; if it returns None, default type comparison logic will proceed:: class MySpecialType(TypeDecorator): # ... def compare_against_backend(self, dialect, conn_type): # return True if this type is the same as the given database type, # or None to allow the default implementation to compare these # types. a return value of False means the given type does not # match this type. if dialect.name == 'postgresql': return isinstance(conn_type, postgresql.UUID) else: return isinstance(conn_type, String) .. warning:: The boolean return values for the above ``compare_against_backend`` method, which is part of SQLAlchemy and not Alembic, are **the opposite** of those of the :paramref:`.EnvironmentContext.configure.compare_type` callable, returning ``True`` for types that are the same vs. ``False`` for types that are different. The :paramref:`.EnvironmentContext.configure.compare_type` callable on the other hand should return ``True`` for types that are **different**. The order of precedence regarding the :paramref:`.EnvironmentContext.configure.compare_type` callable vs. the type itself implementing ``compare_against_backend`` is that the :paramref:`.EnvironmentContext.configure.compare_type` callable is favored first; if it returns ``None``, then the ``compare_against_backend`` method will be used, if present on the metadata type. If that returns ``None``, then a basic check for type equivalence is run. .. versionadded:: 1.4.0 - added column keyword comparisons and the ``type_synonyms`` property.
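As a brief illustrative sketch (``MyCustomType`` and ``myapp.types`` are hypothetical names, not part of Alembic or SQLAlchemy), a :paramref:`.EnvironmentContext.configure.compare_type` callable might special-case a single custom type and defer to the default comparison for everything else::

    import sqlalchemy as sa

    from myapp.types import MyCustomType  # hypothetical custom type

    def compare_custom_type(context, inspected_column,
                            metadata_column, inspected_type, metadata_type):
        if isinstance(metadata_type, MyCustomType):
            # report a difference only if the reflected type is not the
            # expected underlying database representation of the custom type
            return not isinstance(inspected_type, sa.String)

        # returning None defers to Alembic's default comparison logic
        return None

    context.configure(
        # ...
        compare_type=compare_custom_type
    )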
.. _post_write_hooks: Applying Post Processing and Python Code Formatters to Generated Revisions --------------------------------------------------------------------------- Revision scripts generated by the ``alembic revision`` command can optionally be piped through a series of post-production functions which may analyze or rewrite Python source code generated by Alembic, within the scope of running the ``revision`` command. The primary intended use of this feature is to run code-formatting tools such as `Black `_ or `autopep8 `_, as well as custom-written formatting and linter functions, on revision files as Alembic generates them. Any number of hooks can be configured, and they will be run in series, given the path to the newly generated file as well as configuration options. The post write hooks, when configured, run against generated revision files regardless of whether or not the autogenerate feature was used. .. versionadded:: 1.2 .. note:: Alembic's post write system is partially inspired by the `pre-commit `_ tool, which configures git hooks that reformat source files as they are committed to a git repository. Pre-commit can serve this role for Alembic revision files as well, applying code formatters to them as they are committed. Alembic's post write hooks are useful only in that they can format the files immediately upon generation, rather than at commit time, and also can be useful for projects that prefer not to use pre-commit. Basic Formatter Configuration ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``alembic.ini`` samples now include commented-out configuration illustrating how to configure code-formatting tools to run against the newly generated file path. Example:: [post_write_hooks] # format using "black" hooks=black black.type = console_scripts black.entrypoint = black black.options = -l 79 Above, we configure ``hooks`` to be a single post write hook labeled ``"black"``. Note that this label is arbitrary. We then define the configuration for the ``"black"`` post write hook, which includes: * ``type`` - this is the type of hook we are running. Alembic includes a hook runner called ``"console_scripts"``, which is specifically a Python function that uses ``subprocess.run()`` to invoke a separate Python script against the revision file. For a custom-written hook function, this configuration variable would refer to the name under which the custom hook was registered; see the next section for an example. The following configuration options are specific to the ``"console_scripts"`` hook runner: * ``entrypoint`` - the name of the `setuptools entrypoint `_ that is used to define the console script. Within the scope of standard Python console scripts, this name will match the name of the shell command that is usually run for the code formatting tool, in this case ``black``. * ``options`` - a line of command-line options that will be passed to the code formatting tool. In this case, we want to run the command ``black /path/to/revision.py -l 79``. By default, the revision path is positioned as the first argument. In order to specify a different position, we can use the ``REVISION_SCRIPT_FILENAME`` token as illustrated by the subsequent examples. .. note:: Make sure options for the script are provided such that it will rewrite the input file **in place**.
For example, when running ``autopep8``, the ``--in-place`` option should be provided:: [post_write_hooks] hooks = autopep8 autopep8.type = console_scripts autopep8.entrypoint = autopep8 autopep8.options = --in-place REVISION_SCRIPT_FILENAME * ``cwd`` - optional working directory from which the console script is run. When running ``alembic revision -m "rev1"``, we will now see the ``black`` tool's output as well:: $ alembic revision -m "rev1" Generating /path/to/project/versions/481b13bc369a_rev1.py ... done Running post write hook "black" ... reformatted /path/to/project/versions/481b13bc369a_rev1.py All done! ✨ 🍰 ✨ 1 file reformatted. done Hooks may also be specified as a list of names, which correspond to hook runners that will run sequentially. As an example, we can also run the `zimports `_ import rewriting tool (written by Alembic's author) subsequent to running the ``black`` tool, using a configuration as follows:: [post_write_hooks] # format using "black", then "zimports" hooks=black, zimports black.type = console_scripts black.entrypoint = black black.options = -l 79 REVISION_SCRIPT_FILENAME zimports.type = console_scripts zimports.entrypoint = zimports zimports.options = --style google REVISION_SCRIPT_FILENAME When using the above configuration, a newly generated revision file will be processed first by the "black" tool, then by the "zimports" tool. Alternatively, one can run pre-commit itself as follows:: [post_write_hooks] hooks = pre-commit pre-commit.type = console_scripts pre-commit.entrypoint = pre-commit pre-commit.options = run --files REVISION_SCRIPT_FILENAME pre-commit.cwd = %(here)s (The last line helps to ensure that the ``.pre-commit-config.yaml`` file will always be found, regardless of from where the hook was called.) .. _post_write_hooks_custom: Writing Custom Hooks as Python Functions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The previous section illustrated how to run command-line code formatters, through the use of a post write hook provided by Alembic known as ``console_scripts``. This hook is in fact a Python function that is registered under that name using a registration function that may be used to register other types of hooks as well. To illustrate, we will use the example of a short Python function that wants to rewrite the generated code to use tabs instead of four spaces. For simplicity, we will illustrate how this function can be present directly in the ``env.py`` file. The function is declared and registered using the :func:`.write_hooks.register` decorator:: from alembic.script import write_hooks import re @write_hooks.register("spaces_to_tabs") def convert_spaces_to_tabs(filename, options): lines = [] with open(filename) as file_: for line in file_: lines.append( re.sub( r"^( )+", lambda m: "\t" * (len(m.group(1)) // 4), line ) ) with open(filename, "w") as to_write: to_write.write("".join(lines)) Our new ``"spaces_to_tabs"`` hook can be configured in alembic.ini as follows:: [alembic] # ... # ensure the revision command loads env.py revision_environment = true [post_write_hooks] hooks = spaces_to_tabs spaces_to_tabs.type = spaces_to_tabs When ``alembic revision`` is run, the ``env.py`` file will be loaded in all cases, the custom "spaces_to_tabs" function will be registered and it will then be run against the newly generated file path:: $ alembic revision -m "rev1" Generating /path/to/project/versions/481b13bc369a_rev1.py ... done Running post write hook "spaces_to_tabs" ... 
done .. _batch_migrations: Running "Batch" Migrations for SQLite and Other Databases ========================================================= The SQLite database presents a challenge to migration tools in that it has almost no support for the ALTER statement, which relational schema migrations rely upon. The rationale for this stems from philosophical and architectural concerns within SQLite, and they are unlikely to be changed. Migration tools are instead expected to produce copies of SQLite tables that correspond to the new structure, transfer the data from the existing table to the new one, then drop the old table. For our purposes here we'll call this the **"move and copy"** workflow, and in order to accommodate it in a way that is reasonably predictable, while also remaining compatible with other databases, Alembic provides the **batch** operations context. Within this context, a relational table is named, and then a series of mutation operations to that table alone are specified within the block. When the context is complete, a process begins whereby the "move and copy" procedure takes place; the existing table structure is reflected from the database, a new version of this table is created with the given changes, data is copied from the old table to the new table using "INSERT from SELECT", and finally the old table is dropped and the new one renamed to the original name. The :meth:`.Operations.batch_alter_table` method provides the gateway to this process:: with op.batch_alter_table("some_table") as batch_op: batch_op.add_column(Column('foo', Integer)) batch_op.drop_column('bar') When the above directives are invoked within a migration script, on a SQLite backend we would see SQL like: .. sourcecode:: sql CREATE TABLE _alembic_batch_temp ( id INTEGER NOT NULL, foo INTEGER, PRIMARY KEY (id) ); INSERT INTO _alembic_batch_temp (id) SELECT some_table.id FROM some_table; DROP TABLE some_table; ALTER TABLE _alembic_batch_temp RENAME TO some_table; On other backends, we'd see the usual ``ALTER`` statements done as though there were no batch directive - the batch context by default only does the "move and copy" process if SQLite is in use, and if there are migration directives other than :meth:`.Operations.add_column` present, which is the one kind of column-level ALTER statement that SQLite supports. :meth:`.Operations.batch_alter_table` can be configured to run "move and copy" unconditionally in all cases, including on databases other than SQLite; more on this is below. .. _batch_controlling_table_reflection: Controlling Table Reflection ---------------------------- The reflection of the :class:`~sqlalchemy.schema.Table` object that takes place when "move and copy" proceeds is performed using the standard ``autoload=True`` approach. This call can be affected using the :paramref:`~.Operations.batch_alter_table.reflect_args` and :paramref:`~.Operations.batch_alter_table.reflect_kwargs` arguments.
For example, to override a :class:`~sqlalchemy.schema.Column` within the reflection process such that a :class:`~sqlalchemy.types.Boolean` object is reflected with the ``create_constraint`` flag set to ``False``:: with self.op.batch_alter_table( "bar", reflect_args=[Column('flag', Boolean(create_constraint=False))] ) as batch_op: batch_op.alter_column( 'flag', new_column_name='bflag', existing_type=Boolean) Another use case, add a listener to the :class:`~sqlalchemy.schema.Table` as it is reflected so that special logic can be applied to columns or types, using the :meth:`~sqlalchemy.events.DDLEvents.column_reflect` event:: def listen_for_reflect(inspector, table, column_info): "correct an ENUM type" if column_info['name'] == 'my_enum': column_info['type'] = Enum('a', 'b', 'c') with self.op.batch_alter_table( "bar", reflect_kwargs=dict( listeners=[ ('column_reflect', listen_for_reflect) ] ) ) as batch_op: batch_op.alter_column( 'flag', new_column_name='bflag', existing_type=Boolean) The reflection process may also be bypassed entirely by sending a pre-fabricated :class:`~sqlalchemy.schema.Table` object; see :ref:`batch_offline_mode` for an example. .. _sqlite_batch_constraints: Dealing with Constraints ------------------------ There are a variety of issues when using "batch" mode with constraints, such as FOREIGN KEY, CHECK and UNIQUE constraints. This section will attempt to detail many of these scenarios. .. _dropping_sqlite_foreign_keys: Dropping Unnamed or Named Foreign Key Constraints ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ SQLite, unlike any other database, allows constraints to exist in the database that have no identifying name. On all other backends, the target database will always generate some kind of name, if one is not given. The first challenge this represents is that an unnamed constraint can't by itself be targeted by the :meth:`.BatchOperations.drop_constraint` method. An unnamed FOREIGN KEY constraint is implicit whenever the :class:`~sqlalchemy.schema.ForeignKey` or :class:`~sqlalchemy.schema.ForeignKeyConstraint` objects are used without passing them a name. Only on SQLite will these constraints remain entirely unnamed when they are created on the target database; an automatically generated name will be assigned in the case of all other database backends. A second issue is that SQLAlchemy itself has inconsistent behavior in dealing with SQLite constraints as far as names. Prior to version 1.0, SQLAlchemy omits the name of foreign key constraints when reflecting them against the SQLite backend. So even if the target application has gone through the steps to apply names to the constraints as stated in the database, they still aren't targetable within the batch reflection process prior to SQLAlchemy 1.0. Within the scope of batch mode, this presents the issue that the :meth:`.BatchOperations.drop_constraint` method requires a constraint name in order to target the correct constraint. In order to overcome this, the :meth:`.Operations.batch_alter_table` method supports a :paramref:`~.Operations.batch_alter_table.naming_convention` argument, so that all reflected constraints, including foreign keys that are unnamed, or were named but SQLAlchemy isn't loading this name, may be given a name, as described in :ref:`autogen_naming_conventions`. 
Usage is as follows:: naming_convention = { "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s", } with self.op.batch_alter_table( "bar", naming_convention=naming_convention) as batch_op: batch_op.drop_constraint( "fk_bar_foo_id_foo", type_="foreignkey") Note that the naming convention feature requires at least **SQLAlchemy 0.9.4** for support. Including unnamed UNIQUE constraints ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ A similar, but frustratingly slightly different, issue is that in the case of UNIQUE constraints, we again have the issue that SQLite allows unnamed UNIQUE constraints to exist on the database; however, in this case, SQLAlchemy prior to version 1.0 doesn't reflect these constraints at all. It does properly reflect named unique constraints with their names, however. So in this case, the workaround for foreign key names is still not sufficient prior to SQLAlchemy 1.0. If our table includes unnamed unique constraints, and we'd like them to be re-created along with the table, we need to include them directly, which can be done via the :paramref:`~.Operations.batch_alter_table.table_args` argument:: with self.op.batch_alter_table( "bar", table_args=(UniqueConstraint('username'),) ) as batch_op: batch_op.add_column(Column('foo', Integer)) .. _batch_schematype_constraints: Changing the Type of Boolean, Enum and other implicit CHECK datatypes ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The SQLAlchemy types :class:`~sqlalchemy.types.Boolean` and :class:`~sqlalchemy.types.Enum` are part of a category of types known as "schema" types; this style of type creates other structures along with the type itself, most commonly (but not always) a CHECK constraint. Alembic handles dropping and creating the CHECK constraints here automatically, including in the case of batch mode. When changing the type of an existing column, what's necessary is that the existing type be specified fully:: with self.op.batch_alter_table("some_table") as batch_op: batch_op.alter_column( 'q', type_=Integer, existing_type=Boolean(create_constraint=True, constraint_name="ck1")) When dropping a column that includes a named CHECK constraint, as of Alembic 1.7 this named constraint must also be provided using a similar form, as there is no ability for Alembic to otherwise link this reflected CHECK constraint as belonging to a particular column:: with self.op.batch_alter_table("some_table") as batch_op: batch_op.drop_column( 'q', existing_type=Boolean(create_constraint=True, constraint_name="ck1")) .. versionchanged:: 1.7 The :meth:`.BatchOperations.drop_column` operation can accept an ``existing_type`` directive where a "schema type" such as :class:`~sqlalchemy.types.Boolean` and :class:`~sqlalchemy.types.Enum` may be specified such that an associated named constraint can be removed. .. _batch_check_constraints: Including CHECK constraints ^^^^^^^^^^^^^^^^^^^^^^^^^^^ As of Alembic 1.7, **named** CHECK constraints are automatically included in batch mode, as modern SQLAlchemy versions are capable of reflecting these constraints like any other constraint. Note that when dropping or renaming a column that is mentioned in a named CHECK constraint, this CHECK constraint must be explicitly dropped first, as Alembic has no means of linking a reflected CHECK constraint to that column. Supposing column ``q`` of ``some_table`` were mentioned in a CHECK constraint named ``ck1``.
In order to drop this column, we have to drop the check constraint also:: with self.op.batch_alter_table("some_table") as batch_op: batch_op.drop_constraint("ck1", "check") batch_op.drop_column('q') .. versionchanged:: 1.7 Named CHECK constraints participate in batch mode in the same way as any other kind of constraint. This requires that column drops or renames now include explicit directives to drop an existing named constraint which refers to this column, as it will otherwise not be automatically detected as being associated with that particular column. Unnamed CHECK constraints continue to be silently omitted from the table recreate operation. For **unnamed** CHECK constraints, these are still not automatically included as part of the batch process. Note that this limitation **includes** the CHECK constraints generated by the :class:`~sqlalchemy.types.Boolean` or :class:`~sqlalchemy.types.Enum` datatypes, which up through SQLAlchemy 1.3 would generate CHECK constraints automatically and cannot be tracked to the reflected table, assuming they are generated in an unnamed way. Unnamed constraints can be stated explicitly if they are to be included in the recreated table:: with op.batch_alter_table("some_table", table_args=[ CheckConstraint('x > 5') ]) as batch_op: batch_op.add_column(Column('foo', Integer)) batch_op.drop_column('bar') The above step need only be taken for CHECK constraints that are explicitly stated as part of the table definition. For CHECK constraints that are generated by datatypes such as :class:`~sqlalchemy.types.Boolean` or :class:`~sqlalchemy.types.Enum`, the type objects themselves **must be named** in order for their CHECK constraints to be included in the batch process. Boolean and Enum datatypes that do not have the ``.name`` attribute set will **not** have CHECK constraints regenerated. This name can be set by specifying the ``.name`` parameter or by using a named Python ``Enum`` object as the source of enumeration. Dealing with Referencing Foreign Keys ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ It is important to note that batch table operations **do not work** with foreign keys that enforce referential integrity. This is because the target table is dropped; if foreign keys refer to it, this will raise an error. On SQLite, whether or not foreign keys are actually enforced is controlled by the ``PRAGMA FOREIGN KEYS`` pragma; this pragma, if in use, must be disabled when the workflow mode proceeds. When the operation is complete, the batch-migrated table will have the same name that it started with, so those referring foreign keys will again refer to this table. A special case is dealing with self-referring foreign keys. Here, Alembic takes a special step of recreating the self-referring foreign key as referring to the original table name, rather than to the "temp" table, so that, as in the case of other foreign key constraints, when the table is renamed to its original name, the foreign key again references the correct table. This operation only works when referential integrity is disabled, consistent with the same requirement for referring foreign keys from other tables. When SQLite's ``PRAGMA FOREIGN KEYS`` mode is turned on, it does provide the service that foreign key constraints, including self-referential ones, will automatically be modified to point to their table across table renames; however, this mode prevents the target table from being dropped as is required by a batch migration.
Therefore it may be necessary to manipulate the ``PRAGMA FOREIGN KEYS`` setting if a migration seeks to rename a table vs. batch migrate it. .. _batch_offline_mode: Working in Offline Mode ----------------------- In the preceding sections, we've seen how much of an emphasis the "move and copy" process has on using reflection in order to know the structure of the table that is to be copied. This means that in the typical case, "online" mode, where a live database connection is present so that :meth:`.Operations.batch_alter_table` can reflect the table from the database, is required; the ``--sql`` flag **cannot** be used without extra steps. To support offline mode, the system must work without table reflection present, which means the full table as it intends to be created must be passed to :meth:`.Operations.batch_alter_table` using :paramref:`~.Operations.batch_alter_table.copy_from`:: meta = MetaData() some_table = Table( 'some_table', meta, Column('id', Integer, primary_key=True), Column('bar', String(50)) ) with op.batch_alter_table("some_table", copy_from=some_table) as batch_op: batch_op.add_column(Column('foo', Integer)) batch_op.drop_column('bar') The above use pattern is pretty tedious and quite far off from Alembic's preferred style of working; however, if one needs to do SQLite-compatible "move and copy" migrations and need them to generate flat SQL files in "offline" mode, there's not much alternative. Batch mode with Autogenerate ---------------------------- The syntax of batch mode is essentially that :meth:`.Operations.batch_alter_table` is used to enter a batch block, and the returned :class:`.BatchOperations` context works just like the regular :class:`.Operations` context, except that the "table name" and "schema name" arguments are omitted. To support rendering of migration commands in batch mode for autogenerate, configure the :paramref:`.EnvironmentContext.configure.render_as_batch` flag in ``env.py``:: context.configure( connection=connection, target_metadata=target_metadata, render_as_batch=True ) Autogenerate will now generate along the lines of:: def upgrade(): ### commands auto generated by Alembic - please adjust! ### with op.batch_alter_table('address', schema=None) as batch_op: batch_op.add_column(sa.Column('street', sa.String(length=50), nullable=True)) This mode is safe to use in all cases, as the :meth:`.Operations.batch_alter_table` directive by default only takes place for SQLite; other backends will behave just as they normally do in the absence of the batch directives. Note that autogenerate support does not include "offline" mode, where the :paramref:`.Operations.batch_alter_table.copy_from` parameter is used. The table definition here would need to be entered into migration files manually if this is needed. Batch mode with databases other than SQLite -------------------------------------------- There's an odd use case some shops have, where the "move and copy" style of migration is useful in some cases for databases that do already support ALTER. There's some cases where an ALTER operation may block access to the table for a long time, which might not be acceptable. "move and copy" can be made to work on other backends, though with a few extra caveats. 
The batch mode directive will run the "recreate" system regardless of backend if the flag ``recreate='always'`` is passed:: with op.batch_alter_table("some_table", recreate='always') as batch_op: batch_op.add_column(Column('foo', Integer)) The issues that arise in this mode are mostly to do with constraints. Databases such as Postgresql and MySQL with InnoDB will enforce referential integrity (e.g. via foreign keys) in all cases. Unlike with SQLite, it's not as simple to turn off referential integrity across the board (nor would it be desirable). Since a new table is replacing the old one, existing foreign key constraints which refer to the target table will need to be unconditionally dropped before the batch operation, and re-created to refer to the new table afterwards. Batch mode currently does not provide any automation for this. The Postgresql database, and possibly others, also has the behavior such that when the new table is created, a naming conflict occurs with the named constraints of the new table, in that they match those of the old table; on Postgresql, these names need to be unique across all tables. The Postgresql dialect will therefore emit a "DROP CONSTRAINT" directive for all constraints on the old table before the new one is created; this is "safe" in case of a failed operation because Postgresql also supports transactional DDL. Note also that, as is the case with SQLite, CHECK constraints need to be moved over between the old and new table manually using the :paramref:`.Operations.batch_alter_table.table_args` parameter. .. _branches: Working with Branches ===================== A **branch** describes a point in a migration stream when two or more versions refer to the same parent migration as their ancestor. Branches occur naturally when two divergent source trees, both containing Alembic revision files created independently within those source trees, are merged together into one. When this occurs, the challenge of a branch is to **merge** the branches into a single series of changes, so that databases established from either source tree individually can be upgraded to reference the merged result equally. Another scenario where branches are present is when we create them directly; either at some point in the migration stream we'd like different series of migrations to be managed independently (e.g. we create a tree), or we'd like separate migration streams for different features starting at the root (e.g. a *forest*). We'll illustrate all of these cases, starting with the most common, which is a source-merge-originated branch that we'll merge. Starting with the "account table" example we began in :ref:`create_migration`, assume we have our basemost version ``1975ea83b712``, which leads into the second revision ``ae1027a6acf``, and the migration files for these two revisions are checked into our source repository. Consider if we merged into our source repository another code branch which contained a revision for another table called ``shopping_cart``. This revision was made against our first Alembic revision, the one that generated ``account``. After loading the second source tree in, a new file ``27c6a30d7c24_add_shopping_cart_table.py`` exists within our ``versions`` directory. Both it, as well as ``ae1027a6acf_add_a_column.py``, reference ``1975ea83b712_add_account_table.py`` as the "downgrade" revision.
To illustrate:: # main source tree: 1975ea83b712 (create account table) -> ae1027a6acf (add a column) # branched source tree 1975ea83b712 (create account table) -> 27c6a30d7c24 (add shopping cart table) Above, we can see ``1975ea83b712`` is our **branch point**; two distinct versions both refer to it as its parent. The Alembic command ``branches`` illustrates this fact:: $ alembic branches --verbose Rev: 1975ea83b712 (branchpoint) Parent: Branches into: 27c6a30d7c24, ae1027a6acf Path: foo/versions/1975ea83b712_add_account_table.py create account table Revision ID: 1975ea83b712 Revises: Create Date: 2014-11-20 13:02:46.257104 -> 27c6a30d7c24 (head), add shopping cart table -> ae1027a6acf (head), add a column History shows it too, illustrating two ``head`` entries as well as a ``branchpoint``:: $ alembic history 1975ea83b712 -> 27c6a30d7c24 (head), add shopping cart table 1975ea83b712 -> ae1027a6acf (head), add a column -> 1975ea83b712 (branchpoint), create account table We can get a view of just the current heads using ``alembic heads``:: $ alembic heads --verbose Rev: 27c6a30d7c24 (head) Parent: 1975ea83b712 Path: foo/versions/27c6a30d7c24_add_shopping_cart_table.py add shopping cart table Revision ID: 27c6a30d7c24 Revises: 1975ea83b712 Create Date: 2014-11-20 13:03:11.436407 Rev: ae1027a6acf (head) Parent: 1975ea83b712 Path: foo/versions/ae1027a6acf_add_a_column.py add a column Revision ID: ae1027a6acf Revises: 1975ea83b712 Create Date: 2014-11-20 13:02:54.849677 If we try to run an ``upgrade`` to the usual end target of ``head``, Alembic no longer considers this to be an unambiguous command. As we have more than one ``head``, the ``upgrade`` command wants us to provide more information:: $ alembic upgrade head FAILED: Multiple head revisions are present for given argument 'head'; please specify a specific target revision, '@head' to narrow to a specific head, or 'heads' for all heads The ``upgrade`` command gives us quite a few options in which we can proceed with our upgrade, either giving it information on *which* head we'd like to upgrade towards, or alternatively stating that we'd like *all* heads to be upgraded towards at once. However, in the typical case of two source trees being merged, we will want to pursue a third option, which is that we can **merge** these branches. Merging Branches ---------------- An Alembic merge is a migration file that joins two or more "head" files together. If the two branches we have right now can be said to be a "tree" structure, introducing this merge file will turn it into a "diamond" structure:: -- ae1027a6acf --> / \ --> 1975ea83b712 --> --> mergepoint \ / -- 27c6a30d7c24 --> We create the merge file using ``alembic merge``; with this command, we can pass to it an argument such as ``heads``, meaning we'd like to merge all heads. Or, we can pass it individual revision numbers sequentally:: $ alembic merge -m "merge ae1 and 27c" ae1027 27c6a Generating /path/to/foo/versions/53fffde5ad5_merge_ae1_and_27c.py ... done Looking inside the new file, we see it as a regular migration file, with the only new twist is that ``down_revision`` points to both revisions:: """merge ae1 and 27c Revision ID: 53fffde5ad5 Revises: ae1027a6acf, 27c6a30d7c24 Create Date: 2014-11-20 13:31:50.811663 """ # revision identifiers, used by Alembic. 
revision = '53fffde5ad5' down_revision = ('ae1027a6acf', '27c6a30d7c24') branch_labels = None from alembic import op import sqlalchemy as sa def upgrade(): pass def downgrade(): pass This file is a regular migration file, and if we wish to, we may place :class:`.Operations` directives into the ``upgrade()`` and ``downgrade()`` functions like any other migration file. Though it is probably best to limit the instructions placed here only to those that deal with any kind of reconciliation that is needed between the two merged branches, if any. The ``heads`` command now illustrates that the multiple heads in our ``versions/`` directory have been resolved into our new head:: $ alembic heads --verbose Rev: 53fffde5ad5 (head) (mergepoint) Merges: ae1027a6acf, 27c6a30d7c24 Path: foo/versions/53fffde5ad5_merge_ae1_and_27c.py merge ae1 and 27c Revision ID: 53fffde5ad5 Revises: ae1027a6acf, 27c6a30d7c24 Create Date: 2014-11-20 13:31:50.811663 History shows a similar result, as the mergepoint becomes our head:: $ alembic history ae1027a6acf, 27c6a30d7c24 -> 53fffde5ad5 (head) (mergepoint), merge ae1 and 27c 1975ea83b712 -> ae1027a6acf, add a column 1975ea83b712 -> 27c6a30d7c24, add shopping cart table -> 1975ea83b712 (branchpoint), create account table With a single ``head`` target, a generic ``upgrade`` can proceed:: $ alembic upgrade head INFO [alembic.migration] Context impl PostgresqlImpl. INFO [alembic.migration] Will assume transactional DDL. INFO [alembic.migration] Running upgrade -> 1975ea83b712, create account table INFO [alembic.migration] Running upgrade 1975ea83b712 -> 27c6a30d7c24, add shopping cart table INFO [alembic.migration] Running upgrade 1975ea83b712 -> ae1027a6acf, add a column INFO [alembic.migration] Running upgrade ae1027a6acf, 27c6a30d7c24 -> 53fffde5ad5, merge ae1 and 27c .. topic:: merge mechanics The upgrade process traverses through all of our migration files using a **topological sorting** algorithm, treating the list of migration files not as a linked list, but as a **directed acyclic graph**. The starting points of this traversal are the **current heads** within our database, and the end point is the "head" revision or revisions specified. When a migration proceeds across a point at which there are multiple heads, the ``alembic_version`` table will at that point store *multiple* rows, one for each head. Our migration process above will emit SQL against ``alembic_version`` along these lines: .. sourcecode:: sql -- Running upgrade -> 1975ea83b712, create account table INSERT INTO alembic_version (version_num) VALUES ('1975ea83b712') -- Running upgrade 1975ea83b712 -> 27c6a30d7c24, add shopping cart table UPDATE alembic_version SET version_num='27c6a30d7c24' WHERE alembic_version.version_num = '1975ea83b712' -- Running upgrade 1975ea83b712 -> ae1027a6acf, add a column INSERT INTO alembic_version (version_num) VALUES ('ae1027a6acf') -- Running upgrade ae1027a6acf, 27c6a30d7c24 -> 53fffde5ad5, merge ae1 and 27c DELETE FROM alembic_version WHERE alembic_version.version_num = 'ae1027a6acf' UPDATE alembic_version SET version_num='53fffde5ad5' WHERE alembic_version.version_num = '27c6a30d7c24' At the point at which both ``27c6a30d7c24`` and ``ae1027a6acf`` exist within our database, both values are present in ``alembic_version``, which now has two rows. 
If we upgrade to these two versions alone, then stop and run ``alembic current``, we will see this:: $ alembic current --verbose Current revision(s) for postgresql://scott:XXXXX@localhost/test: Rev: ae1027a6acf Parent: 1975ea83b712 Path: foo/versions/ae1027a6acf_add_a_column.py add a column Revision ID: ae1027a6acf Revises: 1975ea83b712 Create Date: 2014-11-20 13:02:54.849677 Rev: 27c6a30d7c24 Parent: 1975ea83b712 Path: foo/versions/27c6a30d7c24_add_shopping_cart_table.py add shopping cart table Revision ID: 27c6a30d7c24 Revises: 1975ea83b712 Create Date: 2014-11-20 13:03:11.436407 A key advantage to the ``merge`` process is that it will run equally well on databases that were present on version ``ae1027a6acf`` alone, versus databases that were present on version ``27c6a30d7c24`` alone; whichever version was not yet applied, will be applied before the merge point can be crossed. This brings forth a way of thinking about a merge file, as well as about any Alembic revision file. As they are considered to be "nodes" within a set that is subject to topological sorting, each "node" is a point that cannot be crossed until all of its dependencies are satisfied. Prior to Alembic's support of merge points, the use case of databases sitting on different heads was basically impossible to reconcile; having to manually splice the head files together invariably meant that one migration would occur before the other, thus being incompatible with databases that were present on the other migration. Working with Explicit Branches ------------------------------ The ``alembic upgrade`` command hinted at other options besides merging when dealing with multiple heads. Let's back up and assume we're back where we have as our heads just ``ae1027a6acf`` and ``27c6a30d7c24``:: $ alembic heads 27c6a30d7c24 ae1027a6acf Earlier, when we did ``alembic upgrade head``, it gave us an error which suggested ``please specify a specific target revision, '@head' to narrow to a specific head, or 'heads' for all heads`` in order to proceed without merging. Let's cover those cases. Referring to all heads at once ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``heads`` identifier is a lot like ``head``, except it explicitly refers to *all* heads at once. That is, it's like telling Alembic to do the operation for both ``ae1027a6acf`` and ``27c6a30d7c24`` simultaneously. If we started from a fresh database and ran ``upgrade heads`` we'd see:: $ alembic upgrade heads INFO [alembic.migration] Context impl PostgresqlImpl. INFO [alembic.migration] Will assume transactional DDL. INFO [alembic.migration] Running upgrade -> 1975ea83b712, create account table INFO [alembic.migration] Running upgrade 1975ea83b712 -> ae1027a6acf, add a column INFO [alembic.migration] Running upgrade 1975ea83b712 -> 27c6a30d7c24, add shopping cart table Since we've upgraded to ``heads``, and we do in fact have more than one head, that means these two distinct heads are now in our ``alembic_version`` table. We can see this if we run ``alembic current``:: $ alembic current ae1027a6acf (head) 27c6a30d7c24 (head) That means there's two rows in ``alembic_version`` right now. 
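The same information can also be obtained programmatically; the ``MigrationContext`` API will report the current head revisions stored in ``alembic_version`` when given a live connection (a minimal sketch; the database URL is hypothetical)::

    from sqlalchemy import create_engine

    from alembic.runtime.migration import MigrationContext

    engine = create_engine("postgresql://scott:tiger@localhost/test")
    with engine.connect() as conn:
        migration_ctx = MigrationContext.configure(conn)
        # e.g. ('ae1027a6acf', '27c6a30d7c24') at this point
        print(migration_ctx.get_current_heads())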
If we downgrade one step at a time, Alembic will **delete** from the ``alembic_version`` table each branch that's closed out, until only one branch remains; then it will continue updating the single value down to the previous versions:: $ alembic downgrade -1 INFO [alembic.migration] Running downgrade ae1027a6acf -> 1975ea83b712, add a column $ alembic current 27c6a30d7c24 (head) $ alembic downgrade -1 INFO [alembic.migration] Running downgrade 27c6a30d7c24 -> 1975ea83b712, add shopping cart table $ alembic current 1975ea83b712 (branchpoint) $ alembic downgrade -1 INFO [alembic.migration] Running downgrade 1975ea83b712 -> , create account table $ alembic current Referring to a Specific Version ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ We can pass a specific version number to ``upgrade``. Alembic will ensure that all revisions upon which this version depends are invoked, and nothing more. So if we ``upgrade`` either to ``27c6a30d7c24`` or ``ae1027a6acf`` specifically, it guarantees that ``1975ea83b712`` will have been applied, but not that any "sibling" versions are applied:: $ alembic upgrade 27c6a INFO [alembic.migration] Running upgrade -> 1975ea83b712, create account table INFO [alembic.migration] Running upgrade 1975ea83b712 -> 27c6a30d7c24, add shopping cart table With ``1975ea83b712`` and ``27c6a30d7c24`` applied, ``ae1027a6acf`` is just a single additional step:: $ alembic upgrade ae102 INFO [alembic.migration] Running upgrade 1975ea83b712 -> ae1027a6acf, add a column Working with Branch Labels ^^^^^^^^^^^^^^^^^^^^^^^^^^ To satisfy the use case where an environment has long-lived branches, especially independent branches as will be discussed in the next section, Alembic supports the concept of **branch labels**. These are string values that are present within the migration file, using the new identifier ``branch_labels``. For example, if we want to refer to the "shopping cart" branch using the name "shoppingcart", we can add that name to our file ``27c6a30d7c24_add_shopping_cart_table.py``:: """add shopping cart table """ # revision identifiers, used by Alembic. revision = '27c6a30d7c24' down_revision = '1975ea83b712' branch_labels = ('shoppingcart',) # ... The ``branch_labels`` attribute refers to a string name, or a tuple of names, which will now apply to this revision, all descendants of this revision, as well as all ancestors of this revision up until the preceding branch point, in this case ``1975ea83b712``. We can see the ``shoppingcart`` label applied to this revision:: $ alembic history 1975ea83b712 -> 27c6a30d7c24 (shoppingcart) (head), add shopping cart table 1975ea83b712 -> ae1027a6acf (head), add a column -> 1975ea83b712 (branchpoint), create account table With the label applied, the name ``shoppingcart`` now serves as an alias for the ``27c6a30d7c24`` revision specifically. We can illustrate this by showing it with ``alembic show``:: $ alembic show shoppingcart Rev: 27c6a30d7c24 (head) Parent: 1975ea83b712 Branch names: shoppingcart Path: foo/versions/27c6a30d7c24_add_shopping_cart_table.py add shopping cart table Revision ID: 27c6a30d7c24 Revises: 1975ea83b712 Create Date: 2014-11-20 13:03:11.436407 However, when using branch labels, we usually want to use them using a syntax known as "branch at" syntax; this syntax allows us to state that we want to use a specific revision, let's say a "head" revision, in terms of a *specific* branch. 
While normally, we can't refer to ``alembic upgrade head`` when there's multiple heads, we *can* refer to this head specifcally using ``shoppingcart@head`` syntax:: $ alembic upgrade shoppingcart@head INFO [alembic.migration] Running upgrade 1975ea83b712 -> 27c6a30d7c24, add shopping cart table The ``shoppingcart@head`` syntax becomes important to us if we wish to add new migration files to our versions directory while maintaining multiple branches. Just like the ``upgrade`` command, if we attempted to add a new revision file to our multiple-heads layout without a specific parent revision, we'd get a familiar error:: $ alembic revision -m "add a shopping cart column" FAILED: Multiple heads are present; please specify the head revision on which the new revision should be based, or perform a merge. The ``alembic revision`` command is pretty clear in what we need to do; to add our new revision specifically to the ``shoppingcart`` branch, we use the ``--head`` argument, either with the specific revision identifier ``27c6a30d7c24``, or more generically using our branchname ``shoppingcart@head``:: $ alembic revision -m "add a shopping cart column" --head shoppingcart@head Generating /path/to/foo/versions/d747a8a8879_add_a_shopping_cart_column.py ... done ``alembic history`` shows both files now part of the ``shoppingcart`` branch:: $ alembic history 1975ea83b712 -> ae1027a6acf (head), add a column 27c6a30d7c24 -> d747a8a8879 (shoppingcart) (head), add a shopping cart column 1975ea83b712 -> 27c6a30d7c24 (shoppingcart), add shopping cart table -> 1975ea83b712 (branchpoint), create account table We can limit our history operation just to this branch as well:: $ alembic history -r shoppingcart: 27c6a30d7c24 -> d747a8a8879 (shoppingcart) (head), add a shopping cart column 1975ea83b712 -> 27c6a30d7c24 (shoppingcart), add shopping cart table If we want to illustrate the path of ``shoppingcart`` all the way from the base, we can do that as follows:: $ alembic history -r :shoppingcart@head 27c6a30d7c24 -> d747a8a8879 (shoppingcart) (head), add a shopping cart column 1975ea83b712 -> 27c6a30d7c24 (shoppingcart), add shopping cart table -> 1975ea83b712 (branchpoint), create account table We can run this operation from the "base" side as well, but we get a different result:: $ alembic history -r shoppingcart@base: 1975ea83b712 -> ae1027a6acf (head), add a column 27c6a30d7c24 -> d747a8a8879 (shoppingcart) (head), add a shopping cart column 1975ea83b712 -> 27c6a30d7c24 (shoppingcart), add shopping cart table -> 1975ea83b712 (branchpoint), create account table When we list from ``shoppingcart@base`` without an endpoint, it's really shorthand for ``-r shoppingcart@base:heads``, e.g. all heads, and since ``shoppingcart@base`` is the same "base" shared by the ``ae1027a6acf`` revision, we get that revision in our listing as well. The ``@base`` syntax can be useful when we are dealing with individual bases, as we'll see in the next section. The ``@head`` format can also be used with revision numbers instead of branch names, though this is less convenient. If we wanted to add a new revision to our branch that includes the un-labeled ``ae1027a6acf``, if this weren't a head already, we could ask for the "head of the branch that includes ``ae1027a6acf``" as follows:: $ alembic revision -m "add another account column" --head ae10@head Generating /path/to/foo/versions/55af2cb1c267_add_another_account_column.py ... 
done More Label Syntaxes ^^^^^^^^^^^^^^^^^^^ The ``heads`` symbol can be combined with a branch label, in the case that your labeled branch itself breaks off into multiple branches:: $ alembic upgrade shoppingcart@heads Relative identifiers, as introduced in :ref:`relative_migrations`, work with labels too. For example, upgrading to ``shoppingcart@+2`` means to upgrade from current heads on "shoppingcart" upwards two revisions:: $ alembic upgrade shoppingcart@+2 This kind of thing works from history as well:: $ alembic history -r current:shoppingcart@+2 The newer ``relnum+delta`` format can be combined as well, for example if we wanted to list along ``shoppingcart`` up until two revisions before the head:: $ alembic history -r :shoppingcart@head-2 .. _multiple_bases: Working with Multiple Bases --------------------------- .. note:: The multiple base feature is intended to allow for multiple Alembic versioning lineages which **share the same alembic_version table**. This is so that individual revisions within the lineages can have cross-dependencies on each other. For the simpler case where one project has multiple, **completely independent** revision lineages that refer to **separate** alembic_version tables, see the example in :ref:`multiple_environments`. We've seen in the previous section that ``alembic upgrade`` is fine if we have multiple heads, ``alembic revision`` allows us to tell it which "head" we'd like to associate our new revision file with, and branch labels allow us to assign names to branches that we can use in subsequent commands. Let's put all these together and refer to a new "base", that is, a whole new tree of revision files that will be semi-independent of the account/shopping cart revisions we've been working with. This new tree will deal with database tables involving "networking". .. _multiple_version_directories: Setting up Multiple Version Directories ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ While optional, it is often the case that when working with multiple bases, we'd like different sets of version files to exist within their own directories; typically, if an application is organized into several sub-modules, each one would have a version directory containing migrations pertinent to that module. So to start out, we can edit ``alembic.ini`` to refer to multiple directories; we'll also state the current ``versions`` directory as one of them:: # A separator for the location paths must be defined first. version_path_separator = os # Use os.pathsep. # version location specification; this defaults # to foo/versions. When using multiple version # directories, initial revisions must be specified with --version-path version_locations = %(here)s/model/networking:%(here)s/alembic/versions The new directory ``%(here)s/model/networking`` is in terms of where the ``alembic.ini`` file is, as we are using the symbol ``%(here)s`` which resolves to this location. When we create our first new revision targeted at this directory, ``model/networking`` will be created automatically if it does not exist yet. Once we've created a revision here, the path is used automatically when generating subsequent revision files that refer to this revision tree. Creating a Labeled Base Revision ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ We also want our new branch to have its own name, and for that we want to apply a branch label to the base. 
In order to achieve this using the ``alembic revision`` command without editing, we need to ensure our ``script.py.mako`` file, used for generating new revision files, has the appropriate substitutions present. If Alembic version 0.7.0 or greater was used to generate the original migration environment, this is already done. However, when working with an older environment, ``script.py.mako`` needs to have this directive added, typically underneath the ``down_revision`` directive:: # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} # add this here in order to use revision with branch_label branch_labels = ${repr(branch_labels)} With this in place, we can create a new revision file, starting up a branch that will deal with database tables involving networking; we specify the ``--head`` version of ``base``, a ``--branch-label`` of ``networking``, and the directory we want this first revision file to be placed in with ``--version-path``:: $ alembic revision -m "create networking branch" --head=base --branch-label=networking --version-path=model/networking Creating directory /path/to/foo/model/networking ... done Generating /path/to/foo/model/networking/3cac04ae8714_create_networking_branch.py ... done If we ran the above command and we didn't have the newer ``script.py.mako`` directive, we'd get this error:: FAILED: Version 3cac04ae8714 specified branch_labels networking, however the migration file foo/model/networking/3cac04ae8714_create_networking_branch.py does not have them; have you upgraded your script.py.mako to include the 'branch_labels' section? When we receive the above error and we would like to try again, we need to either **delete** the incorrectly generated file in order to run ``revision`` again, *or* we can edit the ``3cac04ae8714_create_networking_branch.py`` file directly to add the ``branch_labels`` entry of our choosing. Running with Multiple Bases ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Once we have a new, permanent (for as long as we desire it to be) base in our system, we'll always have multiple heads present:: $ alembic heads 3cac04ae8714 (networking) (head) 27c6a30d7c24 (shoppingcart) (head) ae1027a6acf (head) When we want to add a new revision file to ``networking``, we specify ``networking@head`` as the ``--head``. The appropriate version directory is now selected automatically based on the head we choose:: $ alembic revision -m "add ip number table" --head=networking@head Generating /path/to/foo/model/networking/109ec7d132bf_add_ip_number_table.py ... done It's important that we refer to the head using ``networking@head``; if we only refer to ``networking``, that refers to only ``3cac04ae8714`` specifically; if we specify this and it's not a head, ``alembic revision`` will refuse, in case we really meant to target the head:: $ alembic revision -m "add DNS table" --head=networking FAILED: Revision 3cac04ae8714 is not a head revision; please specify --splice to create a new branch from this revision As mentioned earlier, as this base is independent, we can view its history from the base using ``history -r networking@base:``:: $ alembic history -r networking@base: 109ec7d132bf -> 29f859a13ea (networking) (head), add DNS table 3cac04ae8714 -> 109ec7d132bf (networking), add ip number table -> 3cac04ae8714 (networking), create networking branch At the moment, this is the same output we'd get at this point if we used ``-r :networking@head``. However, that will change later on as we use additional directives.
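For reference, the newly generated base revision file is an ordinary migration script whose only special feature is the ``branch_labels`` value. The following is only a rough sketch of what ``3cac04ae8714_create_networking_branch.py`` might look like; the exact rendering of ``branch_labels`` (plain string vs. tuple) depends on your ``script.py.mako`` template, and the table created in ``upgrade()`` is invented for illustration rather than generated output::

    """create networking branch

    Revision ID: 3cac04ae8714
    Revises:
    """
    from alembic import op
    import sqlalchemy as sa

    # revision identifiers, used by Alembic.
    revision = '3cac04ae8714'
    down_revision = None
    branch_labels = ('networking',)  # applied via --branch-label=networking
    depends_on = None


    def upgrade():
        # hypothetical first table for the "networking" tree
        op.create_table(
            'network',
            sa.Column('id', sa.Integer, primary_key=True),
            sa.Column('name', sa.String(100)),
        )


    def downgrade():
        op.drop_table('network')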
We may now run upgrades or downgrades freely among individual branches (let's assume a clean database again):: $ alembic upgrade networking@head INFO [alembic.migration] Running upgrade -> 3cac04ae8714, create networking branch INFO [alembic.migration] Running upgrade 3cac04ae8714 -> 109ec7d132bf, add ip number table INFO [alembic.migration] Running upgrade 109ec7d132bf -> 29f859a13ea, add DNS table or against the whole thing using ``heads``:: $ alembic upgrade heads INFO [alembic.migration] Running upgrade -> 1975ea83b712, create account table INFO [alembic.migration] Running upgrade 1975ea83b712 -> 27c6a30d7c24, add shopping cart table INFO [alembic.migration] Running upgrade 27c6a30d7c24 -> d747a8a8879, add a shopping cart column INFO [alembic.migration] Running upgrade 1975ea83b712 -> ae1027a6acf, add a column INFO [alembic.migration] Running upgrade ae1027a6acf -> 55af2cb1c267, add another account column Branch Dependencies ------------------- When working with multiple roots, it is expected that these different revision streams will need to refer to one another. For example, a new revision in ``networking`` which needs to refer to the ``account`` table will want to establish ``55af2cb1c267, add another account column``, the last revision that works with the account table, as a dependency. From a graph perspective, this means nothing more than that the new file will feature both ``55af2cb1c267, add another account column`` and ``29f859a13ea, add DNS table`` as "down" revisions, and looks just as though we had merged these two branches together. However, we don't want to consider these as "merged"; we want the two revision streams to *remain independent*, even though a version in ``networking`` is going to reach over into the other stream. To support this use case, Alembic provides a directive known as ``depends_on``, which allows a revision file to refer to another as a "dependency", very similar to an entry in ``down_revision`` from a graph perspective, but different from a semantic perspective. To use ``depends_on``, we can specify it as part of our ``alembic revision`` command:: $ alembic revision -m "add ip account table" --head=networking@head --depends-on=55af2cb1c267 Generating /path/to/foo/model/networking/2a95102259be_add_ip_account_table.py ... done Within our migration file, we'll see this new directive present:: # revision identifiers, used by Alembic. revision = '2a95102259be' down_revision = '29f859a13ea' branch_labels = None depends_on = '55af2cb1c267' ``depends_on`` may be either a real revision number or a branch name. When specified at the command line, a resolution from a partial revision number will work as well. It can also refer to any number of dependent revisions; for example, if we were to run the command:: $ alembic revision -m "add ip account table" \\ --head=networking@head \\ --depends-on=55af2cb1c267 --depends-on=d747a --depends-on=fa445 Generating /path/to/foo/model/networking/2a95102259be_add_ip_account_table.py ... done We'd see inside the file:: # revision identifiers, used by Alembic. revision = '2a95102259be' down_revision = '29f859a13ea' branch_labels = None depends_on = ('55af2cb1c267', 'd747a8a8879', 'fa4456a9201') We can also, of course, add or alter this value manually within the file after it is generated, rather than using the ``--depends-on`` argument.
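To make the effect of the dependency concrete, the body of the ``2a95102259be`` migration might create a table that holds a foreign key into the ``account`` table owned by the other revision stream; the ``depends_on`` entry ensures ``55af2cb1c267`` is applied first, so the referenced table is in the expected state. The table and column definitions below are invented for illustration and are not part of what Alembic generates::

    from alembic import op
    import sqlalchemy as sa

    # revision identifiers, used by Alembic.
    revision = '2a95102259be'
    down_revision = '29f859a13ea'
    branch_labels = None
    depends_on = '55af2cb1c267'


    def upgrade():
        # hypothetical table reaching across into the "account" stream
        op.create_table(
            'ip_account',
            sa.Column('id', sa.Integer, primary_key=True),
            sa.Column('account_id', sa.Integer, sa.ForeignKey('account.id')),
            sa.Column('ip_number', sa.String(45)),
        )


    def downgrade():
        op.drop_table('ip_account')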
We can see the effect this directive has when we view the history of the ``networking`` branch in terms of "heads", e.g., all the revisions that are descendants:: $ alembic history -r :networking@head 29f859a13ea (55af2cb1c267) -> 2a95102259be (networking) (head), add ip account table 109ec7d132bf -> 29f859a13ea (networking), add DNS table 3cac04ae8714 -> 109ec7d132bf (networking), add ip number table -> 3cac04ae8714 (networking), create networking branch ae1027a6acf -> 55af2cb1c267 (effective head), add another account column 1975ea83b712 -> ae1027a6acf, Add a column -> 1975ea83b712 (branchpoint), create account table What we see is that the full history of the ``networking`` branch, in terms of an "upgrade" to the "head", includes the tree that builds up ``55af2cb1c267, add another account column``, which will be pulled in first. Interestingly, we don't see this displayed when we display history in the other direction, e.g. from ``networking@base``:: $ alembic history -r networking@base: 29f859a13ea (55af2cb1c267) -> 2a95102259be (networking) (head), add ip account table 109ec7d132bf -> 29f859a13ea (networking), add DNS table 3cac04ae8714 -> 109ec7d132bf (networking), add ip number table -> 3cac04ae8714 (networking), create networking branch The reason for the discrepancy is that displaying history from the base shows us what would occur if we ran a downgrade operation, instead of an upgrade. If we downgraded all the files in ``networking`` using ``networking@base``, the dependencies aren't affected; they're left in place. We also see something odd if we view ``heads`` at the moment:: $ alembic heads 2a95102259be (networking) (head) 27c6a30d7c24 (shoppingcart) (head) 55af2cb1c267 (effective head) The head file that we used as a "dependency", ``55af2cb1c267``, is displayed as an "effective" head, which we can also see in the earlier history display. What this means is that at the moment, if we were to upgrade all versions to the top, the ``55af2cb1c267`` revision number would not actually be present in the ``alembic_version`` table; this is because it does not have a branch of its own subsequent to the ``2a95102259be`` revision which depends on it:: $ alembic upgrade heads INFO [alembic.migration] Running upgrade 29f859a13ea, 55af2cb1c267 -> 2a95102259be, add ip account table $ alembic current 2a95102259be (head) 27c6a30d7c24 (head) The entry is still displayed in ``alembic heads`` because Alembic knows that even though this revision isn't a "real" head, it's still something that we developers consider semantically to be a head, so it's displayed, noting its special status so that we don't get quite as confused when we don't see it within ``alembic current``. If we add a new revision onto ``55af2cb1c267``, the branch again becomes a "real" branch which can have its own entry in the database:: $ alembic revision -m "more account changes" --head=55af2cb@head Generating /path/to/foo/versions/34e094ad6ef1_more_account_changes.py ...
done $ alembic upgrade heads INFO [alembic.migration] Running upgrade 55af2cb1c267 -> 34e094ad6ef1, more account changes $ alembic current 2a95102259be (head) 27c6a30d7c24 (head) 34e094ad6ef1 (head) For posterity, the revision tree now looks like:: $ alembic history 29f859a13ea (55af2cb1c267) -> 2a95102259be (networking) (head), add ip account table 109ec7d132bf -> 29f859a13ea (networking), add DNS table 3cac04ae8714 -> 109ec7d132bf (networking), add ip number table -> 3cac04ae8714 (networking), create networking branch 1975ea83b712 -> 27c6a30d7c24 (shoppingcart) (head), add shopping cart table 55af2cb1c267 -> 34e094ad6ef1 (head), more account changes ae1027a6acf -> 55af2cb1c267, add another account column 1975ea83b712 -> ae1027a6acf, Add a column -> 1975ea83b712 (branchpoint), create account table --- 27c6 --> d747 --> / (shoppingcart) --> 1975 --> \ --- ae10 --> 55af --> ^ +--------+ (dependency) | | --> 3782 -----> 109e ----> 29f8 ---> 2a95 --> (networking) If there's any point to be made here, it's that if you branch, merge and label too freely, things can get pretty crazy! Hence the branching system should be used carefully and thoughtfully for best results. ========== Changelog ========== .. changelog:: :version: 1.7.6 :released: February 1, 2022 .. change:: :tags: bug, batch, regression :tickets: 982 Fixed regression where usage of a ``with_variant()`` datatype in conjunction with the ``existing_type`` option of ``op.alter_column()`` under batch mode would lead to an internal exception. .. change:: :tags: usecase, commands :tickets: 964 Add a new command ``alembic ensure_version``, which will ensure that the Alembic version table is present in the target database, but does not alter its contents. Pull request courtesy Kai Mueller. .. change:: :tags: bug, autogenerate Implemented support for recognizing and rendering SQLAlchemy "variant" types going forward into SQLAlchemy 2.0, where the architecture of "variant" datatypes will be changing. .. change:: :tags: bug, mysql, autogenerate :tickets: 968 Added a rule to the MySQL impl so that the translation between JSON / LONGTEXT is accommodated by autogenerate, treating LONGTEXT from the server as equivalent to an existing JSON in the model. .. change:: :tags: mssql Removed a warning raised by SQLAlchemy when dropping constraints on MSSQL regarding statement caching. .. changelog:: :version: 1.7.5 :released: November 11, 2021 .. change:: :tags: bug, tests Adjustments to the test suite to accommodate for error message changes occurring as of SQLAlchemy 1.4.27. .. changelog:: :version: 1.7.4 :released: October 6, 2021 .. change:: :tags: bug, regression :tickets: 934 Fixed a regression that prevented the use of post write hooks on Python versions lower than 3.9. .. change:: :tags: bug, environment :tickets: 944 Fixed issue where the :meth:`.MigrationContext.autocommit_block` feature would fail to function when using a SQLAlchemy engine using 2.0 future mode. .. changelog:: :version: 1.7.3 :released: September 17, 2021 .. change:: :tags: bug, mypy :tickets: 914 Fixed type annotations for the "constraint_name" argument of operations ``create_primary_key()``, ``create_foreign_key()``. Pull request courtesy TilmanK. .. changelog:: :version: 1.7.2 :released: September 17, 2021 .. change:: :tags: bug, typing :tickets: 900 Added missing attributes from context stubs. ..
change:: :tags: bug, mypy :tickets: 897 Fixed an import in one of the .pyi files that was triggering an assertion error in some versions of mypy. .. change:: :tags: bug, regression, ops :tickets: 920 Fixed issue where registration of custom ops was prone to failure due to the registration process running ``exec()`` on generated code that as of the 1.7 series includes pep-484 annotations, which in the case of end user code would result in name resolution errors when the exec occurs. The logic in question has been altered so that the annotations are rendered as forward references so that the ``exec()`` can proceed. .. changelog:: :version: 1.7.1 :released: August 30, 2021 .. change:: :tags: bug, installation :tickets: 893 Corrected "universal wheel" directive in setup.cfg so that building a wheel does not target Python 2. The PyPi files index for 1.7.0 was corrected manually. Pull request courtesy layday. .. change:: :tags: bug, pep484 :tickets: 895 Fixed issue in generated .pyi files where default values for ``Optional`` arguments were missing, thereby causing mypy to consider them as required. .. change:: :tags: bug, regression, batch :tickets: 896 Fixed regression in batch mode due to :ticket:`883` where the "auto" mode of batch would fail to accommodate any additional migration directives beyond encountering an ``add_column()`` directive, due to a mis-application of the conditional logic that was added as part of this change, leading to "recreate" mode not being used in cases where it is required for SQLite such as for unique constraints. .. changelog:: :version: 1.7.0 :released: August 30, 2021 .. change:: :tags: bug, operations :tickets: 879 Fixed regression due to :ticket:`803` where the ``.info`` and ``.comment`` attributes of ``Table`` would be lost inside of the :class:`.DropTableOp` class, which when "reversed" into a :class:`.CreateTableOp` would then have lost these elements. Pull request courtesy Nicolas CANIART. .. change:: :tags: feature, environment :tickets: 842 Enhance ``version_locations`` parsing to handle paths containing spaces. The new configuration option ``version_path_separator`` specifies the character to use when splitting the ``version_locations`` string. The default for new configurations is ``version_path_separator = os``, which will use ``os.pathsep`` (e.g., ``;`` on Windows). .. change:: :tags: installation, changed Alembic 1.7 now supports Python 3.6 and above; support for prior versions including Python 2.7 has been dropped. .. change:: :tags: bug, sqlite, batch :tickets: 883 Batch "auto" mode will now select for "recreate" if the ``add_column()`` operation is used on SQLite, and the column itself meets the criteria for SQLite where ADD COLUMN is not allowed, in this case a functional or parenthesized SQL expression or a ``Computed`` (i.e. generated) column. .. change:: :tags: changed, installation :tickets: 674 Make the ``python-dateutil`` library an optional dependency. This library is only required if the ``timezone`` option is used in the Alembic configuration. An extra require named ``tz`` is available with ``pip install alembic[tz]`` to install it. .. change:: :tags: bug, commands :tickets: 856 Re-implemented the ``python-editor`` dependency as a small internal function to avoid the need for external dependencies. .. change:: :tags: usecase, batch :tickets: 884 Named CHECK constraints are now supported by batch mode, and will automatically be part of the recreated table assuming they are named. 
They also can be explicitly dropped using ``op.drop_constraint()``. For "unnamed" CHECK constraints, these are still skipped as they cannot be distinguished from the CHECK constraints that are generated by the ``Boolean`` and ``Enum`` datatypes. Note that this change may require adjustments to migrations that drop or rename columns which feature an associated named check constraint, such that an additional ``op.drop_constraint()`` directive should be added for that named constraint as there will no longer be an associated column for it; for the ``Boolean`` and ``Enum`` datatypes, an ``existing_type`` keyword may be passed to ``BatchOperations.drop_constraint`` as well. .. seealso:: :ref:`batch_schematype_constraints` :ref:`batch_check_constraints` .. change:: :tags: changed, installation :tickets: 885 The dependency on ``pkg_resources`` which is part of ``setuptools`` has been removed, so there is no longer any runtime dependency on ``setuptools``. The functionality has been replaced with ``importlib.metadata`` and ``importlib.resources`` which are both part of Python std.lib, or via pypy dependency ``importlib-metadata`` for Python version < 3.8 and ``importlib-resources`` for Python version < 3.9 (while importlib.resources was added to Python in 3.7, it did not include the "files" API until 3.9). .. change:: :tags: feature, tests :tickets: 855 Created a "test suite" similar to the one for SQLAlchemy, allowing developers of third-party dialects to test their code against a set of Alembic tests that have been specially selected to exercise back-end database operations. At the time of release, third-party dialects that have adopted the Alembic test suite to verify compatibility include `CockroachDB `_ and `SAP ASE (Sybase) `_. .. change:: :tags: bug, postgresql :tickets: 874 Fixed issue where usage of the PostgreSQL ``postgresql_include`` option within a :meth:`.Operations.create_index` would raise a KeyError, as the additional column(s) need to be added to the table object used by the construct internally. The issue is equivalent to the SQL Server issue fixed in :ticket:`513`. Pull request courtesy Steven Bronson. .. change:: :tags: feature, general pep-484 type annotations have been added throughout the library. Additionally, stub .pyi files have been added for the "dynamically" generated Alembic modules ``alembic.op`` and ``alembic.config``, which include complete function signatures and docstrings, so that the functions in these namespaces will have both IDE support (vscode, pycharm, etc) as well as support for typing tools like Mypy. The files themselves are statically generated from their source functions within the source tree. .. changelog:: :version: 1.6.5 :released: May 27, 2021 .. change:: :tags: bug, autogenerate :tickets: 849 Fixed issue where dialect-specific keyword arguments within the :class:`.DropIndex` operation directive would not render in the autogenerated Python code. As support was improved for adding dialect specific arguments to directives as part of :ticket:`803`, in particular arguments such as "postgresql_concurrently" which apply to the actual create/drop of the index, support was needed for these to render even in a drop index operation. Pull request courtesy Jet Zhou. .. changelog:: :version: 1.6.4 :released: May 24, 2021 .. 
change:: :tags: bug, regression, op directives :tickets: 848 Fixed regression caused by just fixed :ticket:`844` that scaled back the filter for ``unique=True/index=True`` too far such that these directives no longer worked for the ``op.create_table()`` op, this has been fixed. .. changelog:: :version: 1.6.3 :released: May 21, 2021 .. change:: :tags: bug, regression, autogenerate :tickets: 844 Fixed 1.6-series regression where ``UniqueConstraint`` and to a lesser extent ``Index`` objects would be doubled up in the generated model when the ``unique=True`` / ``index=True`` flags were used. .. change:: :tags: bug, autogenerate :tickets: 839 Fixed a bug where paths defined in post-write hook options would be wrongly escaped in non posix environment (Windows). .. change:: :tags: bug, regression, versioning :tickets: 843 Fixed regression where a revision file that contained its own down revision as a dependency would cause an endless loop in the traversal logic. .. changelog:: :version: 1.6.2 :released: May 6, 2021 .. change:: :tags: bug, versioning, regression :tickets: 839 Fixed additional regression nearly the same as that of :ticket:`838` just released in 1.6.1 but within a slightly different codepath, where "alembic downgrade head" (or equivalent) would fail instead of iterating no revisions. .. changelog:: :version: 1.6.1 :released: May 6, 2021 .. change:: :tags: bug, versioning, regression :tickets: 838 Fixed regression in new revisioning traversal where "alembic downgrade base" would fail if the database itself were clean and unversioned; additionally repairs the case where downgrade would fail if attempting to downgrade to the current head that is already present. .. changelog:: :version: 1.6.0 :released: May 3, 2021 .. change:: :tags: bug, autogenerate :tickets: 803 Refactored the implementation of :class:`.MigrateOperation` constructs such as :class:`.CreateIndexOp`, :class:`.CreateTableOp`, etc. so that they no longer rely upon maintaining a persistent version of each schema object internally; instead, the state variables of each operation object will be used to produce the corresponding construct when the operation is invoked. The rationale is so that environments which make use of operation-manipulation schemes such as those those discussed in :ref:`autogen_rewriter` are better supported, allowing end-user code to manipulate the public attributes of these objects which will then be expressed in the final output, an example is ``some_create_index_op.kw["postgresql_concurrently"] = True``. Previously, these objects when generated from autogenerate would typically hold onto the original, reflected element internally without honoring the other state variables of each construct, preventing the public API from working. .. change:: :tags: bug, environment :tickets: 829 Fixed regression caused by the SQLAlchemy 1.4/2.0 compatibility switch where calling ``.rollback()`` or ``.commit()`` explicitly within the ``context.begin_transaction()`` context manager would cause it to fail when the block ended, as it did not expect that the transaction was manually closed. .. change:: :tags: bug, autogenerate :tickets: 827 Improved the rendering of ``op.add_column()`` operations when adding multiple columns to an existing table, so that the order of these statements matches the order in which the columns were declared in the application's table metadata. Previously the added columns were being sorted alphabetically. .. 
change:: :tags: feature, autogenerate :tickets: 819 Fix the documentation regarding the default command-line argument position of the revision script filename within the post-write hook arguments. Implement a ``REVISION_SCRIPT_FILENAME`` token, enabling the position to be changed. Switch from ``str.split()`` to ``shlex.split()`` for more robust command-line argument parsing. .. change:: :tags: feature :tickets: 822 Implement a ``.cwd`` (current working directory) suboption for post-write hooks (of type ``console_scripts``). This is useful for tools like pre-commit, which rely on the working directory to locate the necessary config files. Add pre-commit as an example to the documentation. Minor change: rename some variables from ticket #819 to improve readability. .. change:: :tags: bug, versioning :tickets: 765, 464 The algorithm used for calculating downgrades/upgrades/iterating revisions has been rewritten, to resolve ongoing issues of branches not being handled consistently particularly within downgrade operations, as well as for overall clarity and maintainability. This change includes that a deprecation warning is emitted if an ambiguous command such as "downgrade -1" when multiple heads are present is given. In particular, the change implements a long-requested use case of allowing downgrades of a single branch to a branchpoint. Huge thanks to Simon Bowly for their impressive efforts in successfully tackling this very difficult problem. .. change:: :tags: bug, batch :tickets: 799 Added missing ``batch_op.create_table_comment()``, ``batch_op.drop_table_comment()`` directives to batch ops. .. changelog:: :version: 1.5.8 :released: March 23, 2021 .. change:: :tags: bug, environment :tickets: 816 Fixed regression caused by SQLAlchemy 1.4 where the "alembic current" command would fail due to changes in the ``URL`` object. .. changelog:: :version: 1.5.7 :released: March 11, 2021 .. change:: :tags: bug, autogenerate :tickets: 813 Adjusted the recently added :paramref:`.EnvironmentContext.configure.include_name` hook to accommodate for additional object types such as "views" that don't have a parent table, to support third party recipes and extensions. Pull request courtesy Oliver Rice. .. changelog:: :version: 1.5.6 :released: March 5, 2021 .. change:: :tags: bug, mssql, operations :tickets: 812 Fixed bug where the "existing_type" parameter, which the MSSQL dialect requires in order to change the nullability of a column in the absence of also changing the column type, would cause an ALTER COLUMN operation to incorrectly render a second ALTER statement without the nullability if a new type were also present, as the MSSQL-specific contract did not anticipate all three of "nullability", "type_" and "existing_type" being sent at the same time. .. change:: :tags: template :ticket: 805 Add async template to Alembic to bootstrap environments that use async DBAPI. Updated the cookbook to include a migration guide on how to adapt an existing environment for use with DBAPI drivers. .. changelog:: :version: 1.5.5 :released: February 20, 2021 .. change:: :tags: bug Adjusted the use of SQLAlchemy's ".copy()" internals to use "._copy()" for version 1.4.0, as this method is being renamed. .. change:: :tags: bug, environment :tickets: 797 Added new config file option ``prepend_sys_path``, which is a series of paths that will be prepended to sys.path; the default value in newly generated alembic.ini files is ".". 
This fixes a long-standing issue where for some reason running the alembic command line would not place the local "." path in sys.path, meaning an application locally present in "." and importable through normal channels, e.g. python interpreter, pytest, etc. would not be located by Alembic, even though the ``env.py`` file is loaded relative to the current path when ``alembic.ini`` contains a relative path. To enable for existing installations, add the option to the alembic.ini file as follows:: # sys.path path, will be prepended to sys.path if present. # defaults to the current working directory. prepend_sys_path = . .. seealso:: :ref:`installation` - updated documentation reflecting that local installation of the project is not necessary if running the Alembic cli from the local path. .. changelog:: :version: 1.5.4 :released: February 3, 2021 .. change:: :tags: bug, versioning :tickets: 789 Fixed bug in versioning model where a downgrade across a revision with a dependency on another branch, yet an ancestor is also dependent on that branch, would produce an erroneous state in the alembic_version table, making upgrades impossible without manually repairing the table. .. changelog:: :version: 1.5.3 :released: January 29, 2021 .. change:: :tags: bug, autogenerate :tickets: 786 Changed the default ordering of "CREATE" and "DROP" statements indexes and unique constraints within the autogenerate process, so that for example in an upgrade() operation, a particular index or constraint that is to be replaced such as for a casing convention change will not produce any naming conflicts. For foreign key constraint objects, this is already how constraints are ordered, and for table objects, users would normally want to use :meth:`.Operations.rename_table` in any case. .. change:: :tags: bug, autogenerate, mssql :tickets: 787 Fixed assorted autogenerate issues with SQL Server: * ignore default reflected identity on primary_key columns * improve server default comparison .. change:: :tags: bug, mysql, autogenerate :tickets: 788 Fixed issue where autogenerate rendering of ``op.alter_column()`` would fail to include MySQL ``existing_nullable=False`` if the column were part of a primary key constraint within the table metadata. .. changelog:: :version: 1.5.2 :released: January 20, 2021 .. change:: :tags: bug, versioning, regression :tickets: 784 Fixed regression where new "loop detection" feature introduced in :ticket:`757` produced false positives for revision names that have overlapping substrings between revision number and down revision and/or dependency, if the downrev/dependency were not in sequence form. .. change:: :tags: bug, environment :tickets: 782 Fixed regression where Alembic would fail to create a transaction properly if the :class:`sqlalchemy.engine.Connection` were a so-called "branched" connection, that is, one where the ``.connect()`` method had been called to create a "sub" connection. .. changelog:: :version: 1.5.1 :released: January 19, 2021 .. change:: :tags: bug, installation, commands :tickets: 780 Fixed installation issue where the "templates" directory was not being installed, preventing commands like "list_templates" and "init" from working. .. changelog:: :version: 1.5.0 :released: January 18, 2021 .. change:: :tags: usecase, operations :tickets: 730 Added support for rendering of "identity" elements on :class:`.Column` objects, supported in SQLAlchemy via the :class:`.Identity` element introduced in version 1.4. 
Adding columns with identity is supported on PostgreSQL, MSSQL and Oracle. Changing the identity options or removing it is supported only on PostgreSQL and Oracle. .. change:: :tags: changed, environment To accommodate SQLAlchemy 1.4 and 2.0, the migration model now no longer assumes that the SQLAlchemy Connection will autocommit an individual operation. This essentially means that for databases that use non-transactional DDL (pysqlite current driver behavior, MySQL), there is still a BEGIN/COMMIT block that will surround each individual migration. Databases that support transactional DDL should continue to have the same flow, either per migration or per-entire run, depending on the value of the :paramref:`.Environment.configure.transaction_per_migration` flag. .. change:: :tags: changed, environment A :class:`.CommandError` is raised if a ``sqlalchemy.engine.Engine`` is passed to the :meth:`.MigrationContext.configure` method instead of a ``sqlalchemy.engine.Connection`` object. Previously, this would be a warning only. .. change:: :tags: bug, operations :tickets: 753 Modified the ``add_column()`` operation such that the ``Column`` object in use is shallow copied to a new instance if that ``Column`` is already attached to a ``table()`` or ``Table``. This accommodates for the change made in SQLAlchemy issue #5618 which prohibits a ``Column`` from being associated with multiple ``table()`` objects. This resumes support for using a ``Column`` inside of an Alembic operation that already refers to a parent ``table()`` or ``Table`` as well as allows operation objects just autogenerated to work. .. change:: :tags: feature, autogenerate :tickets: 650 Added new hook :paramref:`.EnvironmentContext.configure.include_name`, which complements the :paramref:`.EnvironmentContext.configure.include_object` hook by providing a means of preventing objects of a certain name from being autogenerated **before** the SQLAlchemy reflection process takes place, and notably includes explicit support for passing each schema name when :paramref:`.EnvironmentContext.configure.include_schemas` is set to True. This is most important especially for environments that make use of :paramref:`.EnvironmentContext.configure.include_schemas` where schemas are actually databases (e.g. MySQL) in order to prevent reflection sweeps of the entire server. .. seealso:: :ref:`autogenerate_include_hooks` - new documentation section .. change:: :tags: removed, autogenerate The long deprecated :paramref:`.EnvironmentContext.configure.include_symbol` hook is removed. The :paramref:`.EnvironmentContext.configure.include_object` and :paramref:`.EnvironmentContext.configure.include_name` hooks both achieve the goals of this hook. .. change:: :tags: bug, autogenerate :tickets: 721 Added rendering for the ``Table.prefixes`` element to autogenerate so that the rendered Python code includes these directives. Pull request courtesy Rodrigo Ce Moretto. .. change:: :tags: bug, batch :tickets: 761 Added missing "create comment" feature for columns that are altered in batch migrations. .. change:: :tags: changed :tickets: 748 Alembic 1.5.0 now supports **Python 2.7 and Python 3.6 and above**, as well as **SQLAlchemy 1.3.0 and above**. Support is removed for Python 3 versions prior to 3.6 and SQLAlchemy versions prior to the 1.3 series. .. 
change:: :tags: bug, batch :tickets: 773 Made an adjustment to the PostgreSQL dialect to allow it to work more effectively in batch mode, where a datatype like Boolean or non-native Enum that may have embedded rules to generate CHECK constraints will be more correctly handled in that these constraints usually will not have been generated on the PostgreSQL backend; previously it would inadvertently assume they existed unconditionally in a special PG-only "drop constraint" step. .. change:: :tags: feature, versioning :tickets: 757 The revision tree is now checked for cycles and loops between revision files when the revision environment is loaded up. Scenarios such as a revision pointing to itself, or a revision that can reach itself via a loop, are handled and will raise the :class:`.CycleDetected` exception when the environment is loaded (expressed from the Alembic commandline as a failure message and nonzero return code). Previously, these situations were silently ignored up front, and the behavior of revision traversal would either be silently incorrect, or would produce errors such as :class:`.RangeNotAncestorError`. Pull request courtesy Koichiro Den. .. change:: :tags: usecase, commands Add ``__main__.py`` file to alembic package to support invocation with ``python -m alembic``. .. change:: :tags: removed, commands Removed deprecated ``--head_only`` option to the ``alembic current`` command .. change:: :tags: removed, operations Removed legacy parameter names from operations, these have been emitting warnings since version 0.8. In the case that legacy version files have not yet been updated, these can be modified directly in order to maintain compatibility: * :meth:`.Operations.drop_constraint` - "type" (use "type_") and "name" (use "constraint_name") * :meth:`.Operations.create_primary_key` - "cols" (use "columns") and "name" (use "constraint_name") * :meth:`.Operations.create_unique_constraint` - "name" (use "constraint_name"), "source" (use "table_name") and "local_cols" (use "columns") * :meth:`.Operations.batch_create_unique_constraint` - "name" (use "constraint_name") * :meth:`.Operations.create_foreign_key` - "name" (use "constraint_name"), "source" (use "source_table"), "referent" (use "referent_table") * :meth:`.Operations.batch_create_foreign_key` - "name" (use "constraint_name"), "referent" (use "referent_table") * :meth:`.Operations.create_check_constraint` - "name" (use "constraint_name"), "source" (use "table_name") * :meth:`.Operations.batch_create_check_constraint` - "name" (use "constraint_name") * :meth:`.Operations.create_index` - "name" (use "index_name") * :meth:`.Operations.drop_index` - "name" (use "index_name"), "tablename" (use "table_name") * :meth:`.Operations.batch_drop_index` - "name" (use "index_name"), * :meth:`.Operations.create_table` - "name" (use "table_name") * :meth:`.Operations.drop_table` - "name" (use "table_name") * :meth:`.Operations.alter_column` - "name" (use "new_column_name") .. changelog:: :version: 1.4.3 :released: September 11, 2020 .. change:: :tags: bug, sqlite, batch :tickets: 711 Added support to drop named CHECK constraints that are specified as part of a column, rather than table wide. Previously, only constraints associated with the table were considered. .. 
change:: :tags: bug, ops, mysql :tickets: 736 Fixed issue where the MySQL dialect would not correctly render the server default of a column in an alter operation, if the operation were programmatically generated from an autogenerate pass as it would not accommodate for the full structure of the DefaultClause construct. .. change:: :tags: bug, sqlite, batch :tickets: 697 Fixed issue where the CAST applied to a JSON column when copying a SQLite table during batch mode would cause the data to be lost, as SQLite's CAST with JSON appears to convert the data to the value "0". The CAST is now skipped in a dialect-specific manner, including for JSON columns on SQLite. Pull request courtesy Sebastián Ramírez. .. change:: :tags: bug, commands :tickets: 694 The ``alembic current`` command no longer creates an ``alembic_version`` table in the database if one does not exist already, returning no version as the current version. This allows checking for migrations in parallel without introducing race conditions. Pull request courtesy Nikolay Edigaryev. .. change:: :tags: bug, batch Fixed issue where columns in a foreign-key referenced table would be replaced with null-type columns during a batch operation; while this did not generally have any side effects, it could theoretically impact a batch operation that also targets that table directly and also would interfere with future changes to the ``.append_column()`` method to disallow implicit replacement of columns. .. change:: :tags: bug, mssql :tickets: 716 Fixed issue where the ``mssql_drop_foreign_key=True`` flag on ``op.drop_column`` would lead to incorrect syntax error due to a typo in the SQL emitted, same typo was present in the test as well so it was not detected. Pull request courtesy Oleg Shigorin. .. changelog:: :version: 1.4.2 :released: March 19, 2020 .. change:: :tags: usecase, autogenerate :tickets: 669 Adjusted autogen comparison to accommodate for backends that support computed column reflection, dependent on SQLAlchemy version 1.3.16 or higher. This emits a warning if the SQL expression inside of a :class:`.Computed` value changes between the metadata and the database, as these expressions can't be changed without dropping and recreating the column. .. change:: :tags: bug, tests :tickets: 668 Fixed an issue that prevented the test suite from running with the recently released py.test 5.4.0. .. change:: :tags: bug, autogenerate, mysql :tickets: 671 Fixed more false-positive failures produced by the new "compare type" logic first added in :ticket:`605`, particularly impacting MySQL string types regarding flags such as "charset" and "collation". .. change:: :tags: bug, op directives, oracle :tickets: 670 Fixed issue in Oracle backend where a table RENAME with a schema-qualified name would include the schema in the "to" portion, which is rejected by Oracle. .. changelog:: :version: 1.4.1 :released: March 1, 2020 .. change:: :tags: bug, autogenerate :tickets: 661 Fixed regression caused by the new "type comparison" logic introduced in 1.4 as part of :ticket:`605` where comparisons of MySQL "unsigned integer" datatypes would produce false positives, as the regular expression logic was not correctly parsing the "unsigned" token when MySQL's default display width would be returned by the database. Pull request courtesy Paul Becotte. .. change:: :tags: bug, environment :tickets: 663 Error message for "path doesn't exist" when loading up script environment now displays the absolute path. Pull request courtesy Rowan Hart. .. 
change:: :tags: bug, autogenerate :tickets: 654 Fixed regression in 1.4.0 due to :ticket:`647` where unique constraint comparison with mixed case constraint names while not using a naming convention would produce false positives during autogenerate. .. change:: :tags: bug, environment The check for matched rowcount when the alembic_version table is updated or deleted from is now conditional based on whether or not the dialect supports the concept of "rowcount" for UPDATE or DELETE rows matched. Some third party dialects do not support this concept. Pull request courtesy Ke Zhu. .. change:: :tags: bug, operations :tickets: 655 Fixed long-standing bug where an inline column CHECK constraint would not be rendered within an "ADD COLUMN" operation. The DDL compiler is now consulted for inline constraints within the :meth:`.Operations.add_column` method as is done for regular CREATE TABLE operations. .. changelog:: :version: 1.4.0 :released: February 4, 2020 .. change:: :tags: change The internal inspection routines no longer use SQLAlchemy's ``Inspector.from_engine()`` method, which is expected to be deprecated in 1.4. The ``inspect()`` function is now used. .. change:: :tags: bug, autogenerate :tickets: 647 Adjusted the unique constraint comparison logic in a similar manner as that of :ticket:`421` did for indexes in order to take into account SQLAlchemy's own truncation of long constraint names when a naming convention is in use. Without this step, a name that is truncated by SQLAlchemy based on a unique constraint naming convention or hardcoded name will not compare properly. .. change:: :tags: feature, batch :tickets: 640 Added new parameters :paramref:`.BatchOperations.add_column.insert_before`, :paramref:`.BatchOperations.add_column.insert_after` which provide for establishing the specific position in which a new column should be placed. Also added :paramref:`.Operations.batch_alter_table.partial_reordering` which allows the complete set of columns to be reordered when the new table is created. Both operations apply only to when batch mode is recreating the whole table using ``recreate="always"``. Thanks to Marcin Szymanski for assistance with the implementation. .. change:: :tags: usecase, environment :tickets: 648 Moved the use of the ``__file__`` attribute at the base of the Alembic package into the one place that it is specifically needed, which is when the config attempts to locate the template directory. This helps to allow Alembic to be fully importable in environments that are using Python memory-only import schemes. Pull request courtesy layday. .. change:: :tags: bug, autogenerate :tickets: 605 A major rework of the "type comparison" logic is in place which changes the entire approach by which column datatypes are compared. Types are now compared based on the DDL string generated by the metadata type vs. the datatype reflected from the database. This means we compare types based on what would actually render and additionally if elements of the types change like string length, those changes are detected as well. False positives like those generated between SQLAlchemy Boolean and MySQL TINYINT should also be resolved. Thanks very much to Paul Becotte for lots of hard work and patience on this one. .. seealso:: :ref:`autogenerate_detects` - updated comments on type comparison .. changelog:: :version: 1.3.3 :released: January 22, 2020 .. 
change:: :tags: bug, postgresql :tickets: 637 Fixed issue where COMMENT directives for PostgreSQL failed to correctly include an explicit schema name, as well as correct quoting rules for schema, table, and column names. Pull request courtesy Matthew Sills. .. change:: :tags: usecase, operations :tickets: 624 Added support for rendering of "computed" elements on :class:`.Column` objects, supported in SQLAlchemy via the new :class:`.Computed` element introduced in version 1.3.11. Pull request courtesy Federico Caselli. Note that there is currently no support for ALTER COLUMN to add, remove, or modify the "GENERATED ALWAYS AS" element from a column; at least for PostgreSQL, it does not seem to be supported by the database. Additionally, SQLAlchemy does not currently reliably reflect the "GENERATED ALWAYS AS" phrase from an existing column, so there is also no autogenerate support for addition or removal of the :class:`.Computed` element to or from an existing column, there is only support for adding new columns that include the :class:`.Computed` element. In the case that the :class:`.Computed` element is removed from the :class:`.Column` object in the table metadata, PostgreSQL and Oracle currently reflect the "GENERATED ALWAYS AS" expression as the "server default" which will produce an op that tries to drop the element as a default. .. changelog:: :version: 1.3.2 :released: December 16, 2019 .. change:: :tags: bug, api, autogenerate :tickets: 635 Fixed regression introduced by :ticket:`579` where server default rendering functions began to require a dialect implementation, however the :func:`.render_python_code` convenience function did not include one, thus causing the function to fail when used in a server default context. The function now accepts a migration context argument and also creates one against the default dialect if one is not provided. .. changelog:: :version: 1.3.1 :released: November 13, 2019 .. change:: :tags: bug, mssql :tickets: 621 Fixed bug in MSSQL dialect where the drop constraint execution steps used to remove server default or implicit foreign key constraint failed to take into account the schema name of the target table. .. changelog:: :version: 1.3.0 :released: October 31, 2019 .. change:: :tags: feature, command :tickets: 608 Added support for ALEMBIC_CONFIG environment variable, refers to the location of the alembic configuration script in lieu of using the -c command line option. .. change:: :tags: bug, autogenerate :tickets: 131 Fixed bug in new Variant autogenerate where the order of the arguments to Variant were mistakenly reversed. .. change:: :tags: change, compatibility Some internal modifications have been made to how the names of indexes and unique constraints work to make use of new functions added in SQLAlchemy 1.4, so that SQLAlchemy has more flexibility over how naming conventions may be applied to these objects. .. changelog:: :version: 1.2.1 :released: September 24, 2019 .. change:: :tags: bug, command :tickets: 601 Reverted the name change of the "revisions" argument to :func:`.command.stamp` to "revision" as apparently applications are calling upon this argument as a keyword name. Pull request courtesy Thomas Bechtold. Special translations are also added to the command line interface so that it is still known as "revisions" in the CLI. .. change:: :tags: bug, tests :tickets: 592 Removed the "test requirements" from "setup.py test", as this command now only emits a removal error in any case and these requirements are unused. .. 
changelog:: :version: 1.2.0 :released: September 20, 2019 .. change:: :tags: feature, command :tickets: 473 Added new ``--purge`` flag to the ``alembic stamp`` command, which will unconditionally erase the version table before stamping anything. This is useful for development where non-existent version identifiers might be left within the table. Additionally, ``alembic.stamp`` now supports a list of revision identifiers, which are intended to allow setting up multiple heads at once. Overall handling of version identifiers within the ``alembic.stamp`` command has been improved with many new tests and use cases added. .. change:: :tags: bug, autogenerate :tickets: 550 Improved the Python rendering of a series of migration operations such that a single "pass" is rendered for a :class:`.UpgradeOps` or :class:`.DowngradeOps` based on if no lines of Python code actually rendered under the operation, rather than whether or not sub-directives exist. Removed extra "pass" lines that would generate from the :class:`.ModifyTableOps` directive so that these aren't duplicated under operation rewriting scenarios. .. change:: :tags: feature, runtime :tickets: 123 Added new feature :meth:`.MigrationContext.autocommit_block`, a special directive which will provide for a non-transactional block inside of a migration script. The feature requires that the database driver (e.g. DBAPI) supports the AUTOCOMMIT isolation mode. The directive also necessarily needs to COMMIT the existing transaction in progress in order to enter autocommit mode. .. seealso:: :meth:`.MigrationContext.autocommit_block` .. change:: :tags: change, py3k Python 3.4 support is dropped, as the upstream tooling (pip, mysqlclient) etc. are already dropping support for Python 3.4, which itself is no longer maintained. .. change:: :tags: usecase, autogenerate :tickets: 518 Added autogenerate support for :class:`.Column` objects that have dialect-specific ``**kwargs``, support first added in SQLAlchemy 1.3. This includes SQLite "on conflict" as well as options used by some third party dialects. .. change:: :tags: usecase, autogenerate :tickets: 131 Added rendering for SQLAlchemy ``Variant`` datatypes, which render as the base type plus one or more ``.with_variant()`` method calls. .. change:: :tags: usecase, commands :tickets: 534 Made the command interface revision lookup behavior more strict in that an Alembic revision number is only resolved based on partial match rules if it has at least four characters, to prevent simple typographical issues from inadvertently running migrations. .. change:: :tags: feature, commands :tickets: 307 Added "post write hooks" to revision generation. These allow custom logic to run after a revision Python script is generated, typically for the purpose of running code formatters such as "Black" or "autopep8", but may be used for any arbitrary post-render hook as well, including custom Python functions or scripts. The hooks are enabled by providing a ``[post_write_hooks]`` section in the alembic.ini file. A single hook is provided which runs an arbitrary Python executable on the newly generated revision script, which can be configured to run code formatters such as Black; full examples are included in the documentation. .. seealso:: :ref:`post_write_hooks` .. change:: :tags: feature, environment :tickets: 463 Added new flag ``--package`` to ``alembic init``.
For environments where the Alembic migration files and such are within the package tree and importable as modules, this flag can be specified which will add the additional ``__init__.py`` files in the version location and the environment location. .. change:: :tags: bug, autogenerate :tickets: 549 Fixed bug where rendering of comment text for table-level comments within :meth:`.Operations.create_table_comment` and :meth:`.Operations.drop_table_comment` was not properly quote-escaped within rendered Python code for autogenerate. .. change:: :tags: bug, autogenerate :tickets: 505 Modified the logic of the :class:`.Rewriter` object such that it keeps a memoization of which directives it has processed, so that it can ensure it processes a particular directive only once, and additionally fixed :class:`.Rewriter` so that it functions correctly for multiple-pass autogenerate schemes, such as the one illustrated in the "multidb" template. By tracking which directives have been processed, a multiple-pass scheme which calls upon the :class:`.Rewriter` multiple times for the same structure as elements are added can work without running duplicate operations on the same elements more than once. .. changelog:: :version: 1.1.0 :released: August 26, 2019 .. change:: :tags: change Alembic 1.1 bumps the minimum version of SQLAlchemy to 1.1. As was the case before, Python requirements remain at Python 2.7, or in the 3.x series Python 3.4. .. change:: :tags: change, internals The test suite for Alembic now makes use of SQLAlchemy's testing framework directly. Previously, Alembic had its own version of this framework that was mostly copied from that of SQLAlchemy to enable testing with older SQLAlchemy versions. The majority of this code is now removed so that both projects can leverage improvements from a common testing framework. .. change:: :tags: bug, commands :tickets: 562 Fixed bug where the double-percent logic applied to some dialects such as psycopg2 would be rendered in ``--sql`` mode, by allowing dialect options to be passed through to the dialect used to generate SQL and then providing ``paramstyle="named"`` so that percent signs need not be doubled. For users having this issue, existing env.py scripts need to add ``dialect_opts={"paramstyle": "named"}`` to their offline context.configure(). See the ``alembic/templates/generic/env.py`` template for an example. .. change:: :tags: bug, py3k Fixed use of the deprecated "imp" module, which is used to detect pep3147 availability as well as to locate .pyc files, which started emitting deprecation warnings during the test suite. The warnings were not being emitted earlier during the test suite, the change is possibly due to changes in py.test itself but this is not clear. The check for pep3147 is set to True for any Python version 3.5 or greater now and importlib is used when available. Note that some dependencies such as distutils may still be emitting this warning. Tests are adjusted to accommodate for dependencies that emit the warning as well. .. change:: :tags: bug, mysql :tickets: 594 Fixed issue where emitting a change of column name for MySQL did not preserve the column comment, even if it were specified as existing_comment. .. change:: :tags: bug, setup :tickets: 592 Removed the "python setup.py test" feature in favor of a straight run of "tox". Per Pypa / pytest developers, "setup.py" commands are in general headed towards deprecation in favor of tox. 
The tox.ini script has been updated such that running "tox" with no arguments will perform a single run of the test suite against the default installed Python interpreter. .. seealso:: https://github.com/pypa/setuptools/issues/1684 https://github.com/pytest-dev/pytest/issues/5534 .. change:: :tags: usecase, commands :tickets: 571 The "alembic init" command will now proceed if the target directory exists as long as it's still empty. Previously, it would not proceed if the directory existed. The new behavior is modeled from what git does, to accommodate for container or other deployments where an Alembic target directory may need to be already mounted instead of being created with alembic init. Pull request courtesy Aviskar KC. .. changelog:: :version: 1.0.11 :released: June 25, 2019 .. change:: :tags: bug, sqlite, autogenerate, batch :tickets: 579 SQLite server default reflection will ensure parentheses are surrounding a column default expression that is detected as being a non-constant expression, such as a ``datetime()`` default, to accommodate for the requirement that SQL expressions have to be parenthesized when being sent as DDL. Parentheses are not added to constant expressions to allow for maximum cross-compatibility with other dialects and existing test suites (such as Alembic's), which necessarily entails scanning the expression to eliminate for constant numeric and string values. The logic is added to the two "reflection->DDL round trip" paths which are currently autogenerate and batch migration. Within autogenerate, the logic is on the rendering side, whereas in batch the logic is installed as a column reflection hook. .. change:: :tags: bug, sqlite, autogenerate :tickets: 579 Improved SQLite server default comparison to accommodate for a ``text()`` construct that added parentheses directly vs. a construct that relied upon the SQLAlchemy SQLite dialect to render the parentheses, as well as improved support for various forms of constant expressions such as values that are quoted vs. non-quoted. .. change:: :tags: bug, autogenerate Fixed bug where the "literal_binds" flag was not being set when autogenerate would create a server default value, meaning server default comparisons would fail for functions that contained literal values. .. change:: :tags: bug, mysql :tickets: 554 Added support for MySQL "DROP CHECK", which is added as of MySQL 8.0.16, separate from MariaDB's "DROP CONSTRAINT" for CHECK constraints. The MySQL Alembic implementation now checks for "MariaDB" in server_version_info to decide which one to use. .. change:: :tags: bug, mysql, operations :tickets: 564 Fixed issue where MySQL databases need to use CHANGE COLUMN when altering a server default of CURRENT_TIMESTAMP, NOW() and probably other functions that are only usable with DATETIME/TIMESTAMP columns. While MariaDB supports both CHANGE and ALTER COLUMN in this case, MySQL databases only support CHANGE. So the new logic is that if the server default change is against a DateTime-oriented column, the CHANGE format is used unconditionally, as in the vast majority of cases the server default is to be CURRENT_TIMESTAMP which may also be potentially bundled with an "ON UPDATE CURRENT_TIMESTAMP" directive, which SQLAlchemy does not currently support as a distinct field. The fix additionally improves the server default comparison logic when the "ON UPDATE" clause is present and there are parentheses to be adjusted for, as is the case on some MariaDB versions. ..
change:: :tags: bug, environment Warnings emitted by Alembic now include a default stack level of 2, and in some cases it's set to 3, in order to help warnings indicate more closely where they are originating from. Pull request courtesy Ash Berlin-Taylor. .. change:: :tags: bug, py3k :tickets: 563 Replaced the Python compatibility routines for ``getargspec()`` with a fully vendored version based on ``getfullargspec()`` from Python 3.3. Originally, Python was emitting deprecation warnings for this function in Python 3.8 alphas. While this change was reverted, it was observed that Python 3 implementations for ``getfullargspec()`` are an order of magnitude slower as of the 3.4 series where it was rewritten against ``Signature``. While Python plans to improve upon this situation, SQLAlchemy projects for now are using a simple replacement to avoid any future issues. .. changelog:: :version: 1.0.10 :released: April 28, 2019 .. change:: :tags: bug, commands :tickets: 552 Fixed bug introduced in release 0.9.0 where the helptext for commands inadvertently got expanded to include function docstrings from the command.py module. The logic has been adjusted to only refer to the first line(s) preceding the first line break within each docstring, as was the original intent. .. change:: :tags: bug, operations, mysql :tickets: 551 Added an assertion in :meth:`.RevisionMap.get_revisions` and other methods which ensures revision numbers are passed as strings or collections of strings. Driver issues particularly on MySQL may inadvertently be passing bytes here which leads to failures later on. .. change:: :tags: bug, autogenerate, mysql :tickets: 553 Fixed bug when using the :paramref:`.EnvironmentContext.configure.compare_server_default` flag set to ``True`` where a server default that is introduced in the table metadata on an ``Integer`` column, where there is no existing server default in the database, would raise a ``TypeError``. .. changelog:: :version: 1.0.9 :released: April 15, 2019 .. change:: :tags: bug, operations :tickets: 548 Simplified the internal scheme used to generate the ``alembic.op`` namespace to no longer attempt to generate full method signatures (e.g. rather than generic ``*args, **kw``) as this was not working in most cases anyway, while in rare circumstances it would in fact sporadically have access to the real argument names and then fail when generating the function due to missing symbols in the argument signature. .. changelog:: :version: 1.0.8 :released: March 4, 2019 .. change:: :tags: bug, operations :tickets: 528 Removed use of deprecated ``force`` parameter for SQLAlchemy quoting functions as this parameter will be removed in a future release. Pull request courtesy Parth Shandilya(ParthS007). .. change:: :tags: bug, autogenerate, postgresql, py3k :tickets: 541 Fixed issue where server default comparison on the PostgreSQL dialect would fail for a blank string on Python 3.7 only, due to a change in regular expression behavior in Python 3.7. .. changelog:: :version: 1.0.7 :released: January 25, 2019 .. change:: :tags: bug, autogenerate :tickets: 529 Fixed issue in new comment support where autogenerated Python code for comments wasn't using ``repr()`` thus causing issues with quoting. Pull request courtesy Damien Garaud. .. changelog:: :version: 1.0.6 :released: January 13, 2019 .. change:: :tags: feature, operations :tickets: 422 Added Table and Column level comments for supported backends. 
New methods :meth:`.Operations.create_table_comment` and :meth:`.Operations.drop_table_comment` are added. New arguments :paramref:`.Operations.alter_column.comment` and :paramref:`.Operations.alter_column.existing_comment` are added to :meth:`.Operations.alter_column`. Autogenerate support is also added to ensure comment add/drops from tables and columns are generated, as well as that :meth:`.Operations.create_table` and :meth:`.Operations.add_column` both include the comment field from the source :class:`.Table` or :class:`.Column` object. .. changelog:: :version: 1.0.5 :released: November 27, 2018 .. change:: :tags: bug, py3k :tickets: 507 Resolved remaining Python 3 deprecation warnings, covering the use of inspect.formatargspec() with a vendored version copied from the Python standard library, importing collections.abc above Python 3.3 when testing against abstract base classes, fixed one occurrence of log.warn(), as well as a few invalid escape sequences. .. changelog:: :version: 1.0.4 :released: November 27, 2018 .. change:: :tags: change Code hosting has been moved to GitHub, at https://github.com/sqlalchemy/alembic. Additionally, the main Alembic website documentation URL is now https://alembic.sqlalchemy.org. .. changelog:: :version: 1.0.3 :released: November 14, 2018 .. change:: :tags: bug, mssql :tickets: 516 Fixed regression caused by :ticket:`513`, where the logic to consume ``mssql_include`` was not correctly interpreting the case where the flag was not present, breaking the ``op.create_index`` directive for SQL Server as a whole. .. changelog:: :version: 1.0.2 :released: October 31, 2018 .. change:: :tags: bug, autogenerate :tickets: 515 The ``system=True`` flag on :class:`.Column`, used primarily in conjunction with the Postgresql "xmin" column, now renders within the autogenerate render process, allowing the column to be excluded from DDL. Additionally, adding a system=True column to a model will produce no autogenerate diff as this column is implicitly present in the database. .. change:: :tags: bug, mssql :tickets: 513 Fixed issue where usage of the SQL Server ``mssql_include`` option within a :meth:`.Operations.create_index` would raise a KeyError, as the additional column(s) need to be added to the table object used by the construct internally. .. changelog:: :version: 1.0.1 :released: October 17, 2018 .. change:: :tags: bug, commands :tickets: 497 Fixed an issue where revision descriptions were essentially being formatted twice. For any revision description that contained characters like %, writing output to stdout would fail because the call to config.print_stdout attempted to format any additional args passed to the function. This fix now only applies string formatting if any args are provided along with the output text. .. change:: :tags: bug, autogenerate :tickets: 512 Fixed issue where removed method ``union_update()`` was used when a customized :class:`.MigrationScript` instance included entries in the ``.imports`` data member, raising an AttributeError. .. changelog:: :version: 1.0.0 :released: July 13, 2018 .. change:: :tags: feature, general :tickets: 491 For Alembic 1.0, Python 2.6 / 3.3 support is being dropped, allowing a fixed setup.py to be built as well as universal wheels. Pull request courtesy Hugo. .. change:: :tags: feature, general With the 1.0 release, Alembic's minimum SQLAlchemy support version moves to 0.9.0, previously 0.7.9. ..
change:: :tags: bug, batch :tickets: 502 Fixed issue in batch where dropping a primary key column, then adding it back under the same name but without the primary_key flag, would not remove it from the existing PrimaryKeyConstraint. If a new PrimaryKeyConstraint is added, it is used as-is, as was the case before. .. changelog:: :version: 0.9.10 :released: June 29, 2018 .. change:: :tags: bug, autogenerate The "op.drop_constraint()" directive will now render using ``repr()`` for the schema name, in the same way that "schema" renders for all the other op directives. Pull request courtesy Denis Kataev. .. change:: :tags: bug, autogenerate :tickets: 494 Added basic capabilities for external dialects to support rendering of "nested" types, like arrays, in a manner similar to that of the Postgresql dialect. .. change:: :tags: bug, autogenerate Fixed issue where "autoincrement=True" would not render for a column that specified it, since as of SQLAlchemy 1.1 this is no longer the default value for "autoincrement". Note the behavior only takes effect against SQLAlchemy 1.1.0 and higher; for pre-1.1 SQLAlchemy, "autoincrement=True" does not render as was the case before. Pull request courtesy Elad Almos. .. changelog:: :version: 0.9.9 :released: March 22, 2018 .. change:: :tags: feature, commands :tickets: 481 Added new flag ``--indicate-current`` to the ``alembic history`` command. When listing versions, it will include the token "(current)" to indicate the given version is a current head in the target database. Pull request courtesy Kazutaka Mise. .. change:: :tags: bug, autogenerate, mysql :tickets: 455 The fix for :ticket:`455` in version 0.9.6 involving MySQL server default comparison was entirely non-functional, as the test itself was also broken and didn't reveal that it wasn't working. The regular expression to compare server default values like CURRENT_TIMESTAMP to current_timestamp() is repaired. .. change:: :tags: bug, mysql, autogenerate :tickets: 483 Fixed bug where MySQL server default comparisons were basically not working at all due to incorrect regexp added in :ticket:`455`. Also accommodates for MariaDB 10.2 quoting differences in reporting integer-based server defaults. .. change:: :tags: bug, operations, mysql :tickets: 487 Fixed bug in ``op.drop_constraint()`` for MySQL where quoting rules would not be applied to the constraint name. .. changelog:: :version: 0.9.8 :released: February 16, 2018 .. change:: :tags: bug, runtime :tickets: 482 Fixed bug where the :meth:`.Script.as_revision_number` method did not accommodate for the 'heads' identifier, which in turn caused the :meth:`.EnvironmentContext.get_head_revisions` and :meth:`.EnvironmentContext.get_revision_argument` methods to be unusable when multiple heads were present. The :meth:`.EnvironmentContext.get_head_revisions` method returns a tuple in all cases as documented. .. change:: :tags: bug, postgresql, autogenerate :tickets: 478 Fixed bug where autogenerate of :class:`.ExcludeConstraint` would render a raw quoted name for a Column that has case-sensitive characters, which when invoked as an inline member of the Table would produce a stack trace indicating that the quoted name could not be found. An incoming Column object is now rendered as ``sa.column('name')``. .. change:: :tags: bug, autogenerate :tickets: 468 Fixed bug where the indexes would not be included in a migration that was dropping the owning table.
The fix now will also emit DROP INDEX for the indexes ahead of time, but more importantly will include CREATE INDEX in the downgrade migration. .. change:: :tags: bug, postgresql :tickets: 480 Fixed the autogenerate of the module prefix when rendering the text_type parameter of postgresql.HSTORE, in much the same way that we do for ARRAY's type and JSON's text_type. .. change:: :tags: bug, mysql :tickets: 479 Added support for DROP CONSTRAINT to the MySQL Alembic dialect to support MariaDB 10.2 which now has real CHECK constraints. Note this change does **not** add autogenerate support, only support for op.drop_constraint() to work. .. changelog:: :version: 0.9.7 :released: January 16, 2018 .. change:: :tags: bug, autogenerate :tickets: 472 Fixed regression caused by :ticket:`421` which would cause case-sensitive quoting rules to interfere with the comparison logic for index names, thus causing indexes to show as added for indexes that have case-sensitive names. Works with SQLAlchemy 0.9 and later series. .. change:: :tags: bug, postgresql, autogenerate :tickets: 461 Fixed bug where autogenerate would produce a DROP statement for the index implicitly created by a Postgresql EXCLUDE constraint, rather than skipping it as is the case for indexes implicitly generated by unique constraints. Makes use of SQLAlchemy 1.0.x's improved "duplicates index" metadata and requires at least SQLAlchemy version 1.0.x to function correctly. .. changelog:: :version: 0.9.6 :released: October 13, 2017 .. change:: :tags: bug, commands :tickets: 458 Fixed a few Python3.6 deprecation warnings by replacing ``StopIteration`` with ``return``, as well as using ``getfullargspec()`` instead of ``getargspec()`` under Python 3. .. change:: :tags: bug, commands :tickets: 441 An addition to :ticket:`441` fixed in 0.9.5, we forgot to also filter for the ``+`` sign in migration names which also breaks due to the relative migrations feature. .. change:: :tags: bug, autogenerate :tickets: 442 Fixed bug expanding upon the fix for :ticket:`85` which adds the correct module import to the "inner" type for an ``ARRAY`` type, the fix now accommodates for the generic ``sqlalchemy.types.ARRAY`` type added in SQLAlchemy 1.1, rendering the inner type correctly regardless of whether or not the Postgresql dialect is present. .. change:: :tags: bug, mysql :tickets: 455 Fixed bug where server default comparison of CURRENT_TIMESTAMP would fail on MariaDB 10.2 due to a change in how the function is represented by the database during reflection. .. change:: :tags: bug, autogenerate Fixed bug where comparison of ``Numeric`` types would produce a difference if the Python-side ``Numeric`` inadvertently specified a non-None "scale" with a "precision" of None, even though this ``Numeric`` type will pass over the "scale" argument when rendering. Pull request courtesy Ivan Mmelnychuk. .. change:: :tags: feature, commands :tickets: 447 The ``alembic history`` command will now make use of the revision environment ``env.py`` unconditionally if the ``revision_environment`` configuration flag is set to True. Previously, the environment would only be invoked if the history specification were against a database-stored revision token. .. change:: :tags: bug, batch :tickets: 457 The name of the temporary table in batch mode is now generated off of the original table name itself, to avoid conflicts for the unusual case of multiple batch operations running against the same database schema at the same time. .. 
change:: :tags: bug, autogenerate :tickets: 456 A :class:`.ForeignKeyConstraint` can now render correctly if the ``link_to_name`` flag is set, as it will not attempt to resolve the name from a "key" in this case. Additionally, the constraint will render as-is even if the remote column name isn't present on the referenced remote table. .. change:: :tags: bug, runtime, py3k :tickets: 449 Reworked "sourceless" system to be fully capable of handling any combination of: Python2/3x, pep3149 or not, PYTHONOPTIMIZE or not, for locating and loading both env.py files as well as versioning files. This includes: locating files inside of ``__pycache__`` as well as listing out version files that might be only in ``versions/__pycache__``, deduplicating version files that may be in ``versions/__pycache__`` and ``versions/`` at the same time, correctly looking for .pyc or .pyo files based on if pep488 is present or not. The latest Python3x deprecation warnings involving importlib are also corrected. .. changelog:: :version: 0.9.5 :released: August 9, 2017 .. change:: :tags: bug, commands :tickets: 441 A :class:`.CommandError` is raised if the "--rev-id" passed to the :func:`.revision` command contains dashes or at-signs, as this interferes with the command notation used to locate revisions. .. change:: :tags: bug, postgresql :tickets: 424 Added support for the dialect-specific keyword arguments to :meth:`.Operations.drop_index`. This includes support for ``postgresql_concurrently`` and others. .. change:: :tags: bug, commands Fixed bug in timezone feature introduced in :ticket:`425` when the creation date in a revision file is calculated, to accommodate for timezone names that contain mixed-case characters in their name as opposed to all uppercase. Pull request courtesy Nils Philippsen. .. changelog:: :version: 0.9.4 :released: July 31, 2017 .. change:: :tags: bug, runtime Added an additional attribute to the new :paramref:`.EnvironmentContext.configure.on_version_apply` API, :attr:`.MigrationInfo.up_revision_ids`, to accommodate for the uncommon case of the ``alembic stamp`` command being used to move from multiple branches down to a common branchpoint; there will be multiple "up" revisions in this one case. .. changelog:: :version: 0.9.3 :released: July 6, 2017 .. change:: :tags: feature, runtime Added a new callback hook :paramref:`.EnvironmentContext.configure.on_version_apply`, which allows user-defined code to be invoked each time an individual upgrade, downgrade, or stamp operation proceeds against a database. Pull request courtesy John Passaro. .. change:: 433 :tags: bug, autogenerate :tickets: 433 Fixed bug where autogen comparison of a :class:`.Variant` datatype would not compare to the dialect level type for the "default" implementation of the :class:`.Variant`, returning the type as changed between database and table metadata. .. change:: 431 :tags: bug, tests :tickets: 431 Fixed unit tests to run correctly under the SQLAlchemy 1.0.x series prior to version 1.0.10 where a particular bug involving Postgresql exclude constraints was fixed. .. changelog:: :version: 0.9.2 :released: May 18, 2017 .. change:: 429 :tags: bug, mssql :tickets: 429 Repaired :meth:`.Operations.rename_table` for SQL Server when the target table is in a remote schema, the schema name is omitted from the "new name" argument. .. 
change:: 425 :tags: feature, commands :tickets: 425 Added a new configuration option ``timezone``, a string timezone name that will be applied to the create date timestamp rendered inside the revision file, as made available to the ``file_template`` used to generate the revision filename. Note this change adds the ``python-dateutil`` package as a dependency. .. change:: 421 :tags: bug, autogenerate :tickets: 421 The autogenerate compare scheme now takes into account the name truncation rules applied by SQLAlchemy's DDL compiler to the names of the :class:`.Index` object, when these names are dynamically truncated due to a too-long identifier name. As the identifier truncation is deterministic, applying the same rule to the metadata name allows correct comparison to the database-derived name. .. change:: 419 :tags: bug, environment :tickets: 419 A warning is emitted when an object that's not a :class:`~sqlalchemy.engine.Connection` is passed to :meth:`.EnvironmentContext.configure`. For the case of a :class:`~sqlalchemy.engine.Engine` passed, the check for "in transaction" introduced in version 0.9.0 has been relaxed to work in the case of an attribute error, as some users appear to be passing an :class:`~sqlalchemy.engine.Engine` and not a :class:`~sqlalchemy.engine.Connection`. .. changelog:: :version: 0.9.1 :released: March 1, 2017 .. change:: 417 :tags: bug, commands :tickets: 417, 369 An adjustment to the bug fix for :ticket:`369` to accommodate for env.py scripts that use an enclosing transaction distinct from the one that the context provides, so that the check for "didn't commit the transaction" doesn't trigger in this scenario. .. changelog:: :version: 0.9.0 :released: February 28, 2017 .. change:: 38 :tags: feature, autogenerate :tickets: 38 The :paramref:`.EnvironmentContext.configure.target_metadata` parameter may now be optionally specified as a sequence of :class:`.MetaData` objects instead of a single :class:`.MetaData` object. The autogenerate process will process the sequence of :class:`.MetaData` objects in order. .. change:: 369 :tags: bug, commands :tickets: 369 A :class:`.CommandError` is now raised when a migration file opens a database transaction and does not close/commit/rollback, when the backend database or environment options also specify transactional_ddl is False. When transactional_ddl is not in use, Alembic doesn't close any transaction so a transaction opened by a migration file will cause the following migrations to fail to apply. .. change:: 413 :tags: bug, autogenerate, mysql :tickets: 413 The ``autoincrement=True`` flag is now rendered within the :meth:`.Operations.alter_column` operation if the source column indicates that this flag should be set to True. The behavior is sensitive to the SQLAlchemy version in place, as the "auto" default option is new in SQLAlchemy 1.1. When the source column indicates autoincrement as True or "auto", the flag will render as True if the original column contextually indicates that it should have "autoincrement" keywords, and when the source column explicitly sets it to False, this is also rendered. The behavior is intended to preserve the AUTO_INCREMENT flag on MySQL as the column is fully recreated on this backend. Note that this flag does **not** support alteration of a column's "autoincrement" status, as this is not portable across backends.
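As a rough sketch of what such a rendered directive may look like (the table and column names are hypothetical; ``op`` and ``sa`` are the names normally imported in a generated migration script)::

    # illustrative only; autogenerate decides the actual arguments emitted
    op.alter_column(
        "account",
        "id",
        existing_type=sa.Integer(),
        type_=sa.BigInteger(),
        autoincrement=True,
        existing_nullable=False,
    )

..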
change:: 411 :tags: bug, postgresql :tickets: 411 Fixed bug where Postgresql JSON/JSONB types rendered on SQLAlchemy 1.1 would render the "astext_type" argument which defaults to the ``Text()`` type without the module prefix, similarly to the issue with ARRAY fixed in :ticket:`85`. .. change:: 85 :tags: bug, postgresql :tickets: 85 Fixed bug where Postgresql ARRAY type would not render the import prefix for the inner type; additionally, user-defined renderers take place for the inner type as well as the outer type. Pull request courtesy Paul Brackin. .. change:: process_revision_directives_command :tags: feature, autogenerate Added a keyword argument ``process_revision_directives`` to the :func:`.command.revision` API call. This function acts in the same role as the environment-level :paramref:`.EnvironmentContext.configure.process_revision_directives`, and allows API use of the command to drop in an ad-hoc directive process function. This function can be used among other things to place a complete :class:`.MigrationScript` structure in place. .. change:: 412 :tags: feature, postgresql :tickets: 412 Added support for Postgresql EXCLUDE constraints, including the operation directive :meth:`.Operations.create_exclude_constraints` as well as autogenerate render support for the ``ExcludeConstraint`` object as present in a ``Table``. Autogenerate detection for an EXCLUDE constraint added or removed to/from an existing table is **not** implemented as the SQLAlchemy Postgresql dialect does not yet support reflection of EXCLUDE constraints. Additionally, unknown constraint types now warn when encountered within an autogenerate action rather than raise. .. change:: fk_schema_compare :tags: bug, operations Fixed bug in :func:`.ops.create_foreign_key` where the internal table representation would not be created properly if the foreign key referred to a table in a different schema of the same name. Pull request courtesy Konstantin Lebedev. .. changelog:: :version: 0.8.10 :released: January 17, 2017 .. change:: 406 :tags: bug, versioning :tickets: 406 The alembic_version table, when initially created, now establishes a primary key constraint on the "version_num" column, to suit database engines that don't support tables without primary keys. This behavior can be controlled using the parameter :paramref:`.EnvironmentContext.configure.version_table_pk`. Note that this change only applies to the initial creation of the alembic_version table; it does not impact any existing alembic_version table already present. .. change:: 402 :tags: bug, batch :tickets: 402 Fixed bug where doing ``batch_op.drop_constraint()`` against the primary key constraint would fail to remove the "primary_key" flag from the column, resulting in the constraint being recreated. .. change:: update_uq_dedupe :tags: bug, autogenerate, oracle Adjusted the logic originally added for :ticket:`276` that detects MySQL unique constraints which are actually unique indexes to be generalized for any dialect that has this behavior, for SQLAlchemy version 1.0 and greater. This is to allow for upcoming SQLAlchemy support for unique constraint reflection for Oracle, which also has no dedicated concept of "unique constraint" and instead establishes a unique index. .. change:: 356 :tags: bug, versioning :tickets: 356 Added a file ignore for Python files of the form ``.#.py``, which are generated by the Emacs editor. Pull request courtesy Markus Mattes. .. changelog:: :version: 0.8.9 :released: November 28, 2016 .. 
change:: 393 :tags: bug, autogenerate :tickets: 393 Adjustment to the "please adjust!" comment in the script.py.mako template so that the generated comment starts with a single pound sign, appeasing flake8. .. change:: :tags: bug, batch :tickets: 391 Batch mode will not use CAST() to copy data if type_ is given but the basic type affinity matches that of the existing type. This is to avoid SQLite's CAST of TIMESTAMP, which results in truncation of the data, in those cases where the user needs to add a redundant type_ for other reasons. .. change:: :tags: bug, autogenerate :tickets: 393 Continued pep8 improvements by adding appropriate whitespace in the base template for generated migrations. Pull request courtesy Markus Mattes. .. change:: :tags: bug, revisioning Added an additional check when reading in revision files to detect if the same file is being read twice; this can occur if the same directory or a symlink equivalent is present more than once in version_locations. A warning is now emitted and the file is skipped. Pull request courtesy Jiri Kuncar. .. change:: :tags: bug, autogenerate :tickets: 395 Fixed bug where usage of a custom TypeDecorator which returns a per-dialect type via :meth:`.TypeDecorator.load_dialect_impl` that differs significantly from the default "impl" for the type decorator would fail to compare correctly during autogenerate. .. change:: :tags: bug, autogenerate, postgresql :tickets: 392 Fixed bug in Postgresql "functional index skip" behavior where a functional index that ended in ASC/DESC wouldn't be detected as something we can't compare in autogenerate, leading to duplicate definitions in autogenerated files. .. change:: :tags: bug, versioning Fixed bug where the "base" specifier, as in "base:head", could not be used explicitly when ``--sql`` mode was present. .. changelog:: :version: 0.8.8 :released: September 12, 2016 .. change:: :tags: autogenerate The imports in the default script.py.mako are now at the top so that flake8 editors don't complain by default. PR courtesy Guilherme Mansur. .. change:: :tags: feature, operations, postgresql :tickets: 292 Added support for the USING clause to the ALTER COLUMN operation for Postgresql. Support is via the :paramref:`.op.alter_column.postgresql_using` parameter. Pull request courtesy Frazer McLean. .. change:: :tags: feature, autogenerate Autogenerate with type comparison enabled will pick up on the timezone setting changing between DateTime types. Pull request courtesy David Szotten. .. changelog:: :version: 0.8.7 :released: July 26, 2016 .. change:: :tags: bug, versioning :tickets: 336 Fixed bug where upgrading to the head of a branch which is already present would fail, only if that head were also the dependency of a different branch that is also upgraded, as the revision system would see this as trying to go in the wrong direction. The check here has been refined to distinguish between same-branch revisions out of order vs. movement along sibling branches. .. change:: :tags: bug, versioning :tickets: 379 Adjusted the version traversal on downgrade such that we can downgrade to a version that is a dependency for a version in a different branch, *without* needing to remove that dependent version as well. Previously, the target version would be seen as a "merge point" for its normal up-revision as well as the dependency. This integrates with the changes for :ticket:`377` and :ticket:`378` to improve treatment of branches with dependencies overall. ..
change:: :tags: bug, versioning :tickets: 377 Fixed bug where a downgrade to a version that is also a dependency to a different branch would fail, as the system attempted to treat this as an "unmerge" of a merge point, when in fact it doesn't have the other side of the merge point available for update. .. change:: :tags: bug, versioning :tickets: 378 Fixed bug where the "alembic current" command wouldn't show a revision as a current head if it were also a dependency of a version in a different branch that's also applied. Extra logic is added to extract "implied" versions of different branches from the top-level versions listed in the alembic_version table. .. change:: :tags: bug, versioning Fixed bug where a repr() or str() of a Script object would fail if the script had multiple dependencies. .. change:: :tags: bug, autogenerate Fixed bug in autogen where if the DB connection sends the default schema as "None", this "None" would be removed from the list of schemas to check if include_schemas were set. This could possibly impact using include_schemas with SQLite. .. change:: :tags: bug, batch Small adjustment made to the batch handling for reflected CHECK constraints to accommodate for SQLAlchemy 1.1 now reflecting these. Batch mode still does not support CHECK constraints from the reflected table as these can't be easily differentiated from the ones created by types such as Boolean. .. changelog:: :version: 0.8.6 :released: April 14, 2016 .. change:: :tags: bug, commands :tickets: 367 Errors which occur within the Mako render step are now intercepted and raised as CommandErrors like other failure cases; the Mako exception itself is written using template-line formatting to a temporary file which is named in the exception message. .. change:: :tags: bug, postgresql :tickets: 365 Added a fix to Postgresql server default comparison which first checks if the text of the default is identical to the original, before attempting to actually run the default. This accommodates for default-generation functions that generate a new value each time such as a uuid function. .. change:: :tags: bug, batch :tickets: 361 Fixed bug introduced by the fix for :ticket:`338` in version 0.8.4 where a server default could no longer be dropped in batch mode. Pull request courtesy Martin Domke. .. change:: :tags: bug, batch, mssql Fixed bug where SQL Server arguments for drop_column() would not be propagated when running under a batch block. Pull request courtesy Michal Petrucha. .. changelog:: :version: 0.8.5 :released: March 9, 2016 .. change:: :tags: bug, autogenerate :tickets: 335 Fixed bug where the columns rendered in a ``PrimaryKeyConstraint`` in autogenerate would inappropriately render the "key" of the column, not the name. Pull request courtesy Jesse Dhillon. .. change:: :tags: bug, batch :tickets: 354 Repaired batch migration support for "schema" types which generate constraints, in particular the ``Boolean`` datatype which generates a CHECK constraint. Previously, an alter column operation with this type would fail to correctly accommodate for the CHECK constraint on change both from and to this type. In the former case the operation would fail entirely, in the latter, the CHECK constraint would not get generated. Both of these issues are repaired. .. change:: :tags: bug, mysql :tickets: 355 Changing a schema type such as ``Boolean`` to a non-schema type would emit a drop constraint operation which emits ``NotImplementedError`` for the MySQL dialect. 
This drop constraint operation is now skipped when the constraint originates from a schema type. .. changelog:: :version: 0.8.4 :released: December 15, 2015 .. change:: :tags: feature, versioning A major improvement to the hash id generation function, which for some reason used an awkward arithmetic formula against uuid4() that produced values that tended to start with the digits 1-4. Replaced with a simple substring approach which provides an even distribution. Pull request courtesy Antti Haapala. .. change:: :tags: feature, autogenerate Added an autogenerate renderer for the :class:`.ExecuteSQLOp` operation object; only renders if given a plain SQL string, otherwise raises NotImplementedError. Can be of help with custom autogenerate sequences that includes straight SQL execution. Pull request courtesy Jacob Magnusson. .. change:: :tags: bug, batch :tickets: 345 Batch mode generates a FOREIGN KEY constraint that is self-referential using the ultimate table name, rather than ``_alembic_batch_temp``. When the table is renamed from ``_alembic_batch_temp`` back to the original name, the FK now points to the right name. This will **not** work if referential integrity is being enforced (eg. SQLite "PRAGMA FOREIGN_KEYS=ON") since the original table is dropped and the new table then renamed to that name, however this is now consistent with how foreign key constraints on **other** tables already operate with batch mode; these don't support batch mode if referential integrity is enabled in any case. .. change:: :tags: bug, autogenerate :tickets: 341 Added a type-level comparator that distinguishes :class:`.Integer`, :class:`.BigInteger`, and :class:`.SmallInteger` types and dialect-specific types; these all have "Integer" affinity so previously all compared as the same. .. change:: :tags: bug, batch :tickets: 338 Fixed bug where the ``server_default`` parameter of ``alter_column()`` would not function correctly in batch mode. .. change:: :tags: bug, autogenerate :tickets: 337 Adjusted the rendering for index expressions such that a :class:`.Column` object present in the source :class:`.Index` will not be rendered as table-qualified; e.g. the column name will be rendered alone. Table-qualified names here were failing on systems such as Postgresql. .. changelog:: :version: 0.8.3 :released: October 16, 2015 .. change:: :tags: bug, autogenerate :tickets: 332 Fixed an 0.8 regression whereby the "imports" dictionary member of the autogen context was removed; this collection is documented in the "render custom type" documentation as a place to add new imports. The member is now known as :attr:`.AutogenContext.imports` and the documentation is repaired. .. change:: :tags: bug, batch :tickets: 333 Fixed bug in batch mode where a table that had pre-existing indexes would create the same index on the new table with the same name, which on SQLite produces a naming conflict as index names are in a global namespace on that backend. Batch mode now defers the production of both existing and new indexes until after the entire table transfer operation is complete, which also means those indexes no longer take effect during the INSERT from SELECT section as well; the indexes are applied in a single step afterwards. .. change:: :tags: bug, tests Added "pytest-xdist" as a tox dependency, so that the -n flag in the test command works if this is not already installed. Pull request courtesy Julien Danjou. .. 
change:: :tags: bug, autogenerate, postgresql :tickets: 324 Fixed issue in PG server default comparison where model-side defaults configured with Python unicode literals would leak the "u" character from a ``repr()`` into the SQL used for comparison, creating an invalid SQL expression, as the server-side comparison feature in PG currently repurposes the autogenerate Python rendering feature to get a quoted version of a plain string default. .. changelog:: :version: 0.8.2 :released: August 25, 2015 .. change:: :tags: bug, autogenerate :tickets: 321 Added workaround in new foreign key option detection feature for MySQL's consideration of the "RESTRICT" option being the default, for which no value is reported from the database; the MySQL impl now corrects for when the model reports RESTRICT but the database reports nothing. A similar rule is in the default FK comparison to accommodate for the default "NO ACTION" setting being present in the model but not necessarily reported by the database, or vice versa. .. changelog:: :version: 0.8.1 :released: August 22, 2015 .. change:: :tags: feature, autogenerate A custom :paramref:`.EnvironmentContext.configure.process_revision_directives` hook can now generate op directives within the :class:`.UpgradeOps` and :class:`.DowngradeOps` containers that will be generated as Python code even when the ``--autogenerate`` flag is False; provided that ``revision_environment=True``, the full render operation will be run even in "offline" mode. .. change:: :tags: bug, autogenerate Repaired the render operation for the :class:`.ops.AlterColumnOp` object to succeed when the "existing_type" field was not present. .. change:: :tags: bug, autogenerate :tickets: 318 Fixed a regression 0.8 whereby the "multidb" environment template failed to produce independent migration script segments for the output template. This was due to the reorganization of the script rendering system for 0.8. To accommodate this change, the :class:`.MigrationScript` structure will in the case of multiple calls to :meth:`.MigrationContext.run_migrations` produce lists for the :attr:`.MigrationScript.upgrade_ops` and :attr:`.MigrationScript.downgrade_ops` attributes; each :class:`.UpgradeOps` and :class:`.DowngradeOps` instance keeps track of its own ``upgrade_token`` and ``downgrade_token``, and each are rendered individually. .. seealso:: :ref:`autogen_customizing_multiengine_revision` - additional detail on the workings of the :paramref:`.EnvironmentContext.configure.process_revision_directives` parameter when multiple calls to :meth:`.MigrationContext.run_migrations` are made. .. change:: :tags: feature, autogenerate :tickets: 317 Implemented support for autogenerate detection of changes in the ``ondelete``, ``onupdate``, ``initially`` and ``deferrable`` attributes of :class:`.ForeignKeyConstraint` objects on SQLAlchemy backends that support these on reflection (as of SQLAlchemy 1.0.8 currently Postgresql for all four, MySQL for ``ondelete`` and ``onupdate`` only). A constraint object that modifies these values will be reported as a "diff" and come out as a drop/create of the constraint with the modified values. The fields are ignored for backends which don't reflect these attributes (as of SQLA 1.0.8 this includes SQLite, Oracle, SQL Server, others). .. changelog:: :version: 0.8.0 :released: August 12, 2015 .. 
change:: :tags: bug, batch :tickets: 315 Fixed bug in batch mode where the ``batch_op.create_foreign_key()`` directive would be incorrectly rendered with the source table and schema names in the argument list. .. change:: :tags: feature, commands Added new command ``alembic edit``. This command takes the same arguments as ``alembic show``, however runs the target script file within $EDITOR. Makes use of the ``python-editor`` library in order to facilitate the handling of $EDITOR with reasonable default behaviors across platforms. Pull request courtesy Michel Albert. .. change:: :tags: feature, commands :tickets: 311 Added new multiple-capable argument ``--depends-on`` to the ``alembic revision`` command, allowing ``depends_on`` to be established at the command line level rather than having to edit the file after the fact. ``depends_on`` identifiers may also be specified as branch names at the command line or directly within the migration file. The values may be specified as partial revision numbers from the command line which will be resolved to full revision numbers in the output file. .. change:: :tags: change, operations A range of positional argument names have been changed to be clearer and more consistent across methods within the :class:`.Operations` namespace. The most prevalent form of name change is that the descriptive names ``constraint_name`` and ``table_name`` are now used where previously the name ``name`` would be used. This is in support of the newly modularized and extensible system of operation objects in :mod:`alembic.operations.ops`. An argument translation layer is in place across the ``alembic.op`` namespace that will ensure that named argument calling styles that use the old names will continue to function by transparently translating to the new names, also emitting a warning. This, along with the fact that these arguments are positional in any case and aren't normally passed with an explicit name, should ensure that the overwhelming majority of applications should be unaffected by this change. The *only* applications that are impacted are those that: 1. use the :class:`.Operations` object directly in some way, rather than calling upon the ``alembic.op`` namespace, and 2. invoke the methods on :class:`.Operations` using named keyword arguments for positional arguments like ``table_name``, ``constraint_name``, etc., which commonly were named ``name`` as of 0.7.6. 3. any application that is using named keyword arguments in place of positional argument for the recently added :class:`.BatchOperations` object may also be affected. The naming changes are documented as "versionchanged" for 0.8.0: * :meth:`.BatchOperations.create_check_constraint` * :meth:`.BatchOperations.create_foreign_key` * :meth:`.BatchOperations.create_index` * :meth:`.BatchOperations.create_unique_constraint` * :meth:`.BatchOperations.drop_constraint` * :meth:`.BatchOperations.drop_index` * :meth:`.Operations.create_check_constraint` * :meth:`.Operations.create_foreign_key` * :meth:`.Operations.create_primary_key` * :meth:`.Operations.create_index` * :meth:`.Operations.create_table` * :meth:`.Operations.create_unique_constraint` * :meth:`.Operations.drop_constraint` * :meth:`.Operations.drop_index` * :meth:`.Operations.drop_table` .. change:: :tags: feature, tests The default test runner via "python setup.py test" is now py.test. nose still works via run_tests.py. .. 
change:: :tags: feature, operations :tickets: 302 The internal system for Alembic operations has been reworked to now build upon an extensible system of operation objects. New operations can be added to the ``op.`` namespace and are then also available in custom autogenerate schemes. .. seealso:: :ref:`operation_plugins` .. change:: :tags: feature, autogenerate :tickets: 301, 306 The internal system for autogenerate has been reworked to build upon the extensible system of operation objects present in :ticket:`302`. As part of this change, autogenerate now produces a full object graph representing a list of migration scripts to be written as well as operation objects that will render all the Python code within them; a new hook :paramref:`.EnvironmentContext.configure.process_revision_directives` allows end-user code to fully customize what autogenerate will do, including not just full manipulation of the Python steps to take but also what file or files will be written and where. Additionally, autogenerate is now extensible as far as database objects compared and rendered into scripts; any new operation directive can also be registered into a series of hooks that allow custom database/model comparison functions to run as well as to render new operation directives into autogenerate scripts. .. seealso:: :ref:`alembic.autogenerate.toplevel` .. change:: :tags: bug, versioning :tickets: 314 Fixed bug where in the erroneous case that alembic_version contains duplicate revisions, some commands would fail to process the version history correctly and end up with a KeyError. The fix allows the versioning logic to proceed; however, a clear error is emitted later when attempting to update the alembic_version table. .. changelog:: :version: 0.7.7 :released: July 22, 2015 .. change:: :tags: bug, versioning :tickets: 310 Fixed critical issue where a complex series of branches/merges would bog down the iteration algorithm working over redundant nodes for millions of cycles. An internal adjustment has been made so that duplicate nodes are skipped within this iteration. .. change:: :tags: feature, batch :tickets: 305 Implemented support for :meth:`.BatchOperations.create_primary_key` and :meth:`.BatchOperations.create_check_constraint`. Additionally, table keyword arguments are copied from the original reflected table, such as the "mysql_engine" keyword argument. .. change:: :tags: bug, environment :tickets: 300 The :meth:`.MigrationContext.stamp` method, added as part of the versioning refactor in 0.7 as a more granular version of :func:`.command.stamp`, now includes the "create the alembic_version table if not present" step in the same way as the command version, which was previously omitted. .. change:: :tags: bug, autogenerate :tickets: 298 Fixed bug where foreign key options including "onupdate", "ondelete" would not render within the ``op.create_foreign_key()`` directive, even though they render within a full ``ForeignKeyConstraint`` directive. .. change:: :tags: bug, tests Repaired warnings that occur when running unit tests against SQLAlchemy 1.0.5 or greater involving the "legacy_schema_aliasing" flag. .. changelog:: :version: 0.7.6 :released: May 5, 2015 .. change:: :tags: feature, versioning :tickets: 297 Fixed bug where the case of multiple mergepoints that all have the identical set of ancestor revisions would fail to be upgradable, producing an assertion failure.
Merge points were previously assumed to always require at least an UPDATE in alembic_revision from one of the previous revs to the new one, however in this case, if one of the mergepoints has already been reached, the remaining mergepoints have no row to UPDATE therefore they must do an INSERT of their target version. .. change:: :tags: feature, autogenerate :tickets: 296 Added support for type comparison functions to be not just per environment, but also present on the custom types themselves, by supplying a method ``compare_against_backend``. Added a new documentation section :ref:`compare_types` describing type comparison fully. .. change:: :tags: feature, operations :tickets: 255 Added a new option :paramref:`.EnvironmentContext.configure.literal_binds`, which will pass the ``literal_binds`` flag into the compilation of SQL constructs when using "offline" mode. This has the effect that SQL objects like inserts, updates, deletes as well as textual statements sent using ``text()`` will be compiled such that the dialect will attempt to render literal values "inline" automatically. Only a subset of types is typically supported; the :meth:`.Operations.inline_literal` construct remains as the construct used to force a specific literal representation of a value. The :paramref:`.EnvironmentContext.configure.literal_binds` flag is added to the "offline" section of the ``env.py`` files generated in new environments. .. change:: :tags: bug, batch :tickets: 289 Fully implemented the :paramref:`~.Operations.batch_alter_table.copy_from` parameter for batch mode, which previously was not functioning. This allows "batch mode" to be usable in conjunction with ``--sql``. .. change:: :tags: bug, batch :tickets: 287 Repaired support for the :meth:`.BatchOperations.create_index` directive, which was mis-named internally such that the operation within a batch context could not proceed. The create index operation will proceed as part of a larger "batch table recreate" operation only if :paramref:`~.Operations.batch_alter_table.recreate` is set to "always", or if the batch operation includes other instructions that require a table recreate. .. changelog:: :version: 0.7.5 :released: March 19, 2015 .. change:: :tags: bug, autogenerate :tickets: 266 The ``--autogenerate`` option is not valid when used in conjunction with "offline" mode, e.g. ``--sql``. This now raises a ``CommandError``, rather than failing more deeply later on. Pull request courtesy Johannes Erdfelt. .. change:: :tags: bug, operations, mssql :tickets: 284 Fixed bug where the mssql DROP COLUMN directive failed to include modifiers such as "schema" when emitting the DDL. .. change:: :tags: bug, autogenerate, postgresql :tickets: 282 Postgresql "functional" indexes are necessarily skipped from the autogenerate process, as the SQLAlchemy backend currently does not support reflection of these structures. A warning is emitted both from the SQLAlchemy backend as well as from the Alembic backend for Postgresql when such an index is detected. .. change:: :tags: bug, autogenerate, mysql :tickets: 276 Fixed bug where MySQL backend would report dropped unique indexes and/or constraints as both at the same time. This is because MySQL doesn't actually have a "unique constraint" construct that reports differently than a "unique index", so it is present in both lists. 
The net effect though is that the MySQL backend will report a dropped unique index/constraint as an index in cases where the object was first created as a unique constraint, if no other information is available to make the decision. This differs from other backends like Postgresql which can report on unique constraints and unique indexes separately. .. change:: :tags: bug, commands :tickets: 269 Fixed bug where using a partial revision identifier as the "starting revision" in ``--sql`` mode in a downgrade operation would fail to resolve properly. As a side effect of this change, the :meth:`.EnvironmentContext.get_starting_revision_argument` method will return the "starting" revision in its originally-given "partial" form in all cases, whereas previously when running within the :meth:`.command.stamp` command, it would have been resolved to a full number before passing it to the :class:`.EnvironmentContext`. The resolution of this value to a real revision number has basically been moved to a more fundamental level within the offline migration process. .. change:: :tags: feature, commands Added a new feature :attr:`.Config.attributes`, to help with the use case of sharing state such as engines and connections on the outside with a series of Alembic API calls; also added a new cookbook section to describe this simple but pretty important use case. .. seealso:: :ref:`connection_sharing` .. change:: :tags: feature, environment The format of the default ``env.py`` script has been refined a bit; it now uses context managers not only for the scope of the transaction, but also for connectivity from the starting engine. The engine is also now called a "connectable" in support of the use case of an external connection being passed in. .. change:: :tags: feature, versioning :tickets: 267 Added support for "alembic stamp" to work when given "heads" as an argument, when multiple heads are present. .. changelog:: :version: 0.7.4 :released: January 12, 2015 .. change:: :tags: bug, autogenerate, postgresql :tickets: 241 Repaired issue where a server default specified without ``text()`` that represented a numeric or floating point (e.g. with decimal places) value would fail in the Postgresql-specific check for "compare server default"; as PG accepts the value with quotes in the table specification, it's still valid. Pull request courtesy Dimitris Theodorou. .. change:: :tags: bug, autogenerate :tickets: 259 The rendering of a :class:`~sqlalchemy.schema.ForeignKeyConstraint` will now ensure that the names of the source and target columns are the database-side name of each column, and not the value of the ``.key`` attribute as may be set only on the Python side. This is because Alembic generates the DDL for constraints as standalone objects without the need to actually refer to an in-Python :class:`~sqlalchemy.schema.Table` object, so there's no step that would resolve these Python-only key names to database column names. .. change:: :tags: bug, autogenerate :tickets: 260 Fixed bug in foreign key autogenerate where if the in-Python table used custom column keys (e.g. using the ``key='foo'`` kwarg to ``Column``), the comparison of existing foreign keys to those specified in the metadata would fail, as the reflected table would not have these keys available with which to match up. Foreign key comparison for autogenerate now ensures it's looking at the database-side names of the columns in all cases; this matches the same functionality within unique constraints and indexes. ..
change:: :tags: bug, autogenerate :tickets: 261 Fixed issue in autogenerate type rendering where types that belong to modules that have the name "sqlalchemy" in them would be mistaken as being part of the ``sqlalchemy.`` namespace. Pull request courtesy Bartosz Burclaf. .. changelog:: :version: 0.7.3 :released: December 30, 2014 .. change:: :tags: bug, versioning :tickets: 258 Fixed regression in new versioning system where upgrade / history operation would fail on AttributeError if no version files were present at all. .. changelog:: :version: 0.7.2 :released: December 18, 2014 .. change:: :tags: bug, sqlite, autogenerate Adjusted the SQLite backend regarding autogen of unique constraints to work fully with the current SQLAlchemy 1.0, which now will report on UNIQUE constraints that have no name. .. change:: :tags: bug, batch :tickets: 254 Fixed bug in batch where if the target table contained multiple foreign keys to the same target table, the batch mechanics would fail with a "table already exists" error. Thanks for the help on this from Lucas Kahlert. .. change:: :tags: bug, mysql :tickets: 251 Fixed an issue where the MySQL routine to skip foreign-key-implicit indexes would also catch unnamed unique indexes, as they would be named after the column and look like the FK indexes. Pull request courtesy Johannes Erdfelt. .. change:: :tags: bug, mssql, oracle :tickets: 253 Repaired a regression in both the MSSQL and Oracle dialects whereby the overridden ``_exec()`` method failed to return a value, as is needed now in the 0.7 series. .. changelog:: :version: 0.7.1 :released: December 3, 2014 .. change:: :tags: bug, batch The ``render_as_batch`` flag was inadvertently hardcoded to ``True``, so all autogenerates were spitting out batch mode; this has been fixed so that batch mode is again used only when selected in env.py. .. change:: :tags: feature, autogenerate :tickets: 178 Support for autogenerate of FOREIGN KEY constraints has been added. These are delivered within the autogenerate process in the same manner as UNIQUE constraints, including ``include_object`` support. Big thanks to Ann Kamyshnikova for doing the heavy lifting here. .. change:: :tags: feature, batch Added :paramref:`~.Operations.batch_alter_table.naming_convention` argument to :meth:`.Operations.batch_alter_table`, as this is necessary in order to drop foreign key constraints; these are often unnamed on the target database, and in the case that they are named, SQLAlchemy is as of the 0.9 series not including these names yet. .. seealso:: :ref:`dropping_sqlite_foreign_keys` .. change:: :tags: bug, batch Fixed bug where the "source_schema" argument was not correctly passed when calling :meth:`.BatchOperations.create_foreign_key`. Pull request courtesy Malte Marquarding. .. change:: :tags: bug, batch :tickets: 249 Repaired the inspection, copying and rendering of CHECK constraints and so-called "schema" types such as Boolean, Enum within the batch copy system; the CHECK constraint will not be "doubled" when the table is copied, and additionally the inspection of the CHECK constraint for its member columns will no longer fail with an attribute error. .. change:: :tags: feature, batch Added two new arguments :paramref:`.Operations.batch_alter_table.reflect_args` and :paramref:`.Operations.batch_alter_table.reflect_kwargs`, so that arguments may be passed directly to suit the :class:`~sqlalchemy.schema.Table` object that will be reflected. .. seealso:: :ref:`batch_controlling_table_reflection`
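By way of illustration only, with hypothetical table and column names and the usual ``op`` / ``sa`` imports of a migration script, reflection arguments might be passed along these lines::

    # sketch: override how the "data" column is reflected during the batch copy
    with op.batch_alter_table(
        "user_data",
        reflect_args=[sa.Column("data", sa.Text())],
    ) as batch_op:
        batch_op.alter_column("data", new_column_name="raw_data")

..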
changelog:: :version: 0.7.0 :released: November 24, 2014 .. change:: :tags: feature, versioning :tickets: 167 The "multiple heads / branches" feature has now landed. This is by far the most significant change Alembic has seen since its inception; while the workflow of most commands hasn't changed, and the format of version files and the ``alembic_version`` table are unchanged as well, a new suite of features opens up in the case where multiple version files refer to the same parent, or to the "base". Merging of branches, operating across distinct named heads, and multiple independent bases are now all supported. The feature incurs radical changes to the internals of versioning and traversal, and should be treated as "beta mode" for the next several subsequent releases within 0.7. .. seealso:: :ref:`branches` .. change:: :tags: feature, versioning :tickets: 124 In conjunction with support for multiple independent bases, the specific version directories are now also configurable to include multiple, user-defined directories. When multiple directories exist, the creation of a revision file with no down revision requires that the starting directory is indicated; the creation of subsequent revisions along that lineage will then automatically use that directory for new files. .. seealso:: :ref:`multiple_version_directories` .. change:: :tags: feature, operations, sqlite :tickets: 21 Added "move and copy" workflow, where a table to be altered is copied to a new one with the new structure and the old one dropped, is now implemented for SQLite as well as all database backends in general using the new :meth:`.Operations.batch_alter_table` system. This directive provides a table-specific operations context which gathers column- and constraint-level mutations specific to that table, and at the end of the context creates a new table combining the structure of the old one with the given changes, copies data from old table to new, and finally drops the old table, renaming the new one to the existing name. This is required for fully featured SQLite migrations, as SQLite has very little support for the traditional ALTER directive. The batch directive is intended to produce code that is still compatible with other databases, in that the "move and copy" process only occurs for SQLite by default, while still providing some level of sanity to SQLite's requirement by allowing multiple table mutation operations to proceed within one "move and copy" as well as providing explicit control over when this operation actually occurs. The "move and copy" feature may be optionally applied to other backends as well, however dealing with referential integrity constraints from other tables must still be handled explicitly. .. seealso:: :ref:`batch_migrations` .. change:: :tags: feature, commands Relative revision identifiers as used with ``alembic upgrade``, ``alembic downgrade`` and ``alembic history`` can be combined with specific revisions as well, e.g. ``alembic upgrade ae10+3``, to produce a migration target relative to the given exact version. .. change:: :tags: bug, commands :tickets: 248 The ``alembic revision`` command accepts the ``--sql`` option to suit some very obscure use case where the ``revision_environment`` flag is set up, so that ``env.py`` is run when ``alembic revision`` is run even though autogenerate isn't specified. 
As this flag is otherwise confusing, error messages are now raised if ``alembic revision`` is invoked with both ``--sql`` and ``--autogenerate`` or with ``--sql`` without ``revision_environment`` being set. .. change:: :tags: bug, autogenerate, postgresql :tickets: 247 Added a rule for Postgresql to not render a "drop unique" and "drop index" given the same name; for now it is assumed that the "index" is the implicit one Postgresql generates. Future integration with new SQLAlchemy 1.0 features will improve this to be more resilient. .. change:: :tags: bug, autogenerate :tickets: 247 A change in the ordering when columns and constraints are dropped; autogenerate will now place the "drop constraint" calls *before* the "drop column" calls, so that columns involved in those constraints still exist when the constraint is dropped. .. change:: :tags: feature, commands New commands added: ``alembic show``, ``alembic heads`` and ``alembic merge``. Also, a new option ``--verbose`` has been added to several informational commands, such as ``alembic history``, ``alembic current``, ``alembic branches``, and ``alembic heads``. ``alembic revision`` also contains several new options used within the new branch management system. The output of commands has been altered in many cases to support new fields and attributes; the ``history`` command in particular now returns its "verbose" output only if ``--verbose`` is sent; without this flag it reverts to its older behavior of short line items (which was never changed in the docs). .. change:: :tags: changed, commands The ``--head_only`` option to the ``alembic current`` command is deprecated; the ``current`` command now lists just the version numbers alone by default; use ``--verbose`` to get at additional output. .. change:: :tags: feature, config Added new argument :paramref:`.Config.config_args`, allows a dictionary of replacement variables to be passed which will serve as substitution values when an API-produced :class:`.Config` consumes the ``.ini`` file. Pull request courtesy Noufal Ibrahim. .. change:: :tags: bug, oracle :tickets: 245 The Oracle dialect sets "transactional DDL" to False by default, as Oracle does not support transactional DDL. .. change:: :tags: bug, autogenerate :tickets: 243 Fixed a variety of issues surrounding rendering of Python code that contains unicode literals. The first is that the "quoted_name" construct that SQLAlchemy uses to represent table and column names as well as schema names does not ``repr()`` correctly on Py2K when the value contains unicode characters; therefore an explicit stringification is added to these. Additionally, SQL expressions such as server defaults were not being generated in a unicode-safe fashion leading to decode errors if server defaults contained non-ascii characters. .. change:: :tags: bug, operations :tickets: 174 The :meth:`.Operations.add_column` directive will now additionally emit the appropriate ``CREATE INDEX`` statement if the :class:`~sqlalchemy.schema.Column` object specifies ``index=True``. Pull request courtesy David Szotten. .. change:: :tags: feature, operations :tickets: 205 The :class:`~sqlalchemy.schema.Table` object is now returned when the :meth:`.Operations.create_table` method is used. This ``Table`` is suitable for use in subsequent SQL operations, in particular the :meth:`.Operations.bulk_insert` operation. ..
change:: :tags: feature, autogenerate :tickets: 203 Indexes and unique constraints are now included in the :paramref:`.EnvironmentContext.configure.include_object` hook. Indexes are sent with type ``"index"`` and unique constraints with type ``"unique_constraint"``. .. change:: :tags: bug, autogenerate :tickets: 219 Bound parameters are now resolved as "literal" values within the SQL expression inside of a CheckConstraint(), when rendering the SQL as a text string; supported for SQLAlchemy 0.8.0 and forward. .. change:: :tags: bug, autogenerate :tickets: 199 Added a workaround for SQLAlchemy issue #3023 (fixed in 0.9.5) where a column that's part of an explicit PrimaryKeyConstraint would not have its "nullable" flag set to False, thus producing a false autogenerate. Also added a related correction to MySQL which will correct for MySQL's implicit server default of '0' when a NULL integer column is turned into a primary key column. .. change:: :tags: bug, autogenerate, mysql :tickets: 240 Repaired issue related to the fix for #208 and others; a composite foreign key reported by MySQL would cause a KeyError as Alembic attempted to remove MySQL's implicitly generated indexes from the autogenerate list. .. change:: :tags: bug, autogenerate :tickets: 28 If the "alembic_version" table is present in the target metadata, autogenerate will skip this also. Pull request courtesy Dj Gilcrease. .. change:: :tags: bug, autogenerate :tickets: 77 The :paramref:`.EnvironmentContext.configure.version_table` and :paramref:`.EnvironmentContext.configure.version_table_schema` arguments are now honored during the autogenerate process, such that these names will be used as the "skip" names on both the database reflection and target metadata sides. .. change:: :tags: changed, autogenerate :tickets: 229 The default value of the :paramref:`.EnvironmentContext.configure.user_module_prefix` parameter is **no longer the same as the SQLAlchemy prefix**. When omitted, user-defined types will now use the ``__module__`` attribute of the type class itself when rendering in an autogenerated module. .. change:: :tags: bug, templates :tickets: 234 Revision files are now written out using the ``'wb'`` modifier to ``open()``, since Mako reads the templates with ``'rb'``, thus preventing CRs from being doubled up as has been observed on windows. The encoding of the output now defaults to 'utf-8', which can be configured using a newly added config file parameter ``output_encoding``. .. change:: :tags: bug, operations :tickets: 230 Added support for use of the :class:`~sqlalchemy.sql.elements.quoted_name` construct when using the ``schema`` argument within operations. This allows a name containing a dot to be fully quoted, as well as to provide configurable quoting on a per-name basis. .. change:: :tags: bug, autogenerate, postgresql :tickets: 73 Added a routine by which the Postgresql Alembic dialect inspects the server default of INTEGER/BIGINT columns as they are reflected during autogenerate for the pattern ``nextval(...)`` containing a potential sequence name, then queries ``pg_catalog`` to see if this sequence is "owned" by the column being reflected; if so, it assumes this is a SERIAL or BIGSERIAL column and the server default is omitted from the column reflection as well as any kind of server_default comparison or rendering, along with an INFO message in the logs indicating this has taken place. This allows SERIAL/BIGSERIAL columns to keep the SEQUENCE from being unnecessarily present within the autogenerate operation. .. 
change:: :tags: bug, autogenerate :tickets: 197, 64, 196 The system by which autogenerate renders expressions within a :class:`~sqlalchemy.schema.Index`, the ``server_default`` of :class:`~sqlalchemy.schema.Column`, and the ``existing_server_default`` of :meth:`.Operations.alter_column` has been overhauled to anticipate arbitrary SQLAlchemy SQL constructs, such as ``func.somefunction()``, ``cast()``, ``desc()``, and others. The system does not, as might be preferred, render the full-blown Python expression as originally created within the application's source code, as this would be exceedingly complex and difficult. Instead, it renders the SQL expression against the target backend that's subject to the autogenerate, and then renders that SQL inside of a :func:`~sqlalchemy.sql.expression.text` construct as a literal SQL string. This approach still has the downside that the rendered SQL construct may not be backend-agnostic in all cases, so there is still a need for manual intervention in that small number of cases, but overall the majority of cases should work correctly now. Big thanks to Carlos Rivera for pull requests and support on this. .. change:: :tags: feature SQLAlchemy's testing infrastructure is now used to run tests. This system supports both nose and pytest and opens the way for Alembic testing to support any number of backends, parallel testing, and 3rd party dialect testing. .. change:: :tags: changed, compatibility Minimum SQLAlchemy version is now 0.7.6, however at least 0.8.4 is strongly recommended. The overhaul of the test suite allows for fully passing tests on all SQLAlchemy versions from 0.7.6 on forward. .. change:: :tags: bug, operations The "match" keyword is not sent to :class:`.ForeignKeyConstraint` by :meth:`.Operations.create_foreign_key` when SQLAlchemy 0.7 is in use; this keyword was added to SQLAlchemy as of 0.8.0. .. changelog:: :version: 0.6.7 :released: September 9, 2014 .. change:: :tags: bug, mssql Fixed bug in MSSQL dialect where "rename table" wasn't using ``sp_rename()`` as is required on SQL Server. Pull request courtesy Łukasz Bołdys. .. change:: :tags: feature :tickets: 222 Added support for functional indexes when using the :meth:`.Operations.create_index` directive. Within the list of columns, the SQLAlchemy ``text()`` construct can be sent, embedding a literal SQL expression; the :meth:`.Operations.create_index` will perform some hackery behind the scenes to get the :class:`.Index` construct to cooperate. This works around some current limitations in :class:`.Index` which should be resolved on the SQLAlchemy side at some point. .. changelog:: :version: 0.6.6 :released: August 7, 2014 .. change:: :tags: bug :tickets: 95 A file named ``__init__.py`` in the ``versions/`` directory is now ignored by Alembic when the collection of version files is retrieved. Pull request courtesy Michael Floering. .. change:: :tags: bug Fixed Py3K bug where an attempt would be made to sort None against string values when autogenerate would detect tables across multiple schemas, including the default schema. Pull request courtesy paradoxxxzero. .. change:: :tags: bug Autogenerate render will render the arguments within a Table construct using ``*[...]`` when the number of columns/elements is greater than 255. Pull request courtesy Ryan P. Kelly. .. change:: :tags: bug Fixed bug where foreign key constraints would fail to render in autogenerate when a schema name was present. Pull request courtesy Andreas Zeidler. .. 
change:: :tags: bug :tickets: 212 Some deep-in-the-weeds fixes to try to get "server default" comparison working better across platforms and expressions, in particular on the Postgresql backend, mostly dealing with quoting/not quoting of various expressions at the appropriate time and on a per-backend basis. Repaired and tested support for such defaults as Postgresql interval and array defaults. .. change:: :tags: enhancement :tickets: 209 When a run of Alembic command line fails due to ``CommandError``, the output now prefixes the string with ``"FAILED:"``, and the error is also written to the log output using ``log.error()``. .. change:: :tags: bug :tickets: 208 Liberalized even more the check for MySQL indexes that shouldn't be counted in autogenerate as "drops"; this time it's been reported that an implicitly created index might be named the same as a composite foreign key constraint, and not the actual columns, so we now skip those when detected as well. .. change:: :tags: feature Added a new accessor :attr:`.MigrationContext.config`, when used in conjunction with a :class:`.EnvironmentContext` and :class:`.Config`, this config will be returned. Patch courtesy Marc Abramowitz. .. changelog:: :version: 0.6.5 :released: May 3, 2014 .. change:: :tags: bug, autogenerate, mysql :tickets: 202 This release's "autogenerate index detection" bug: when a MySQL table includes an Index with the same name as a column, autogenerate reported it as an "add" even though it's not; this is because we ignore reflected indexes of this nature due to MySQL creating them implicitly. Indexes that are named the same as a column are now ignored on MySQL if we see that the backend is reporting that it already exists; this indicates that we can still detect additions of these indexes but not drops, as we cannot distinguish a backend index same-named as the column as one that is user generated or mysql-generated. .. change:: :tags: feature, environment :tickets: 201 Added new feature :paramref:`.EnvironmentContext.configure.transaction_per_migration`, which when True causes the BEGIN/COMMIT pair to incur for each migration individually, rather than for the whole series of migrations. This is to assist with some database directives that need to be within individual transactions, without the need to disable transactional DDL entirely. .. change:: :tags: bug, autogenerate :tickets: 200 Fixed bug where the ``include_object()`` filter would not receive the original :class:`.Column` object when evaluating a database-only column to be dropped; the object would not include the parent :class:`.Table` nor other aspects of the column that are important for generating the "downgrade" case where the column is recreated. .. change:: :tags: bug, environment :tickets: 195 Fixed bug where :meth:`.EnvironmentContext.get_x_argument` would fail if the :class:`.Config` in use didn't actually originate from a command line call. .. change:: :tags: bug, autogenerate :tickets: 194 Fixed another bug regarding naming conventions, continuing from :ticket:`183`, where add_index() and drop_index() directives would not correctly render the ``f()`` construct when the index contained a convention-driven name. .. changelog:: :version: 0.6.4 :released: March 28, 2014 .. change:: :tags: bug, mssql :tickets: 186 Added quoting to the table name when the special EXEC is run to drop any existing server defaults or constraints when the :paramref:`.drop_column.mssql_drop_check` or :paramref:`.drop_column.mssql_drop_default` arguments are used. ..
change:: :tags: bug, mysql :tickets: 103 Added/fixed support for MySQL "SET DEFAULT" / "DROP DEFAULT" phrases, which will now be rendered if only the server default is changing or being dropped (e.g. specify None to alter_column() to indicate "DROP DEFAULT"). Also added support for rendering MODIFY rather than CHANGE when the column name isn't changing. .. change:: :tags: bug :tickets: 190 Added support for the ``initially``, ``match`` keyword arguments as well as dialect-specific keyword arguments to :meth:`.Operations.create_foreign_key`. .. change:: :tags: feature :tickets: 163 Altered the support for "sourceless" migration files (e.g. only .pyc or .pyo present) so that the flag "sourceless=true" needs to be in alembic.ini for this behavior to take effect. .. change:: :tags: bug, mssql :tickets: 185 The feature that keeps on giving, index/unique constraint autogenerate detection, has even more fixes, this time to accommodate database dialects that both don't yet report on unique constraints, but the backend does report unique constraints as indexes. The logic Alembic uses to distinguish between "this is an index!" vs. "this is a unique constraint that is also reported as an index!" has now been further enhanced to not produce unwanted migrations when the dialect is observed to not yet implement get_unique_constraints() (e.g. mssql). Note that such a backend will no longer report index drops for unique indexes, as these cannot be distinguished from an unreported unique index. .. change:: :tags: bug :tickets: 183 Extensive changes have been made to more fully support SQLAlchemy's new naming conventions feature. Note that while SQLAlchemy has added this feature as of 0.9.2, some additional fixes in 0.9.4 are needed to resolve some of the issues: 1. The :class:`.Operations` object now takes into account the naming conventions that are present on the :class:`.MetaData` object that's associated using :paramref:`~.EnvironmentContext.configure.target_metadata`. When :class:`.Operations` renders a constraint directive like ``ADD CONSTRAINT``, it now will make use of this naming convention when it produces its own temporary :class:`.MetaData` object. 2. Note however that the autogenerate feature in most cases generates constraints like foreign keys and unique constraints with the final names intact; the only exceptions are the constraints implicit with a schema-type like Boolean or Enum. In most of these cases, the naming convention feature will not take effect for these constraints and will instead use the given name as is, with one exception.... 3. Naming conventions which use the ``"%(constraint_name)s"`` token, that is, produce a new name that uses the original name as a component, will still be pulled into the naming convention converter and be converted. The problem arises when autogenerate renders a constraint with its already-generated name present in the migration file's source code; the name will be doubled up at render time due to the combination of #1 and #2. So to work around this, autogenerate now renders these already-tokenized names using the new :meth:`.Operations.f` component. This component is only generated if **SQLAlchemy 0.9.4** or greater is in use. Therefore it is highly recommended that an upgrade to Alembic 0.6.4 be accompanied by an upgrade of SQLAlchemy 0.9.4, if the new naming conventions feature is used. .. seealso:: :ref:`autogen_naming_conventions` ..
change:: :tags: bug :tickets: 160 Suppressed IOErrors which can raise when program output pipe is closed under a program like ``head``; however this only works on Python 2. On Python 3, there is not yet a known way to suppress the BrokenPipeError warnings without prematurely terminating the program via signals. .. change:: :tags: bug :tickets: 179 Fixed bug where :meth:`.Operations.bulk_insert` would not function properly when :meth:`.Operations.inline_literal` values were used, either in --sql or non-sql mode. The values will now render directly in --sql mode. For compatibility with "online" mode, a new flag :paramref:`~.Operations.bulk_insert.multiinsert` can be set to False which will cause each parameter set to be compiled and executed with individual INSERT statements. .. change:: :tags: bug, py3k :tickets: 175 Fixed a failure of the system that allows "legacy keyword arguments" to be understood, which arose as of a change in Python 3.4 regarding decorators. A workaround is applied that allows the code to work across Python 3 versions. .. change:: :tags: feature The :func:`.command.revision` command now returns the :class:`.Script` object corresponding to the newly generated revision. From this structure, one can get the revision id, the module documentation, and everything else, for use in scripts that call upon this command. Pull request courtesy Robbie Coomber. .. changelog:: :version: 0.6.3 :released: February 2, 2014 .. change:: :tags: bug :tickets: 172 Added a workaround for when we call ``fcntl.ioctl()`` to get at ``TERMWIDTH``; if the function returns zero, as is reported to occur in some pseudo-ttys, the message wrapping system is disabled in the same way as if ``ioctl()`` failed. .. change:: :tags: feature :tickets: 171 Added new argument :paramref:`.EnvironmentContext.configure.user_module_prefix`. This prefix is applied when autogenerate renders a user-defined type, which here is defined as any type that is from a module outside of the ``sqlalchemy.`` hierarchy. This prefix defaults to ``None``, in which case the :paramref:`.EnvironmentContext.configure.sqlalchemy_module_prefix` is used, thus preserving the current behavior. .. change:: :tags: bug :tickets: 170 Added support for autogenerate covering the use case where :class:`.Table` objects specified in the metadata have an explicit ``schema`` attribute whose name matches that of the connection's default schema (e.g. "public" for Postgresql). Previously, it was assumed that "schema" was ``None`` when it matched the "default" schema, now the comparison adjusts for this. .. change:: :tags: bug The :func:`.compare_metadata` public API function now takes into account the settings for :paramref:`.EnvironmentContext.configure.include_object`, :paramref:`.EnvironmentContext.configure.include_symbol`, and :paramref:`.EnvironmentContext.configure.include_schemas`, in the same way that the ``--autogenerate`` command does. Pull request courtesy Roman Podoliaka. .. change:: :tags: bug :tickets: 168 Calling :func:`.bulk_insert` with an empty list will not emit any commands on the current connection. This was already the case with ``--sql`` mode, so is now the case with "online" mode. .. change:: :tags: bug Enabled schema support for index and unique constraint autodetection; previously these were non-functional and could in some cases lead to attribute errors. Pull request courtesy Dimitris Theodorou. .. 
change:: :tags: bug :tickets: 164 More fixes to index autodetection; indexes created with expressions like DESC or functional indexes will no longer cause AttributeError exceptions when attempting to compare the columns. .. change:: :tags: feature :tickets: 163 The :class:`.ScriptDirectory` system that loads migration files from a ``versions/`` directory now supports so-called "sourceless" operation, where the ``.py`` files are not present and instead ``.pyc`` or ``.pyo`` files are directly present where the ``.py`` files should be. Note that while Python 3.3 has a new system of locating ``.pyc``/``.pyo`` files within a directory called ``__pycache__`` (e.g. PEP-3147), PEP-3147 maintains support for the "source-less imports" use case, where the ``.pyc``/``.pyo`` are in present in the "old" location, e.g. next to the ``.py`` file; this is the usage that's supported even when running Python3.3. .. changelog:: :version: 0.6.2 :released: Fri Dec 27 2013 .. change:: :tags: bug Autogenerate for ``op.create_table()`` will not include a ``PrimaryKeyConstraint()`` that has no columns. .. change:: :tags: bug Fixed bug in the not-internally-used :meth:`.ScriptDirectory.get_base` method which would fail if called on an empty versions directory. .. change:: :tags: bug :tickets: 157 An almost-rewrite of the new unique constraint/index autogenerate detection, to accommodate a variety of issues. The emphasis is on not generating false positives for those cases where no net change is present, as these errors are the ones that impact all autogenerate runs: * Fixed an issue with unique constraint autogenerate detection where a named ``UniqueConstraint`` on both sides with column changes would render with the "add" operation before the "drop", requiring the user to reverse the order manually. * Corrected for MySQL's apparent addition of an implicit index for a foreign key column, so that it doesn't show up as "removed". This required that the index/constraint autogen system query the dialect-specific implementation for special exceptions. * reworked the "dedupe" logic to accommodate MySQL's bi-directional duplication of unique indexes as unique constraints, and unique constraints as unique indexes. Postgresql's slightly different logic of duplicating unique constraints into unique indexes continues to be accommodated as well. Note that a unique index or unique constraint removal on a backend that duplicates these may show up as a distinct "remove_constraint()" / "remove_index()" pair, which may need to be corrected in the post-autogenerate if multiple backends are being supported. * added another dialect-specific exception to the SQLite backend when dealing with unnamed unique constraints, as the backend can't currently report on constraints that were made with this technique, hence they'd come out as "added" on every run. * the ``op.create_table()`` directive will be auto-generated with the ``UniqueConstraint`` objects inline, but will not double them up with a separate ``create_unique_constraint()`` call, which may have been occurring. Indexes still get rendered as distinct ``op.create_index()`` calls even when the corresponding table was created in the same script. * the inline ``UniqueConstraint`` within ``op.create_table()`` includes all the options like ``deferrable``, ``initially``, etc. Previously these weren't rendering. .. change:: :tags: feature, mssql Added new argument ``mssql_drop_foreign_key`` to :meth:`.Operations.drop_column`. 
Like ``mssql_drop_default`` and ``mssql_drop_check``, will do an inline lookup for a single foreign key which applies to this column, and drop it. For a column with more than one FK, you'd still need to explicitly use :meth:`.Operations.drop_constraint` given the name, even though only MSSQL has this limitation in the first place. .. change:: :tags: bug, mssql The MSSQL backend will add the batch separator (e.g. ``"GO"``) in ``--sql`` mode after the final ``COMMIT`` statement, to ensure that statement is also processed in batch mode. Courtesy Derek Harland. .. changelog:: :version: 0.6.1 :released: Wed Nov 27 2013 .. change:: :tags: bug, mysql :tickets: 152 Fixed bug where :func:`.op.alter_column` in the MySQL dialect would fail to apply quotes to column names that had mixed casing or spaces. .. change:: :tags: feature Expanded the size of the "slug" generated by "revision" to 40 characters, which is also configurable by new field ``truncate_slug_length``; and also split on the word rather than the character; courtesy Frozenball. .. change:: :tags: bug :tickets: 135 Fixed the output wrapping for Alembic message output, so that we either get the terminal width for "pretty printing" with indentation, or if not we just output the text as is; in any case the text won't be wrapped too short. .. change:: :tags: bug Fixes to Py3k in-place compatibility regarding output encoding and related; the use of the new io.* package introduced some incompatibilities on Py2k. These should be resolved, due to the introduction of new adapter types for translating from io.* to Py2k file types, StringIO types. Thanks to Javier Santacruz for help with this. .. change:: :tags: bug :tickets: 145 Fixed py3k bug where the wrong form of ``next()`` was being called when using the list_templates command. Courtesy Chris Wilkes. .. change:: :tags: feature :tickets: 107 Support for autogeneration detection and rendering of indexes and unique constraints has been added. The logic goes through some effort in order to differentiate between true unique constraints and unique indexes, where there are some quirks on backends like Postgresql. The effort here in producing the feature and tests is courtesy of IJL. .. change:: :tags: bug Fixed bug introduced by new ``include_object`` argument where the inspected column would be misinterpreted when using a user-defined type comparison function, causing a KeyError or similar expression-related error. Fix courtesy Maarten van Schaik. .. change:: :tags: bug Added the "deferrable" keyword argument to :func:`.op.create_foreign_key` so that ``DEFERRABLE`` constraint generation is supported; courtesy Pedro Romano. .. change:: :tags: bug :tickets: 137 Ensured that strings going to stdout go through an encode/decode phase, so that any non-ASCII characters get to the output stream correctly in both Py2k and Py3k. Also added source encoding detection using Mako's parse_encoding() routine in Py2k so that the __doc__ of a non-ascii revision file can be treated as unicode in Py2k. .. changelog:: :version: 0.6.0 :released: Fri July 19 2013 .. change:: :tags: feature :tickets: 101 Added new kw argument to :meth:`.EnvironmentContext.configure` ``include_object``. This is a more flexible version of the ``include_symbol`` argument which allows filtering of columns as well as tables from the autogenerate process, and in the future will also work for types, constraints and other constructs. 
The fully constructed schema object is passed, including its name and type as well as a flag indicating if the object is from the local application metadata or is reflected. .. change:: :tags: feature The output of the ``alembic history`` command is now expanded to show information about each change on multiple lines, including the full top message, resembling the formatting of git log. .. change:: :tags: feature Added :attr:`alembic.config.Config.cmd_opts` attribute, allows access to the ``argparse`` options passed to the ``alembic`` runner. .. change:: :tags: feature :tickets: 120 Added new command line argument ``-x``, allows extra arguments to be appended to the command line which can be consumed within an ``env.py`` script by looking at ``context.config.cmd_opts.x``, or more simply a new method :meth:`.EnvironmentContext.get_x_argument`. .. change:: :tags: bug :tickets: 125 Added support for options like "name" etc. to be rendered within CHECK constraints in autogenerate. Courtesy Sok Ann Yap. .. change:: :tags: misc Source repository has been moved from Mercurial to Git. .. change:: :tags: bug Repaired autogenerate rendering of ForeignKeyConstraint to include use_alter argument, if present. .. change:: :tags: feature Added ``-r`` argument to ``alembic history`` command, allows specification of ``[start]:[end]`` to view a slice of history. Accepts revision numbers, symbols "base", "head", a new symbol "current" representing the current migration, as well as relative ranges for one side at a time (i.e. ``-r-5:head``, ``-rcurrent:+3``). Courtesy Atsushi Odagiri for this feature. .. change:: :tags: feature :tickets: 55 Source base is now in-place for Python 2.6 through 3.3, without the need for 2to3. Support for Python 2.5 and below has been dropped. Huge thanks to Hong Minhee for all the effort on this! .. changelog:: :version: 0.5.0 :released: Thu Apr 4 2013 .. note:: Alembic 0.5.0 now requires at least version 0.7.3 of SQLAlchemy to run properly. Support for 0.6 has been dropped. .. change:: :tags: feature :tickets: 76 Added ``version_table_schema`` argument to :meth:`.EnvironmentContext.configure`, complements the ``version_table`` argument to set an optional remote schema for the version table. Courtesy Christian Blume. .. change:: :tags: bug, postgresql :tickets: 32 Fixed format of RENAME for table that includes schema with Postgresql; the schema name shouldn't be in the "TO" field. .. change:: :tags: feature :tickets: 90 Added ``output_encoding`` option to :meth:`.EnvironmentContext.configure`, used with ``--sql`` mode to apply an encoding to the output stream. .. change:: :tags: feature :tickets: 93 Added :meth:`.Operations.create_primary_key` operation, will generate an ADD CONSTRAINT for a primary key. .. change:: :tags: bug, mssql :tickets: 109 Fixed bug whereby double quoting would be applied to target column name during an ``sp_rename`` operation. .. change:: :tags: bug, sqlite, mysql :tickets: 112 transactional_ddl flag for SQLite, MySQL dialects set to False. MySQL doesn't support it, SQLite does but current pysqlite driver does not. .. change:: :tags: feature :tickets: 115 upgrade and downgrade commands will list the first line of the docstring out next to the version number. Courtesy Hong Minhee. .. change:: :tags: feature Added --head-only option to "alembic current", will print current version plus the symbol "(head)" if this version is the head or not. Courtesy Charles-Axel Dein. ..
change:: :tags: bug :tickets: 110 Autogenerate will render additional table keyword arguments like "mysql_engine" and others within op.create_table(). .. change:: :tags: feature :tickets: 108 The rendering of any construct during autogenerate can be customized, in particular to allow special rendering for user-defined column, constraint subclasses, using new ``render_item`` argument to :meth:`.EnvironmentContext.configure`. .. change:: :tags: bug Fixed bug whereby create_index() would include in the constraint columns that are added to all Table objects using events, externally to the generation of the constraint. This is the same issue that was fixed for unique constraints in version 0.3.2. .. change:: :tags: bug Worked around a backwards-incompatible regression in Python 3.3 regarding argparse; running "alembic" with no arguments now yields an informative error in py3.3 as with all previous versions. Courtesy Andrey Antukh. .. change:: :tags: change SQLAlchemy 0.6 is no longer supported by Alembic - minimum version is 0.7.3, full support is as of 0.7.9. .. change:: :tags: bug :tickets: 104 A host of argument name changes within migration operations for consistency. Keyword arguments will continue to work on the old name for backwards compatibility, however required positional arguments will not: :meth:`.Operations.alter_column` - ``name`` -> ``new_column_name`` - old name will work for backwards compatibility. :meth:`.Operations.create_index` - ``tablename`` -> ``table_name`` - argument is positional. :meth:`.Operations.drop_index` - ``tablename`` -> ``table_name`` - old name will work for backwards compatibility. :meth:`.Operations.drop_constraint` - ``tablename`` -> ``table_name`` - argument is positional. :meth:`.Operations.drop_constraint` - ``type`` -> ``type_`` - old name will work for backwards compatibility .. changelog:: :version: 0.4.2 :released: Fri Jan 11 2013 .. change:: :tags: bug, autogenerate :tickets: 99 Fixed bug where autogenerate would fail if a Column to be added to a table made use of the ".key" parameter. .. change:: :tags: bug, sqlite :tickets: 98 The "implicit" constraint generated by a type such as Boolean or Enum will not generate an ALTER statement when run on SQLite, which does not support ALTER for the purpose of adding/removing constraints separate from the column def itself. While SQLite supports adding a CHECK constraint at the column level, SQLAlchemy would need modification to support this. A warning is emitted indicating this constraint cannot be added in this scenario. .. change:: :tags: bug :tickets: 96 Added a workaround to setup.py to prevent "NoneType" error from occurring when "setup.py test" is run. .. change:: :tags: bug :tickets: 96 Added an append_constraint() step to each condition within test_autogenerate:AutogenRenderTest.test_render_fk_constraint_kwarg if the SQLAlchemy version is less than 0.8, as ForeignKeyConstraint does not auto-append prior to 0.8. .. change:: :tags: feature :tickets: 96 Added a README.unittests with instructions for running the test suite fully. .. changelog:: :version: 0.4.1 :released: Sun Dec 9 2012 .. change:: :tags: bug :tickets: 92 Added support for autogenerate render of ForeignKeyConstraint options onupdate, ondelete, initially, and deferred. .. change:: :tags: bug :tickets: 94 Autogenerate will include "autoincrement=False" in the rendered table metadata if this flag was set to false on the source :class:`.Column` object. ..
change:: :tags: feature :tickets: 66 Explicit error message describing the case when downgrade --sql is used without specifying specific start/end versions. .. change:: :tags: bug :tickets: 81 Removed erroneous "emit_events" attribute from operations.create_table() documentation. .. change:: :tags: bug :tickets: Fixed the minute component in file_template which returned the month part of the create date. .. changelog:: :version: 0.4.0 :released: Mon Oct 01 2012 .. change:: :tags: feature :tickets: 33 Support for tables in alternate schemas has been added fully to all operations, as well as to the autogenerate feature. When using autogenerate, specifying the flag include_schemas=True to Environment.configure() will also cause autogenerate to scan all schemas located by Inspector.get_schema_names(), which is supported by *some* (but not all) SQLAlchemy dialects including Postgresql. *Enormous* thanks to Bruno Binet for a huge effort in implementing as well as writing tests. . .. change:: :tags: feature :tickets: 70 The command line runner has been organized into a reusable CommandLine object, so that other front-ends can re-use the argument parsing built in. .. change:: :tags: feature :tickets: 43 Added "stdout" option to Config, provides control over where the "print" output of commands like "history", "init", "current" etc. are sent. .. change:: :tags: bug :tickets: 71 Fixed the "multidb" template which was badly out of date. It now generates revision files using the configuration to determine the different upgrade_() methods needed as well, instead of needing to hardcode these. Huge thanks to BryceLohr for doing the heavy lifting here. .. change:: :tags: bug :tickets: 72 Fixed the regexp that was checking for .py files in the version directory to allow any .py file through. Previously it was doing some kind of defensive checking, probably from some early notions of how this directory works, that was prohibiting various filename patterns such as those which begin with numbers. .. change:: :tags: bug :tickets: Fixed MySQL rendering for server_default which didn't work if the server_default was a generated SQL expression. Courtesy Moriyoshi Koizumi. .. change:: :tags: feature :tickets: Added support for alteration of MySQL columns that have AUTO_INCREMENT, as well as enabling this flag. Courtesy Moriyoshi Koizumi. .. changelog:: :version: 0.3.6 :released: Wed Aug 15 2012 .. change:: :tags: feature :tickets: 27 Added include_symbol option to EnvironmentContext.configure(), specifies a callable which will include/exclude tables in their entirety from the autogeneration process based on name. .. change:: :tags: feature :tickets: 59 Added year, month, day, hour, minute, second variables to file_template. .. change:: :tags: feature :tickets: Added 'primary' to the list of constraint types recognized for MySQL drop_constraint(). .. change:: :tags: feature :tickets: Added --sql argument to the "revision" command, for the use case where the "revision_environment" config option is being used but SQL access isn't desired. .. change:: :tags: bug :tickets: Repaired create_foreign_key() for self-referential foreign keys, which weren't working at all. .. change:: :tags: bug :tickets: 63 'alembic' command reports an informative error message when the configuration is missing the 'script_directory' key. .. change:: :tags: bug :tickets: 62 Fixes made to the constraints created/dropped alongside so-called "schema" types such as Boolean and Enum. 
The create/drop constraint logic does not kick in when using a dialect that doesn't use constraints for these types, such as postgresql, even when existing_type is specified to alter_column(). Additionally, the constraints are not affected if existing_type is passed but type\_ is not, i.e. there's no net change in type. .. change:: :tags: bug :tickets: 66 Improved error message when specifying non-ordered revision identifiers to cover the case when the "higher" rev is None, improved message overall. .. changelog:: :version: 0.3.5 :released: Sun Jul 08 2012 .. change:: :tags: bug :tickets: 31 Fixed issue whereby reflected server defaults wouldn't be quoted correctly; uses repr() now. .. change:: :tags: bug :tickets: 58 Fixed issue whereby when autogenerate would render create_table() on the upgrade side for a table that has a Boolean type, an unnecessary CheckConstraint() would be generated. .. change:: :tags: feature :tickets: Implemented SQL rendering for CheckConstraint() within autogenerate upgrade, including for literal SQL as well as SQL Expression Language expressions. .. changelog:: :version: 0.3.4 :released: Sat Jun 02 2012 .. change:: :tags: bug :tickets: Fixed command-line bug introduced by the "revision_environment" feature. .. changelog:: :version: 0.3.3 :released: Sat Jun 02 2012 .. change:: :tags: feature :tickets: New config argument "revision_environment=true", causes env.py to be run unconditionally when the "revision" command is run, to support script.py.mako templates with dependencies on custom "template_args". .. change:: :tags: feature :tickets: Added "template_args" option to configure() so that an env.py can add additional arguments to the template context when running the "revision" command. This requires either --autogenerate or the configuration directive "revision_environment=true". .. change:: :tags: bug :tickets: 44 Added "type" argument to op.drop_constraint(), and implemented full constraint drop support for MySQL. CHECK and undefined raise an error. MySQL needs the constraint type in order to emit a DROP CONSTRAINT. .. change:: :tags: feature :tickets: 34 Added version_table argument to EnvironmentContext.configure(), allowing for the configuration of the version table name. .. change:: :tags: feature :tickets: Added support for "relative" migration identifiers, i.e. "alembic upgrade +2", "alembic downgrade -1". Courtesy Atsushi Odagiri for this feature. .. change:: :tags: bug :tickets: 49 Fixed bug whereby directories inside of the template directories, such as __pycache__ on Pypy, would mistakenly be interpreted as files which are part of the template. .. changelog:: :version: 0.3.2 :released: Mon Apr 30 2012 .. change:: :tags: feature :tickets: 40 Basic support for Oracle added, courtesy shgoh. .. change:: :tags: feature :tickets: Added support for UniqueConstraint in autogenerate, courtesy Atsushi Odagiri .. change:: :tags: bug :tickets: Fixed support of schema-qualified ForeignKey target in column alter operations, courtesy Alexander Kolov. .. change:: :tags: bug :tickets: Fixed bug whereby create_unique_constraint() would include in the constraint columns that are added to all Table objects using events, externally to the generation of the constraint. .. changelog:: :version: 0.3.1 :released: Sat Apr 07 2012 .. change:: :tags: bug :tickets: 41 bulk_insert() fixes: 1. bulk_insert() operation was not working most likely since the 0.2 series when used with an engine. 2. 
Repaired bulk_insert() to complete when used against a lower-case-t table and executing with only one set of parameters, working around SQLAlchemy bug #2461 in this regard. 3. bulk_insert() uses "inline=True" so that phrases like RETURNING and such don't get invoked for single-row bulk inserts. 4. bulk_insert() will check that you're passing a list of dictionaries in, raises TypeError if not detected. .. changelog:: :version: 0.3.0 :released: Thu Apr 05 2012 .. change:: :tags: general :tickets: The focus of 0.3 is to clean up and more fully document the public API of Alembic, including better accessors on the MigrationContext and ScriptDirectory objects. Methods that are not considered to be public on these objects have been underscored, and methods which should be public have been cleaned up and documented, including: MigrationContext.get_current_revision() ScriptDirectory.iterate_revisions() ScriptDirectory.get_current_head() ScriptDirectory.get_heads() ScriptDirectory.get_base() ScriptDirectory.generate_revision() .. change:: :tags: feature :tickets: Added a bit of autogenerate to the public API in the form of the function alembic.autogenerate.compare_metadata. .. changelog:: :version: 0.2.2 :released: Mon Mar 12 2012 .. change:: :tags: feature :tickets: Informative error message when op.XYZ directives are invoked at module import time. .. change:: :tags: bug :tickets: 35 Fixed inappropriate direct call to util.err() and therefore sys.exit() when Config failed to locate the config file within library usage. .. change:: :tags: bug :tickets: Autogenerate will emit CREATE TABLE and DROP TABLE directives according to foreign key dependency order. .. change:: :tags: bug :tickets: implement 'tablename' parameter on drop_index() as this is needed by some backends. .. change:: :tags: feature :tickets: Added execution_options parameter to op.execute(), will call execution_options() on the Connection before executing. The immediate use case here is to allow access to the new no_parameters option in SQLAlchemy 0.7.6, which allows some DBAPIs (psycopg2, MySQLdb) to allow percent signs straight through without escaping, thus providing cross-compatible operation with DBAPI execution and static script generation. .. change:: :tags: bug :tickets: setup.py won't install argparse if on Python 2.7/3.2 .. change:: :tags: feature :tickets: 29 script_location can be interpreted by pkg_resources.resource_filename(), if it is a non-absolute URI that contains colons. This scheme is the same one used by Pyramid. .. change:: :tags: feature :tickets: added missing support for onupdate/ondelete flags for ForeignKeyConstraint, courtesy Giacomo Bagnoli .. change:: :tags: bug :tickets: 30 fixed a regression regarding an autogenerate error message, as well as various glitches in the Pylons sample template. The Pylons sample template requires that you tell it where to get the Engine from now. courtesy Marcin Kuzminski .. change:: :tags: bug :tickets: drop_index() ensures a dummy column is added when it calls "Index", as SQLAlchemy 0.7.6 will warn on index with no column names. .. changelog:: :version: 0.2.1 :released: Tue Jan 31 2012 .. change:: :tags: bug :tickets: 26 Fixed the generation of CHECK constraint, regression from 0.2.0 .. changelog:: :version: 0.2.0 :released: Mon Jan 30 2012 .. change:: :tags: feature :tickets: 19 API rearrangement allows everything Alembic does to be represented by contextual objects, including EnvironmentContext, MigrationContext, and Operations. 
Other libraries and applications can now use things like "alembic.op" without relying upon global configuration variables. The rearrangement was done such that existing migrations should be OK, as long as they use the pattern of "from alembic import context" and "from alembic import op", as these are now contextual objects, not modules. .. change:: :tags: feature :tickets: 24 The naming of revision files can now be customized to be some combination of "rev id" and "slug", the latter of which is based on the revision message. By default, the pattern "_" is used for new files. New script files should include the "revision" variable for this to work, which is part of the newer script.py.mako scripts. .. change:: :tags: bug :tickets: 25 env.py templates call connection.close() to better support programmatic usage of commands; use NullPool in conjunction with create_engine() as well so that no connection resources remain afterwards. .. change:: :tags: bug :tickets: 22 fix the config.main() function to honor the arguments passed, remove no longer used "scripts/alembic" as setuptools creates this for us. .. change:: :tags: bug :tickets: Fixed alteration of column type on MSSQL to not include the keyword "TYPE". .. change:: :tags: feature :tickets: 23 Can create alembic.config.Config with no filename, use set_main_option() to add values. Also added set_section_option() which will add sections. .. changelog:: :version: 0.1.1 :released: Wed Jan 04 2012 .. change:: :tags: bug :tickets: Clean up file write operations so that file handles are closed. .. change:: :tags: feature :tickets: PyPy is supported. .. change:: :tags: feature :tickets: Python 2.5 is supported, needs __future__.with_statement .. change:: :tags: bug :tickets: Fix autogenerate so that "pass" is generated between the two comments if no net migrations were present. .. change:: :tags: bug :tickets: 16 Fix autogenerate bug that prevented correct reflection of a foreign-key referenced table in the list of "to remove". .. change:: :tags: bug :tickets: 17 Fix bug where create_table() didn't handle self-referential foreign key correctly .. change:: :tags: bug :tickets: 18 Default prefix for autogenerate directives is "op.", matching the mako templates. .. change:: :tags: feature :tickets: 18 Add alembic_module_prefix argument to configure() to complement sqlalchemy_module_prefix. .. change:: :tags: bug :tickets: 14 fix quotes not being rendered in ForeignKeyConstraint during autogenerate .. changelog:: :version: 0.1.0 :released: Wed Nov 30 2011 .. change:: :tags: :tickets: Initial release. Status of features: .. change:: :tags: :tickets: Alembic is used in at least one production environment, but should still be considered ALPHA LEVEL SOFTWARE as of this release, particularly in that many features are expected to be missing / unimplemented. Major API changes are not anticipated but for the moment nothing should be assumed. The author asks that you *please* report all issues, missing features, workarounds etc. to the bugtracker. .. change:: :tags: :tickets: Python 3 is supported and has been tested. .. change:: :tags: :tickets: The "Pylons" and "MultiDB" environment templates have not been directly tested - these should be considered to be samples to be modified as needed. Multiple database support itself is well tested, however. .. change:: :tags: :tickets: Postgresql and MS SQL Server environments have been tested for several weeks in a production environment.
In particular, some involved workarounds were implemented to allow fully-automated dropping of default- or constraint-holding columns with SQL Server. .. change:: :tags: :tickets: MySQL support has also been implemented to a basic degree, including MySQL's awkward style of modifying columns being accommodated. .. change:: :tags: :tickets: Other database environments not included among those three have *not* been tested, *at all*. This includes Firebird, Oracle, Sybase. Adding support for these backends should be straightforward. Please report all missing/ incorrect behaviors to the bugtracker! Patches are welcome here but are optional - please just indicate the exact format expected by the target database. .. change:: :tags: :tickets: SQLite, as a backend, has almost no support for schema alterations to existing databases. The author would strongly recommend that SQLite not be used in a migration context - just dump your SQLite database into an intermediary format, then dump it back into a new schema. For dev environments, the dev installer should be building the whole DB from scratch. Or just use Postgresql, which is a much better database for non-trivial schemas. Requests for full ALTER support on SQLite should be reported to SQLite's bug tracker at http://www.sqlite.org/src/wiki?name=Bug+Reports, as Alembic will not be implementing the "rename the table to a temptable then copy the data into a new table" workaround. Note that Alembic will at some point offer an extensible API so that you can implement commands like this yourself. .. change:: :tags: :tickets: Well-tested directives include add/drop table, add/drop column, including support for SQLAlchemy "schema" types which generate additional CHECK constraints, i.e. Boolean, Enum. Other directives not included here have *not* been strongly tested in production, i.e. rename table, etc. .. change:: :tags: :tickets: Both "online" and "offline" migrations, the latter being generated SQL scripts to hand off to a DBA, have been strongly production tested against Postgresql and SQL Server. .. change:: :tags: :tickets: Modify column type, default status, nullable, is functional and tested across PG, MSSQL, MySQL, but not yet widely tested in production usage. .. change:: :tags: :tickets: Many migrations are still outright missing, i.e. create/add sequences, etc. As a workaround, execute() can be used for those which are missing, though posting of tickets for new features/missing behaviors is strongly encouraged. .. change:: :tags: :tickets: Autogenerate feature is implemented and has been tested, though only a little bit in a production setting. In particular, detection of type and server default changes are optional and are off by default; they can also be customized by a callable. Both features work but can have surprises particularly the disparity between BIT/TINYINT and boolean, which hasn't yet been worked around, as well as format changes performed by the database on defaults when it reports back. When enabled, the PG dialect will execute the two defaults to be compared to see if they are equivalent. Other backends may need to do the same thing. The autogenerate feature only generates "candidate" commands which must be hand-tailored in any case, so is still a useful feature and is safe to use. Please report missing/broken features of autogenerate! This will be a great feature and will also improve SQLAlchemy's reflection services. .. change:: :tags: :tickets: Support for non-ASCII table, column and constraint names is mostly nonexistent. 
This is also a straightforward feature add as SQLAlchemy itself supports unicode identifiers; Alembic itself will likely need fixes to logging, column identification by key, etc. for full support here. alembic-rel_1_7_6/docs/build/conf.py000066400000000000000000000167231417624537100174760ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Alembic documentation build configuration file, created by # sphinx-quickstart on Sat May 1 12:47:55 2010. # # This file is execfile()d with the current directory set to its containing # dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(os.path.abspath(".")) # If your extensions are in another directory, add it here. If the directory # is relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. sys.path.insert(0, os.path.abspath("../../")) if True: import alembic # noqa # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.intersphinx", "changelog", "sphinx_paramlinks", ] # tags to sort on inside of sections changelog_sections = [ "changed", "feature", "usecase", "bug", "moved", "removed", ] changelog_render_ticket = "https://github.com/sqlalchemy/alembic/issues/%s" changelog_render_pullreq = "https://github.com/sqlalchemy/alembic/pull/%s" changelog_render_pullreq = { "default": "https://github.com/sqlalchemy/alembic/pull/%s", "github": "https://github.com/sqlalchemy/alembic/pull/%s", } autodoc_default_flags = ["members"] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix of source filenames. source_suffix = ".rst" # The encoding of source files. # source_encoding = 'utf-8' nitpicky = True # The master toctree document. master_doc = "index" # General information about the project. project = u"Alembic" copyright = u"2010-2022, Mike Bayer" # noqa # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = alembic.__version__ # The full version, including alpha/beta/rc tags. release = "1.7.6" release_date = "February 1, 2022" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. # unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. 
# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # -- Options for HTML output ------------------------------------------------ # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = "nature" html_style = "nature_override.css" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. html_sidebars = { "**": [ "site_custom_sidebars.html", "localtoc.html", "searchbox.html", "relations.html", ] } # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_use_modindex = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = "Alembicdoc" # -- Options for LaTeX output ----------------------------------------------- # The paper size ('letter' or 'a4'). # latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). # latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples (source start # file, target name, title, author, documentclass [howto/manual]). 
latex_documents = [ ("index", "Alembic.tex", u"Alembic Documentation", u"Mike Bayer", "manual") ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # Additional stuff for the LaTeX preamble. # latex_preamble = '' # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_use_modindex = True # {'python': ('http://docs.python.org/3.2', None)} autoclass_content = "both" intersphinx_mapping = { "sqla": ("https://docs.sqlalchemy.org/en/latest/", None), "python": ("http://docs.python.org/", None), } alembic-rel_1_7_6/docs/build/cookbook.rst000066400000000000000000001565031417624537100205400ustar00rootroot00000000000000======== Cookbook ======== A collection of "How-Tos" highlighting popular ways to extend Alembic. .. note:: This is a new section where we catalogue various "how-tos" based on user requests. It is often the case that users will request a feature only to learn it can be provided with a simple customization. .. _building_uptodate: Building an Up to Date Database from Scratch ============================================= There's a theory of database migrations that says that the revisions in existence for a database should be able to go from an entirely blank schema to the finished product, and back again. Alembic can roll this way. Though we think it's kind of overkill, considering that SQLAlchemy itself can emit the full CREATE statements for any given model using :meth:`~sqlalchemy.schema.MetaData.create_all`. If you check out a copy of an application, running this will give you the entire database in one shot, without the need to run through all those migration files, which are instead tailored towards applying incremental changes to an existing database. Alembic can integrate with a :meth:`~sqlalchemy.schema.MetaData.create_all` script quite easily. After running the create operation, tell Alembic to create a new version table, and to stamp it with the most recent revision (i.e. ``head``):: # inside of a "create the database" script, first create # tables: my_metadata.create_all(engine) # then, load the Alembic configuration and generate the # version table, "stamping" it with the most recent rev: from alembic.config import Config from alembic import command alembic_cfg = Config("/path/to/yourapp/alembic.ini") command.stamp(alembic_cfg, "head") When this approach is used, the application can generate the database using normal SQLAlchemy techniques instead of iterating through hundreds of migration scripts. Now, the purpose of the migration scripts is relegated just to movement between versions on out-of-date databases, not *new* databases. You can now remove old migration files that are no longer represented on any existing environments. To prune old migration files, simply delete the files. Then, in the earliest, still-remaining migration file, set ``down_revision`` to ``None``:: # replace this: #down_revision = '290696571ad2' # with this: down_revision = None That file now becomes the "base" of the migration series. Conditional Migration Elements ============================== This example features the basic idea of a common need, that of affecting how a migration runs based on command line switches. 
The technique to use here is simple; within a migration script, inspect the :meth:`.EnvironmentContext.get_x_argument` collection for any additional, user-defined parameters. Then take action based on the presence of those arguments. To make it such that the logic to inspect these flags is easy to use and modify, we modify our ``script.py.mako`` template to make this feature available in all new revision files: .. code-block:: mako """${message} Revision ID: ${up_revision} Revises: ${down_revision} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} from alembic import op import sqlalchemy as sa ${imports if imports else ""} from alembic import context def upgrade(): schema_upgrades() if context.get_x_argument(as_dictionary=True).get('data', None): data_upgrades() def downgrade(): if context.get_x_argument(as_dictionary=True).get('data', None): data_downgrades() schema_downgrades() def schema_upgrades(): """schema upgrade migrations go here.""" ${upgrades if upgrades else "pass"} def schema_downgrades(): """schema downgrade migrations go here.""" ${downgrades if downgrades else "pass"} def data_upgrades(): """Add any optional data upgrade migrations here!""" pass def data_downgrades(): """Add any optional data downgrade migrations here!""" pass Now, when we create a new migration file, the ``data_upgrades()`` and ``data_downgrades()`` placeholders will be available, where we can add optional data migrations:: """rev one Revision ID: 3ba2b522d10d Revises: None Create Date: 2014-03-04 18:05:36.992867 """ # revision identifiers, used by Alembic. revision = '3ba2b522d10d' down_revision = None from alembic import op import sqlalchemy as sa from sqlalchemy import String, Column from sqlalchemy.sql import table, column from alembic import context def upgrade(): schema_upgrades() if context.get_x_argument(as_dictionary=True).get('data', None): data_upgrades() def downgrade(): if context.get_x_argument(as_dictionary=True).get('data', None): data_downgrades() schema_downgrades() def schema_upgrades(): """schema upgrade migrations go here.""" op.create_table("my_table", Column('data', String)) def schema_downgrades(): """schema downgrade migrations go here.""" op.drop_table("my_table") def data_upgrades(): """Add any optional data upgrade migrations here!""" my_table = table('my_table', column('data', String), ) op.bulk_insert(my_table, [ {'data': 'data 1'}, {'data': 'data 2'}, {'data': 'data 3'}, ] ) def data_downgrades(): """Add any optional data downgrade migrations here!""" op.execute("delete from my_table") To invoke our migrations with data included, we use the ``-x`` flag:: alembic -x data=true upgrade head The :meth:`.EnvironmentContext.get_x_argument` is an easy way to support new commandline options within environment and migration scripts. .. _connection_sharing: Sharing a Connection with a Series of Migration Commands and Environments ========================================================================= It is often the case that an application will need to call upon a series of commands within :ref:`alembic.command.toplevel`, where it would be advantageous for all operations to proceed along a single transaction. The connectivity for a migration is typically solely determined within the ``env.py`` script of a migration environment, which is called within the scope of a command. The steps to take here are: 1. Produce the :class:`~sqlalchemy.engine.Connection` object to use. 2. 
Place it somewhere that ``env.py`` will be able to access it. This can be either a. a module-level global somewhere, or b. an attribute which we place into the :attr:`.Config.attributes` dictionary (if we are on an older Alembic version, we may also attach an attribute directly to the :class:`.Config` object). 3. The ``env.py`` script is modified such that it looks for this :class:`~sqlalchemy.engine.Connection` and makes use of it, in lieu of building up its own :class:`~sqlalchemy.engine.Engine` instance. We illustrate using :attr:`.Config.attributes`:: from alembic import command, config cfg = config.Config("/path/to/yourapp/alembic.ini") with engine.begin() as connection: cfg.attributes['connection'] = connection command.upgrade(cfg, "head") Then in ``env.py``:: def run_migrations_online(): connectable = config.attributes.get('connection', None) if connectable is None: # only create Engine if we don't have a Connection # from the outside connectable = engine_from_config( config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool) context.configure( connection=connectable, target_metadata=target_metadata ) with context.begin_transaction(): context.run_migrations() .. versionchanged:: 1.4 Prior to this version, we used a "branched connection", by calling :meth:`~sqlalchemy.engine.Connection.connect`. This is now deprecated and unnecessary, since we no longer have to guess if the given "connection" is an ``Engine`` or ``Connection``, it is always a ``Connection``. .. _replaceable_objects: Replaceable Objects =================== This recipe proposes a hypothetical way of dealing with what we might call a *replaceable* schema object. A replaceable object is a schema object that needs to be created and dropped all at once. Examples of such objects include views, stored procedures, and triggers. .. seealso:: The Replaceable Object concept has been integrated by the `Alembic Utils `_ project, which provides autogenerate and migration support for PostgreSQL functions and views. See Alembic Utils at https://github.com/olirice/alembic_utils . Replaceable objects present a problem in that in order to make incremental changes to them, we have to refer to the whole definition at once. If we need to add a new column to a view, for example, we have to drop it entirely and recreate it fresh with the extra column added, referring to the whole structure; but to make it even tougher, if we wish to support downgrade operations in our migration scripts, we need to refer to the *previous* version of that construct fully, and we'd much rather not have to type out the whole definition in multiple places. This recipe proposes that we may refer to the older version of a replaceable construct by directly naming the migration version in which it was created, and having a migration refer to that previous file as migrations run. We will also demonstrate how to integrate this logic within the :ref:`operation_plugins` feature introduced in Alembic 0.8. It may be very helpful to review this section first to get an overview of this API. The Replaceable Object Structure -------------------------------- We first need to devise a simple format that represents the "CREATE XYZ" / "DROP XYZ" aspect of what it is we're building.
We will work with an object that represents a textual definition; while a SQL view is an object that we can define using a `table-metadata-like system `_, this is not so much the case for things like stored procedures, where we pretty much need to have a full string definition written down somewhere. We'll use a simple value object called ``ReplaceableObject`` that can represent any named set of SQL text to send to a "CREATE" statement of some kind:: class ReplaceableObject: def __init__(self, name, sqltext): self.name = name self.sqltext = sqltext Using this object in a migration script, assuming a Postgresql-style syntax, looks like:: customer_view = ReplaceableObject( "customer_view", "SELECT name, order_count FROM customer WHERE order_count > 0" ) add_customer_sp = ReplaceableObject( "add_customer_sp(name varchar, order_count integer)", """ RETURNS integer AS $$ BEGIN insert into customer (name, order_count) VALUES (in_name, in_order_count); END; $$ LANGUAGE plpgsql; """ ) The ``ReplaceableObject`` class is only one very simplistic way to do this. The structure of how we represent our schema objects is not too important for the purposes of this example; we can just as well put strings inside of tuples or dictionaries, as well as that we could define any kind of series of fields and class structures we want. The only important part is that below we will illustrate how to organize the code that can consume the structure we create here. Create Operations for the Target Objects ---------------------------------------- We'll use the :class:`.Operations` extension API to make new operations for create, drop, and replace of views and stored procedures. Using this API is also optional; we can just as well make any kind of Python function that we would invoke from our migration scripts. However, using this API gives us operations built directly into the Alembic ``op.*`` namespace very nicely. The most intricate class is below. This is the base of our "replaceable" operation, which includes not just a base operation for emitting CREATE and DROP instructions on a ``ReplaceableObject``, it also assumes a certain model of "reversibility" which makes use of references to other migration files in order to refer to the "previous" version of an object:: from alembic.operations import Operations, MigrateOperation class ReversibleOp(MigrateOperation): def __init__(self, target): self.target = target @classmethod def invoke_for_target(cls, operations, target): op = cls(target) return operations.invoke(op) def reverse(self): raise NotImplementedError() @classmethod def _get_object_from_version(cls, operations, ident): version, objname = ident.split(".") module = operations.get_context().script.get_revision(version).module obj = getattr(module, objname) return obj @classmethod def replace(cls, operations, target, replaces=None, replace_with=None): if replaces: old_obj = cls._get_object_from_version(operations, replaces) drop_old = cls(old_obj).reverse() create_new = cls(target) elif replace_with: old_obj = cls._get_object_from_version(operations, replace_with) drop_old = cls(target).reverse() create_new = cls(old_obj) else: raise TypeError("replaces or replace_with is required") operations.invoke(drop_old) operations.invoke(create_new) The workings of this class should become clear as we walk through the example.
To create usable operations from this base, we will build a series of stub classes and use :meth:`.Operations.register_operation` to make them part of the ``op.*`` namespace:: @Operations.register_operation("create_view", "invoke_for_target") @Operations.register_operation("replace_view", "replace") class CreateViewOp(ReversibleOp): def reverse(self): return DropViewOp(self.target) @Operations.register_operation("drop_view", "invoke_for_target") class DropViewOp(ReversibleOp): def reverse(self): return CreateViewOp(self.target) @Operations.register_operation("create_sp", "invoke_for_target") @Operations.register_operation("replace_sp", "replace") class CreateSPOp(ReversibleOp): def reverse(self): return DropSPOp(self.target) @Operations.register_operation("drop_sp", "invoke_for_target") class DropSPOp(ReversibleOp): def reverse(self): return CreateSPOp(self.target) To actually run the SQL like "CREATE VIEW" and "DROP VIEW", we'll provide implementations using :meth:`.Operations.implementation_for` that run straight into :meth:`.Operations.execute`:: @Operations.implementation_for(CreateViewOp) def create_view(operations, operation): operations.execute("CREATE VIEW %s AS %s" % ( operation.target.name, operation.target.sqltext )) @Operations.implementation_for(DropViewOp) def drop_view(operations, operation): operations.execute("DROP VIEW %s" % operation.target.name) @Operations.implementation_for(CreateSPOp) def create_sp(operations, operation): operations.execute( "CREATE FUNCTION %s %s" % ( operation.target.name, operation.target.sqltext ) ) @Operations.implementation_for(DropSPOp) def drop_sp(operations, operation): operations.execute("DROP FUNCTION %s" % operation.target.name) All of the above code can be present anywhere within an application's source tree; the only requirement is that when the ``env.py`` script is invoked, it includes imports that ultimately call upon these classes as well as the :meth:`.Operations.register_operation` and :meth:`.Operations.implementation_for` sequences. Create Initial Migrations ------------------------- We can now illustrate how these objects look during use. For the first step, we'll create a new migration to create a "customer" table:: $ alembic revision -m "create table" We build the first revision as follows:: """create table Revision ID: 3ab8b2dfb055 Revises: Create Date: 2015-07-27 16:22:44.918507 """ # revision identifiers, used by Alembic. revision = '3ab8b2dfb055' down_revision = None branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( "customer", sa.Column('id', sa.Integer, primary_key=True), sa.Column('name', sa.String), sa.Column('order_count', sa.Integer), ) def downgrade(): op.drop_table('customer') For the second migration, we will create a view and a stored procedure which act upon this table:: $ alembic revision -m "create views/sp" This migration will use the new directives:: """create views/sp Revision ID: 28af9800143f Revises: 3ab8b2dfb055 Create Date: 2015-07-27 16:24:03.589867 """ # revision identifiers, used by Alembic.
revision = '28af9800143f' down_revision = '3ab8b2dfb055' branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa from foo import ReplaceableObject customer_view = ReplaceableObject( "customer_view", "SELECT name, order_count FROM customer WHERE order_count > 0" ) add_customer_sp = ReplaceableObject( "add_customer_sp(name varchar, order_count integer)", """ RETURNS integer AS $$ BEGIN insert into customer (name, order_count) VALUES (in_name, in_order_count); END; $$ LANGUAGE plpgsql; """ ) def upgrade(): op.create_view(customer_view) op.create_sp(add_customer_sp) def downgrade(): op.drop_view(customer_view) op.drop_sp(add_customer_sp) We see the use of our new ``create_view()``, ``create_sp()``, ``drop_view()``, and ``drop_sp()`` directives. Running these to "head" we get the following (this includes an edited view of SQL emitted):: $ alembic upgrade 28af9800143 INFO [alembic.runtime.migration] Context impl PostgresqlImpl. INFO [alembic.runtime.migration] Will assume transactional DDL. INFO [sqlalchemy.engine.base.Engine] BEGIN (implicit) INFO [sqlalchemy.engine.base.Engine] select relname from pg_class c join pg_namespace n on n.oid=c.relnamespace where pg_catalog.pg_table_is_visible(c.oid) and relname=%(name)s INFO [sqlalchemy.engine.base.Engine] {'name': u'alembic_version'} INFO [sqlalchemy.engine.base.Engine] SELECT alembic_version.version_num FROM alembic_version INFO [sqlalchemy.engine.base.Engine] {} INFO [sqlalchemy.engine.base.Engine] select relname from pg_class c join pg_namespace n on n.oid=c.relnamespace where pg_catalog.pg_table_is_visible(c.oid) and relname=%(name)s INFO [sqlalchemy.engine.base.Engine] {'name': u'alembic_version'} INFO [alembic.runtime.migration] Running upgrade -> 3ab8b2dfb055, create table INFO [sqlalchemy.engine.base.Engine] CREATE TABLE customer ( id SERIAL NOT NULL, name VARCHAR, order_count INTEGER, PRIMARY KEY (id) ) INFO [sqlalchemy.engine.base.Engine] {} INFO [sqlalchemy.engine.base.Engine] INSERT INTO alembic_version (version_num) VALUES ('3ab8b2dfb055') INFO [sqlalchemy.engine.base.Engine] {} INFO [alembic.runtime.migration] Running upgrade 3ab8b2dfb055 -> 28af9800143f, create views/sp INFO [sqlalchemy.engine.base.Engine] CREATE VIEW customer_view AS SELECT name, order_count FROM customer WHERE order_count > 0 INFO [sqlalchemy.engine.base.Engine] {} INFO [sqlalchemy.engine.base.Engine] CREATE FUNCTION add_customer_sp(name varchar, order_count integer) RETURNS integer AS $$ BEGIN insert into customer (name, order_count) VALUES (in_name, in_order_count); END; $$ LANGUAGE plpgsql; INFO [sqlalchemy.engine.base.Engine] {} INFO [sqlalchemy.engine.base.Engine] UPDATE alembic_version SET version_num='28af9800143f' WHERE alembic_version.version_num = '3ab8b2dfb055' INFO [sqlalchemy.engine.base.Engine] {} INFO [sqlalchemy.engine.base.Engine] COMMIT We see that our CREATE TABLE proceeded as well as the CREATE VIEW and CREATE FUNCTION operations produced by our new directives. Create Revision Migrations -------------------------- Finally, we can illustrate how we would "revise" these objects. Let's consider we added a new column ``email`` to our ``customer`` table:: $ alembic revision -m "add email col" The migration is:: """add email col Revision ID: 191a2d20b025 Revises: 28af9800143f Create Date: 2015-07-27 16:25:59.277326 """ # revision identifiers, used by Alembic. 
revision = '191a2d20b025' down_revision = '28af9800143f' branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa def upgrade(): op.add_column("customer", sa.Column("email", sa.String())) def downgrade(): op.drop_column("customer", "email") We now need to recreate the ``customer_view`` view and the ``add_customer_sp`` function. To include downgrade capability, we will need to refer to the **previous** version of the construct; the ``replace_view()`` and ``replace_sp()`` operations we've created make this possible, by allowing us to refer to a specific, previous revision. the ``replaces`` and ``replace_with`` arguments accept a dot-separated string, which refers to a revision number and an object name, such as ``"28af9800143f.customer_view"``. The ``ReversibleOp`` class makes use of the :meth:`.Operations.get_context` method to locate the version file we refer to:: $ alembic revision -m "update views/sp" The migration:: """update views/sp Revision ID: 199028bf9856 Revises: 191a2d20b025 Create Date: 2015-07-27 16:26:31.344504 """ # revision identifiers, used by Alembic. revision = '199028bf9856' down_revision = '191a2d20b025' branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa from foo import ReplaceableObject customer_view = ReplaceableObject( "customer_view", "SELECT name, order_count, email " "FROM customer WHERE order_count > 0" ) add_customer_sp = ReplaceableObject( "add_customer_sp(name varchar, order_count integer, email varchar)", """ RETURNS integer AS $$ BEGIN insert into customer (name, order_count, email) VALUES (in_name, in_order_count, email); END; $$ LANGUAGE plpgsql; """ ) def upgrade(): op.replace_view(customer_view, replaces="28af9800143f.customer_view") op.replace_sp(add_customer_sp, replaces="28af9800143f.add_customer_sp") def downgrade(): op.replace_view(customer_view, replace_with="28af9800143f.customer_view") op.replace_sp(add_customer_sp, replace_with="28af9800143f.add_customer_sp") Above, instead of using ``create_view()``, ``create_sp()``, ``drop_view()``, and ``drop_sp()`` methods, we now use ``replace_view()`` and ``replace_sp()``. The replace operation we've built always runs a DROP *and* a CREATE. Running an upgrade to head we see:: $ alembic upgrade head INFO [alembic.runtime.migration] Context impl PostgresqlImpl. INFO [alembic.runtime.migration] Will assume transactional DDL. 
INFO [sqlalchemy.engine.base.Engine] BEGIN (implicit) INFO [sqlalchemy.engine.base.Engine] select relname from pg_class c join pg_namespace n on n.oid=c.relnamespace where pg_catalog.pg_table_is_visible(c.oid) and relname=%(name)s INFO [sqlalchemy.engine.base.Engine] {'name': u'alembic_version'} INFO [sqlalchemy.engine.base.Engine] SELECT alembic_version.version_num FROM alembic_version INFO [sqlalchemy.engine.base.Engine] {} INFO [alembic.runtime.migration] Running upgrade 28af9800143f -> 191a2d20b025, add email col INFO [sqlalchemy.engine.base.Engine] ALTER TABLE customer ADD COLUMN email VARCHAR INFO [sqlalchemy.engine.base.Engine] {} INFO [sqlalchemy.engine.base.Engine] UPDATE alembic_version SET version_num='191a2d20b025' WHERE alembic_version.version_num = '28af9800143f' INFO [sqlalchemy.engine.base.Engine] {} INFO [alembic.runtime.migration] Running upgrade 191a2d20b025 -> 199028bf9856, update views/sp INFO [sqlalchemy.engine.base.Engine] DROP VIEW customer_view INFO [sqlalchemy.engine.base.Engine] {} INFO [sqlalchemy.engine.base.Engine] CREATE VIEW customer_view AS SELECT name, order_count, email FROM customer WHERE order_count > 0 INFO [sqlalchemy.engine.base.Engine] {} INFO [sqlalchemy.engine.base.Engine] DROP FUNCTION add_customer_sp(name varchar, order_count integer) INFO [sqlalchemy.engine.base.Engine] {} INFO [sqlalchemy.engine.base.Engine] CREATE FUNCTION add_customer_sp(name varchar, order_count integer, email varchar) RETURNS integer AS $$ BEGIN insert into customer (name, order_count, email) VALUES (in_name, in_order_count, email); END; $$ LANGUAGE plpgsql; INFO [sqlalchemy.engine.base.Engine] {} INFO [sqlalchemy.engine.base.Engine] UPDATE alembic_version SET version_num='199028bf9856' WHERE alembic_version.version_num = '191a2d20b025' INFO [sqlalchemy.engine.base.Engine] {} INFO [sqlalchemy.engine.base.Engine] COMMIT After adding our new ``email`` column, we see that both ``customer_view`` and ``add_customer_sp()`` are dropped before the new version is created. If we downgrade back to the old version, we see the old version of these recreated again within the downgrade for this migration:: $ alembic downgrade 28af9800143 INFO [alembic.runtime.migration] Context impl PostgresqlImpl. INFO [alembic.runtime.migration] Will assume transactional DDL. 
INFO [sqlalchemy.engine.base.Engine] BEGIN (implicit) INFO [sqlalchemy.engine.base.Engine] select relname from pg_class c join pg_namespace n on n.oid=c.relnamespace where pg_catalog.pg_table_is_visible(c.oid) and relname=%(name)s INFO [sqlalchemy.engine.base.Engine] {'name': u'alembic_version'} INFO [sqlalchemy.engine.base.Engine] SELECT alembic_version.version_num FROM alembic_version INFO [sqlalchemy.engine.base.Engine] {} INFO [alembic.runtime.migration] Running downgrade 199028bf9856 -> 191a2d20b025, update views/sp INFO [sqlalchemy.engine.base.Engine] DROP VIEW customer_view INFO [sqlalchemy.engine.base.Engine] {} INFO [sqlalchemy.engine.base.Engine] CREATE VIEW customer_view AS SELECT name, order_count FROM customer WHERE order_count > 0 INFO [sqlalchemy.engine.base.Engine] {} INFO [sqlalchemy.engine.base.Engine] DROP FUNCTION add_customer_sp(name varchar, order_count integer, email varchar) INFO [sqlalchemy.engine.base.Engine] {} INFO [sqlalchemy.engine.base.Engine] CREATE FUNCTION add_customer_sp(name varchar, order_count integer) RETURNS integer AS $$ BEGIN insert into customer (name, order_count) VALUES (in_name, in_order_count); END; $$ LANGUAGE plpgsql; INFO [sqlalchemy.engine.base.Engine] {} INFO [sqlalchemy.engine.base.Engine] UPDATE alembic_version SET version_num='191a2d20b025' WHERE alembic_version.version_num = '199028bf9856' INFO [sqlalchemy.engine.base.Engine] {} INFO [alembic.runtime.migration] Running downgrade 191a2d20b025 -> 28af9800143f, add email col INFO [sqlalchemy.engine.base.Engine] ALTER TABLE customer DROP COLUMN email INFO [sqlalchemy.engine.base.Engine] {} INFO [sqlalchemy.engine.base.Engine] UPDATE alembic_version SET version_num='28af9800143f' WHERE alembic_version.version_num = '191a2d20b025' INFO [sqlalchemy.engine.base.Engine] {} INFO [sqlalchemy.engine.base.Engine] COMMIT .. _cookbook_postgresql_multi_tenancy: Rudimental Schema-Level Multi Tenancy for PostgreSQL Databases ============================================================== **Multi tenancy** refers to an application that accommodates for many clients simultaneously. Within the scope of a database migrations tool, multi-tenancy typically refers to the practice of maintaining multiple, identical databases where each database is assigned to one client. Alembic does not currently have explicit multi-tenant support; typically, the approach must involve running Alembic multiple times against different database URLs. One common approach to multi-tenancy, particularly on the PostgreSQL database, is to install tenants within **individual PostgreSQL schemas**. When using PostgreSQL's schemas, a special variable ``search_path`` is offered that is intended to assist with targeting of different schemas. .. note:: SQLAlchemy includes a system of directing a common set of ``Table`` metadata to many schemas called `schema_translate_map `_. Alembic at the time of this writing lacks adequate support for this feature. The recipe below should be considered **interim** until Alembic has more first-class support for schema-level multi-tenancy. The recipe below can be altered for flexibility. The primary purpose of this recipe is to illustrate how to point the Alembic process towards one PostgreSQL schema or another. 1. The model metadata used as the target for autogenerate must not include any schema name for tables; the schema must be non-present or set to ``None``. 
Otherwise, Alembic autogenerate will still attempt to compare and render tables in terms of this schema:: class A(Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) data = Column(UnicodeText()) foo = Column(Integer) __table_args__ = { "schema": None } .. 2. The :paramref:`.EnvironmentContext.configure.include_schemas` flag must also be False or not included. 3. The "tenant" will be a schema name passed to Alembic using the "-x" flag. In ``env.py`` an approach like the following allows ``-x tenant=some_schema`` to be supported by making use of :meth:`.EnvironmentContext.get_x_argument`:: def run_migrations_online(): connectable = engine_from_config( config.get_section(config.config_ini_section), prefix="sqlalchemy.", poolclass=pool.NullPool, ) current_tenant = context.get_x_argument(as_dictionary=True).get("tenant") with connectable.connect() as connection: # set search path on the connection, which ensures that # PostgreSQL will emit all CREATE / ALTER / DROP statements # in terms of this schema by default connection.execute("set search_path to %s" % current_tenant) # make use of non-supported SQLAlchemy attribute to ensure # the dialect reflects tables in terms of the current tenant name connection.dialect.default_schema_name = current_tenant context.configure( connection=connection, target_metadata=target_metadata, ) with context.begin_transaction(): context.run_migrations() The current tenant is set using the PostgreSQL ``search_path`` variable on the connection. Note above we must employ a **non-supported SQLAlchemy workaround** at the moment which is to hardcode the SQLAlchemy dialect's default schema name to our target schema. It is also important to note that the above changes **remain on the connection permanently unless reversed explicitly**. If the alembic application simply exits above, there is no issue. However if the application attempts to continue using the above connection for other purposes, it may be necessary to reset these variables back to the default, which for PostgreSQL is usually the name "public", however this may be different based on configuration. 4. Alembic operations will now proceed in terms of whichever schema we pass on the command line. All logged SQL will show no schema, except for reflection operations which will make use of the ``default_schema_name`` attribute:: []$ alembic -x tenant=some_schema revision -m "rev1" --autogenerate .. 5. Since all schemas are to be maintained in sync, autogenerate should be run against only **one** schema, generating new Alembic migration files. Autogenerated migration operations are then run against **all** schemas. .. _cookbook_no_empty_migrations: Don't Generate Empty Migrations with Autogenerate ================================================= A common request is to have the ``alembic revision --autogenerate`` command not actually generate a revision file if no changes to the schema are detected. Using the :paramref:`.EnvironmentContext.configure.process_revision_directives` hook, this is straightforward; place a ``process_revision_directives`` hook in :meth:`.MigrationContext.configure` which removes the single :class:`.MigrationScript` directive if it is empty of any operations:: def run_migrations_online(): # ... def process_revision_directives(context, revision, directives): if config.cmd_opts.autogenerate: script = directives[0] if script.upgrade_ops.is_empty(): directives[:] = [] # connectable = ...
with connectable.connect() as connection: context.configure( connection=connection, target_metadata=target_metadata, process_revision_directives=process_revision_directives ) with context.begin_transaction(): context.run_migrations() .. _cookbook_dont_emit_drop_index: Don't emit DROP INDEX when the table is to be dropped as well ============================================================= MySQL may complain when dropping an index that is against a column that also has a foreign key constraint on it. If the table is to be dropped in any case, the DROP INDEX isn't necessary. This recipe will process the set of autogenerate directives such that all :class:`.DropIndexOp` directives are removed against tables that themselves are to be dropped:: def run_migrations_online(): # ... from alembic.operations import ops def process_revision_directives(context, revision, directives): script = directives[0] # process both "def upgrade()", "def downgrade()" for directive in (script.upgrade_ops, script.downgrade_ops): # make a set of tables that are being dropped within # the migration function tables_dropped = set() for op in directive.ops: if isinstance(op, ops.DropTableOp): tables_dropped.add((op.table_name, op.schema)) # now rewrite the list of "ops" such that DropIndexOp # is removed for those tables. Needs a recursive function. directive.ops = list( _filter_drop_indexes(directive.ops, tables_dropped) ) def _filter_drop_indexes(directives, tables_dropped): # given a set of (tablename, schemaname) to be dropped, filter # out DropIndexOp from the list of directives and yield the result. for directive in directives: # ModifyTableOps is a container of ALTER TABLE types of # commands. process those in place recursively. if isinstance(directive, ops.ModifyTableOps) and \ (directive.table_name, directive.schema) in tables_dropped: directive.ops = list( _filter_drop_indexes(directive.ops, tables_dropped) ) # if we emptied out the directives, then skip the # container altogether. if not directive.ops: continue elif isinstance(directive, ops.DropIndexOp) and \ (directive.table_name, directive.schema) in tables_dropped: # we found a target DropIndexOp. keep looping continue # otherwise if not filtered, yield out the directive yield directive # connectable = ... with connectable.connect() as connection: context.configure( connection=connection, target_metadata=target_metadata, process_revision_directives=process_revision_directives ) with context.begin_transaction(): context.run_migrations() Whereas autogenerate, when dropping two tables with a foreign key and an index, would previously generate something like:: def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_index(op.f('ix_b_aid'), table_name='b') op.drop_table('b') op.drop_table('a') # ### end Alembic commands ### With the above rewriter, it generates as:: def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_table('b') op.drop_table('a') # ### end Alembic commands ### Don't generate any DROP TABLE directives with autogenerate ========================================================== When running autogenerate against a database that has existing tables outside of the application's autogenerated metadata, it may be desirable to prevent autogenerate from considering any of those existing tables to be dropped. This will prevent autogenerate from detecting tables removed from the local metadata as well; however, this is only a small caveat.
The most direct way to achieve this is to use the :paramref:`.EnvironmentContext.configure.include_object` hook. There is no need to hardcode a fixed "whitelist" of table names; the hook gives enough information in the given arguments to determine if a particular table name is not part of the local :class:`.MetaData` being autogenerated, by checking first that the type of object is ``"table"``, then that ``reflected`` is ``True``, indicating this table name is from the local database connection, not the :class:`.MetaData`, and finally that ``compare_to`` is ``None``, indicating autogenerate is not comparing this :class:`.Table` to any :class:`.Table` in the local :class:`.MetaData` collection:: # in env.py def include_object(object, name, type_, reflected, compare_to): if type_ == "table" and reflected and compare_to is None: return False else: return True context.configure( # ... include_object = include_object ) .. _cookbook_custom_sorting_create_table: Apply Custom Sorting to Table Columns within CREATE TABLE ========================================================== This example illustrates use of the :class:`.Rewriter` object introduced at :ref:`autogen_rewriter`. While the rewriter grants access to the individual :class:`.ops.MigrateOperation` objects, there are sometimes some special techniques required to get around some structural limitations that are present. One is when trying to reorganize the order of columns in a table within a :class:`.ops.CreateTableOp` directive. This directive, when generated by autogenerate, actually holds onto the original :class:`.Table` object as the source of its information, so attempting to reorder the :attr:`.ops.CreateTableOp.columns` collection will usually have no effect. Instead, a new :class:`.ops.CreateTableOp` object may be constructed with the new ordering. However, a second issue is that the :class:`.Column` objects inside will already be associated with the :class:`.Table` that is from the model being autogenerated, meaning they can't be reassigned directly to a new :class:`.Table`. To get around this, we can copy all the columns and constraints using methods like :meth:`.Column.copy`. Below we use :class:`.Rewriter` to create a new :class:`.ops.CreateTableOp` directive and to copy the :class:`.Column` objects from one into another, copying each column or constraint object and applying a new sorting scheme:: # in env.py from alembic.operations import ops from alembic.autogenerate import rewriter writer = rewriter.Rewriter() @writer.rewrites(ops.CreateTableOp) def order_columns(context, revision, op): special_names = {"id": -100, "created_at": 1001, "updated_at": 1002} cols_by_key = [ ( special_names.get(col.key, index) if isinstance(col, Column) else 2000, col.copy(), ) for index, col in enumerate(op.columns) ] columns = [ col for idx, col in sorted(cols_by_key, key=lambda entry: entry[0]) ] return ops.CreateTableOp( op.table_name, columns, schema=op.schema, **op.kw) # ... context.configure( # ... process_revision_directives=writer ) Above, when we apply the ``writer`` to a table such as:: Table( "my_table", m, Column("data", String(50)), Column("created_at", DateTime), Column("id", Integer, primary_key=True), Column("updated_at", DateTime), UniqueConstraint("data", name="uq_data") ) This will render in the autogenerated file as:: def upgrade(): # ### commands auto generated by Alembic - please adjust!
### op.create_table( "my_table", sa.Column("id", sa.Integer(), nullable=False), sa.Column("data", sa.String(length=50), nullable=True), sa.Column("created_at", sa.DateTime(), nullable=True), sa.Column("updated_at", sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint("id"), sa.UniqueConstraint("data", name="uq_data"), ) # ### end Alembic commands ### Don't emit CREATE TABLE statements for Views ============================================ It is sometimes convenient to create :class:`~sqlalchemy.schema.Table` instances for views so that they can be queried using normal SQLAlchemy techniques. Unfortunately this causes Alembic to treat them as tables in need of creation and to generate spurious ``create_table()`` operations. This is easily fixable by flagging such Tables and using the :paramref:`~.EnvironmentContext.configure.include_object` hook to exclude them:: my_view = Table('my_view', metadata, autoload=True, info=dict(is_view=True)) # Flag this as a view Then define ``include_object`` as:: def include_object(object, name, type_, reflected, compare_to): """ Exclude views from Alembic's consideration. """ return not object.info.get('is_view', False) Finally, in ``env.py`` pass your ``include_object`` as a keyword argument to :meth:`.EnvironmentContext.configure`. .. _multiple_environments: Run Multiple Alembic Environments from one .ini file ==================================================== Long before Alembic had the "multiple bases" feature described in :ref:`multiple_bases`, projects had a need to maintain more than one Alembic version history in a single project, where these version histories are completely independent of each other and each refer to their own alembic_version table, either across multiple databases, schemas, or namespaces. A simple approach was added to support this, the ``--name`` flag on the commandline. First, one would create an alembic.ini file of this form:: [DEFAULT] # all defaults shared between environments go here sqlalchemy.url = postgresql://scott:tiger@hostname/mydatabase [schema1] # path to env.py and migration scripts for schema1 script_location = myproject/revisions/schema1 [schema2] # path to env.py and migration scripts for schema2 script_location = myproject/revisions/schema2 [schema3] # path to env.py and migration scripts for schema3 script_location = myproject/revisions/db2 # this schema uses a different database URL as well sqlalchemy.url = postgresql://scott:tiger@hostname/myotherdatabase Above, in the ``[DEFAULT]`` section we set up a default database URL. Then we create three sections corresponding to different revision lineages in our project. Each of these directories would have its own ``env.py`` and set of versioning files. Then when we run the ``alembic`` command, we simply give it the name of the configuration we want to use:: alembic --name schema2 revision -m "new rev for schema 2" --autogenerate Above, the ``alembic`` command makes use of the configuration in ``[schema2]``, populated with defaults from the ``[DEFAULT]`` section. The above approach can be automated by creating a custom front-end to the Alembic commandline as well. Print Python Code to Generate Particular Database Tables ======================================================== Suppose you have a database already, and want to generate some ``op.create_table()`` and other directives that you'd have in a migration file. How can we automate generating that code? 
Suppose the database schema looks like (assume MySQL):: CREATE TABLE IF NOT EXISTS `users` ( `id` int(11) NOT NULL, KEY `id` (`id`) ); CREATE TABLE IF NOT EXISTS `user_properties` ( `users_id` int(11) NOT NULL, `property_name` varchar(255) NOT NULL, `property_value` mediumtext NOT NULL, UNIQUE KEY `property_name_users_id` (`property_name`,`users_id`), KEY `users_id` (`users_id`), CONSTRAINT `user_properties_ibfk_1` FOREIGN KEY (`users_id`) REFERENCES `users` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8; Using :class:`.ops.UpgradeOps`, :class:`.ops.CreateTableOp`, and :class:`.ops.CreateIndexOp`, we create a migration file structure, using :class:`.Table` objects that we get from SQLAlchemy reflection. The structure is passed to :func:`.autogenerate.render_python_code` to produce the Python code for a migration file:: from sqlalchemy import create_engine from sqlalchemy import MetaData, Table from alembic import autogenerate from alembic.operations import ops e = create_engine("mysql://scott:tiger@localhost/test") with e.connect() as conn: m = MetaData() user_table = Table('users', m, autoload_with=conn) user_property_table = Table('user_properties', m, autoload_with=conn) print(autogenerate.render_python_code( ops.UpgradeOps( ops=[ ops.CreateTableOp.from_table(table) for table in m.tables.values() ] + [ ops.CreateIndexOp.from_index(idx) for table in m.tables.values() for idx in table.indexes ] )) ) Output:: # ### commands auto generated by Alembic - please adjust! ### op.create_table('users', sa.Column('id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=False), mysql_default_charset='latin1', mysql_engine='InnoDB' ) op.create_table('user_properties', sa.Column('users_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=False), sa.Column('property_name', mysql.VARCHAR(length=255), nullable=False), sa.Column('property_value', mysql.MEDIUMTEXT(), nullable=False), sa.ForeignKeyConstraint(['users_id'], ['users.id'], name='user_properties_ibfk_1', ondelete='CASCADE'), mysql_comment='user properties', mysql_default_charset='utf8', mysql_engine='InnoDB' ) op.create_index('id', 'users', ['id'], unique=False) op.create_index('users_id', 'user_properties', ['users_id'], unique=False) op.create_index('property_name_users_id', 'user_properties', ['property_name', 'users_id'], unique=True) # ### end Alembic commands ### Run Alembic Operation Objects Directly (as in from autogenerate) ================================================================ The :class:`.Operations` object has a method known as :meth:`.Operations.invoke` that will generically invoke a particular operation object. We can therefore use the :func:`.autogenerate.produce_migrations` function to run an autogenerate comparison, get back a :class:`.ops.MigrationScript` structure representing the changes, and with a little bit of insider information we can invoke them directly. 
The traversal through the :class:`.ops.MigrationScript` structure is as follows:: use_batch = engine.name == "sqlite" stack = [migrations.upgrade_ops] while stack: elem = stack.pop(0) if use_batch and isinstance(elem, ModifyTableOps): with operations.batch_alter_table( elem.table_name, schema=elem.schema ) as batch_ops: for table_elem in elem.ops: # work around Alembic issue #753 (fixed in 1.5.0) if hasattr(table_elem, "column"): table_elem.column = table_elem.column.copy() batch_ops.invoke(table_elem) elif hasattr(elem, "ops"): stack.extend(elem.ops) else: # work around Alembic issue #753 (fixed in 1.5.0) if hasattr(elem, "column"): elem.column = elem.column.copy() operations.invoke(elem) Above, we detect elements that have a collection of operations by looking for the ``.ops`` attribute. A check for :class:`.ModifyTableOps` allows us to use a batch context if we are supporting that. Finally there's a workaround for an Alembic issue that exists for SQLAlchemy 1.3.20 and greater combined with Alembic older than 1.5. A full example follows. The overall setup here is copied from the example at :func:`.autogenerate.compare_metadata`:: from sqlalchemy import Column from sqlalchemy import create_engine from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import String from sqlalchemy import Table from alembic.autogenerate import produce_migrations from alembic.migration import MigrationContext from alembic.operations import Operations from alembic.operations.ops import ModifyTableOps engine = create_engine("sqlite://", echo=True) with engine.connect() as conn: conn.execute( """ create table foo ( id integer not null primary key, old_data varchar(50), x integer )""" ) conn.execute( """ create table bar ( data varchar(50) )""" ) metadata = MetaData() Table( "foo", metadata, Column("id", Integer, primary_key=True), Column("data", Integer), Column("x", Integer, nullable=False), ) Table("bat", metadata, Column("info", String(100))) mc = MigrationContext.configure(engine.connect()) migrations = produce_migrations(mc, metadata) operations = Operations(mc) use_batch = engine.name == "sqlite" stack = [migrations.upgrade_ops] while stack: elem = stack.pop(0) if use_batch and isinstance(elem, ModifyTableOps): with operations.batch_alter_table( elem.table_name, schema=elem.schema ) as batch_ops: for table_elem in elem.ops: # work around Alembic issue #753 (fixed in 1.5.0) if hasattr(table_elem, "column"): table_elem.column = table_elem.column.copy() batch_ops.invoke(table_elem) elif hasattr(elem, "ops"): stack.extend(elem.ops) else: # work around Alembic issue #753 (fixed in 1.5.0) if hasattr(elem, "column"): elem.column = elem.column.copy() operations.invoke(elem) Test current database revision is at head(s) ============================================ A recipe to determine if a database schema is up to date in terms of applying Alembic migrations. May be useful for test or installation suites to determine if the target database is up to date. 
Makes use of the :meth:`.MigrationContext.get_current_heads` as well as :meth:`.ScriptDirectory.get_heads` methods so that it accommodates a branched revision tree:: from alembic import config, script from alembic.runtime import migration from sqlalchemy import engine def check_current_head(alembic_cfg, connectable): # type: (config.Config, engine.Engine) -> bool directory = script.ScriptDirectory.from_config(alembic_cfg) with connectable.begin() as connection: context = migration.MigrationContext.configure(connection) return set(context.get_current_heads()) == set(directory.get_heads()) e = engine.create_engine("mysql://scott:tiger@localhost/test", echo=True) cfg = config.Config("alembic.ini") print(check_current_head(cfg, e)) .. seealso:: :meth:`.MigrationContext.get_current_heads` :meth:`.ScriptDirectory.get_heads` Using Asyncio with Alembic ========================== SQLAlchemy version 1.4 introduced experimental support for asyncio, allowing use of most of its interface from async applications. Alembic currently does not provide an async api directly, but it can use a SQLAlchemy async engine to run the migrations and autogenerate. New configurations can use the template "async" to bootstrap an environment which can be used with an async DBAPI such as asyncpg, running the command:: alembic init -t async Existing configurations can be updated to use an async DBAPI by updating the ``env.py`` file that's used by Alembic to start its operations. In particular only ``run_migrations_online`` will need to be updated to be something like the example below:: import asyncio from sqlalchemy.ext.asyncio import AsyncEngine # ... no change required to the rest of the code def do_run_migrations(connection): context.configure(connection=connection, target_metadata=target_metadata) with context.begin_transaction(): context.run_migrations() async def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ connectable = AsyncEngine( engine_from_config( config.get_section(config.config_ini_section), prefix="sqlalchemy.", poolclass=pool.NullPool, future=True, ) ) async with connectable.connect() as connection: await connection.run_sync(do_run_migrations) await connectable.dispose() if context.is_offline_mode(): run_migrations_offline() else: asyncio.run(run_migrations_online()) An async application can also interact with the Alembic api directly by using the SQLAlchemy ``run_sync`` method to adapt the non-async api of Alembic to an async consumer. alembic-rel_1_7_6/docs/build/front.rst000066400000000000000000000075631417624537100200610ustar00rootroot00000000000000============ Front Matter ============ Information about the Alembic project. Project Homepage ================ Alembic is hosted on GitHub at https://github.com/sqlalchemy/alembic under the SQLAlchemy organization. Releases and project status are available on Pypi at https://pypi.python.org/pypi/alembic. The most recent published version of this documentation should be at https://alembic.sqlalchemy.org. .. _installation: Installation ============ While Alembic can be installed system wide, it's more common that it's installed local to a `virtual environment `_ , as it also uses libraries such as SQLAlchemy and database drivers that are more appropriate for local installations. The documentation below is **only one kind of approach to installing Alembic for a project**; there are many such approaches.
The documentation below is provided only for those users who otherwise have no specific project setup chosen. To build a virtual environment for a specific project, first we assume that `Python virtualenv `_ is installed systemwide. Then:: $ cd /path/to/your/project $ virtualenv .venv There is now a Python interpreter that you can access in ``/path/to/your/project/.venv/bin/python``, as well as the `pip `_ installer tool in ``/path/to/your/project/.venv/bin/pip``. We now install Alembic as follows:: $ /path/to/your/project/.venv/bin/pip install alembic The install will add the ``alembic`` command to the virtual environment. All operations with Alembic in terms of this specific virtual environment will then proceed through the usage of this command, as in:: $ /path/to/your/project/.venv/bin/alembic init . The next step is **optional**. If our project itself has a ``setup.py`` file, we can also install it in the local virtual environment in `editable mode `_:: $ /path/to/your/project/.venv/bin/pip install -e . If we don't "install" the project locally, that's fine as well; the default ``alembic.ini`` file includes a directive ``prepend_sys_path = .`` so that the local path is also in ``sys.path``. This allows us to run the ``alembic`` command line tool from this directory without our project being "installed" in that environment. .. versionchanged:: 1.5.5 Fixed a long-standing issue where the ``alembic`` command-line tool would not preserve the default ``sys.path`` of ``.`` by implementing ``prepend_sys_path`` option. As a final step, the `virtualenv activate `_ tool can be used so that the ``alembic`` command is available without any path information, within the context of the current shell:: $ source /path/to/your/project/.venv/bin/activate Dependencies ------------ Alembic's install process will ensure that SQLAlchemy_ is installed, in addition to other dependencies. Alembic will work with SQLAlchemy as of version **1.3.0**. .. versionchanged:: 1.5.0 Support for SQLAlchemy older than 1.3.0 was dropped. Alembic supports Python versions **3.6 and above** .. versionchanged:: 1.7 Alembic now supports Python 3.6 and newer; support for Python 2.7 has been dropped. Community ========= Alembic is developed by `Mike Bayer `_, and is loosely associated with the SQLAlchemy_, `Pylons `_, and `Openstack `_ projects. User issues, discussion of potential bugs and features should be posted to the Alembic Google Group at `sqlalchemy-alembic `_. .. _bugs: Bugs ==== Bugs and feature enhancements to Alembic should be reported on the `GitHub issue tracker `_. .. _SQLAlchemy: https://www.sqlalchemy.org alembic-rel_1_7_6/docs/build/index.rst000066400000000000000000000010251417624537100200250ustar00rootroot00000000000000=================================== Welcome to Alembic's documentation! =================================== `Alembic `_ is a lightweight database migration tool for usage with the `SQLAlchemy `_ Database Toolkit for Python. .. 
toctree:: :maxdepth: 3 front tutorial autogenerate offline naming batch branches ops cookbook api/index changelog Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` alembic-rel_1_7_6/docs/build/make.bat000066400000000000000000000060051417624537100175740ustar00rootroot00000000000000@ECHO OFF REM Command file for Sphinx documentation set SPHINXBUILD=sphinx-build set BUILDDIR=build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. changes to make an overview over all changed/added/deprecated items echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Alembic.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Alembic.ghc goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) :end alembic-rel_1_7_6/docs/build/naming.rst000066400000000000000000000217351417624537100202010ustar00rootroot00000000000000.. 
_tutorial_constraint_names: The Importance of Naming Constraints ==================================== An important topic worth mentioning is that of constraint naming conventions. As we've proceeded here, we've talked about adding tables and columns, and we've also hinted at lots of other operations listed in :ref:`ops` such as those which support adding or dropping constraints like foreign keys and unique constraints. The way these constraints are referred to in migration scripts is by name; however, these names by default are in most cases generated by the relational database in use when the constraint is created. For example, if you emitted two CREATE TABLE statements like this on Postgresql:: test=> CREATE TABLE user_account (id INTEGER PRIMARY KEY); CREATE TABLE test=> CREATE TABLE user_order ( test(> id INTEGER PRIMARY KEY, test(> user_account_id INTEGER REFERENCES user_account(id)); CREATE TABLE Suppose we wanted to DROP the REFERENCES that we just applied to the ``user_order.user_account_id`` column - how do we do that? At the prompt, we'd use ``ALTER TABLE DROP CONSTRAINT``, or if using Alembic we'd be using :meth:`.Operations.drop_constraint`. But both of those functions need a name - what's the name of this constraint? It does have a name, which in this case we can figure out by looking at the Postgresql catalog tables:: test=> SELECT r.conname FROM test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid test-> WHERE c.relname='user_order' AND r.contype = 'f' test-> ; conname --------------------------------- user_order_user_account_id_fkey (1 row) The name above is not something that Alembic or SQLAlchemy created; ``user_order_user_account_id_fkey`` is a naming scheme used internally by Postgresql to name constraints that are otherwise not named. This scheme doesn't seem so complicated, and we might want to just use our knowledge of it so that we know what name to use for our :meth:`.Operations.drop_constraint` call. But is that a good idea? What if, for example, we needed our code to run on Oracle as well? OK, certainly Oracle uses this same scheme, right? Or if not, something similar. Let's check:: Oracle Database 10g Express Edition Release 10.2.0.1.0 - Production SQL> CREATE TABLE user_account (id INTEGER PRIMARY KEY); Table created. SQL> CREATE TABLE user_order ( 2 id INTEGER PRIMARY KEY, 3 user_account_id INTEGER REFERENCES user_account(id)); Table created. SQL> SELECT constraint_name FROM all_constraints WHERE 2 table_name='USER_ORDER' AND constraint_type in ('R'); CONSTRAINT_NAME ----------------------------------------------------- SYS_C0029334 Oh, we can see that is.....much worse. Oracle's names are entirely unpredictable alphanumeric codes, and this will make writing migrations quite tedious, as we'd need to look up all these names. The solution to having to look up names is to make your own names. This is an easy, though tedious, thing to do manually. For example, creating our model in SQLAlchemy while ensuring we use names for foreign key constraints would look like:: from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey meta = MetaData() user_account = Table('user_account', meta, Column('id', Integer, primary_key=True) ) user_order = Table('user_order', meta, Column('id', Integer, primary_key=True), Column('user_account_id', Integer, ForeignKey('user_account.id', name='fk_user_order_id')) ) Simple enough, though this has some disadvantages. 
The first is that it's tedious; we need to remember to use a name for every :class:`~sqlalchemy.schema.ForeignKey` object, not to mention every :class:`~sqlalchemy.schema.UniqueConstraint`, :class:`~sqlalchemy.schema.CheckConstraint`, :class:`~sqlalchemy.schema.Index`, and maybe even :class:`~sqlalchemy.schema.PrimaryKeyConstraint` as well if we wish to be able to alter those too, and beyond all that, all the names have to be globally unique. Even with all that effort, if we have a naming scheme in mind, it's easy to get it wrong when doing it manually each time. What's worse is that manually naming constraints (and indexes) gets even more tedious in that we can no longer use convenience features such as the ``.unique=True`` or ``.index=True`` flag on :class:`~sqlalchemy.schema.Column`:: user_account = Table('user_account', meta, Column('id', Integer, primary_key=True), Column('name', String(50), unique=True) ) Above, the ``unique=True`` flag creates a :class:`~sqlalchemy.schema.UniqueConstraint`, but again, it's not named. If we want to name it, we have to forego the usage of ``unique=True`` and type out the whole constraint manually:: user_account = Table('user_account', meta, Column('id', Integer, primary_key=True), Column('name', String(50)), UniqueConstraint('name', name='uq_user_account_name') ) There's a solution to all this naming work, which is to use an **automated naming convention**. For some years, SQLAlchemy has encouraged the use of DDL Events in order to create naming schemes. The :meth:`~sqlalchemy.events.DDLEvents.after_parent_attach` event in particular is the best place to intercept when :class:`~sqlalchemy.schema.Constraint` and :class:`~sqlalchemy.schema.Index` objects are being associated with a parent :class:`~sqlalchemy.schema.Table` object, and to assign a ``.name`` to the constraint while making use of the name of the table and associated columns. But there is also a better way to go, which is to make use of a feature new in SQLAlchemy 0.9.2 which uses these events behind the scenes, known as :paramref:`~sqlalchemy.schema.MetaData.naming_convention`. Here, we can create a new :class:`~sqlalchemy.schema.MetaData` object while passing a dictionary referring to a naming scheme:: convention = { "ix": "ix_%(column_0_label)s", "uq": "uq_%(table_name)s_%(column_0_name)s", "ck": "ck_%(table_name)s_%(constraint_name)s", "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s", "pk": "pk_%(table_name)s" } metadata = MetaData(naming_convention=convention) If we define our models using a :class:`~sqlalchemy.schema.MetaData` as above, the given naming convention dictionary will be used to provide names for all constraints and indexes. .. _autogen_naming_conventions: Integration of Naming Conventions into Operations, Autogenerate ---------------------------------------------------------------- As of Alembic 0.6.4, the naming convention feature is integrated into the :class:`.Operations` object, so that the convention takes effect for any constraint that is otherwise unnamed. 
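Before wiring the convention into Alembic, it can be useful to confirm what names it will actually generate at the plain SQLAlchemy level; a minimal sketch (the table and column names here are only illustrative)::

    from sqlalchemy import Column, Integer, MetaData, String, Table
    from sqlalchemy.schema import CreateTable

    convention = {
        "uq": "uq_%(table_name)s_%(column_0_name)s",
        "pk": "pk_%(table_name)s",
    }
    metadata = MetaData(naming_convention=convention)

    user_account = Table('user_account', metadata,
        Column('id', Integer, primary_key=True),
        Column('name', String(50), unique=True)
    )

    # the UNIQUE constraint produced by unique=True picks up the "uq"
    # template; the emitted DDL includes "CONSTRAINT uq_user_account_name
    # UNIQUE (name)" without any name having been spelled out by hand
    print(CreateTable(user_account))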
The naming convention is passed to :class:`.Operations` using the :paramref:`.EnvironmentContext.configure.target_metadata` parameter in ``env.py``, which is normally configured when autogenerate is used:: # in your application's model: meta = MetaData(naming_convention={ "ix": "ix_%(column_0_label)s", "uq": "uq_%(table_name)s_%(column_0_name)s", "ck": "ck_%(table_name)s_%(constraint_name)s", "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s", "pk": "pk_%(table_name)s" }) Base = declarative_base(metadata=meta) # .. in your Alembic env.py: # add your model's MetaData object here # for 'autogenerate' support from myapp import mymodel target_metadata = mymodel.Base.metadata # ... def run_migrations_online(): # ... context.configure( connection=connection, target_metadata=target_metadata ) Above, when we render a directive like the following:: op.add_column('sometable', Column('q', Boolean(name='q_bool'))) The Boolean type will render a CHECK constraint with the name ``"ck_sometable_q_bool"``, assuming the backend in use does not support native boolean types. We can also use op directives with constraints and not give them a name at all, if the naming convention doesn't require one. The value of ``None`` will be converted into a name that follows the appropriate naming conventions:: def upgrade(): op.create_unique_constraint(None, 'some_table', 'x') When autogenerate renders constraints in a migration script, it typically renders them with their completed name. If using at least Alembic 0.6.4 as well as SQLAlchemy 0.9.4, these will be rendered with a special directive :meth:`.Operations.f` which denotes that the string has already been tokenized:: def upgrade(): op.create_unique_constraint(op.f('uq_const_x'), 'some_table', 'x') For more detail on the naming convention feature, see :ref:`sqla:constraint_naming_conventions`. alembic-rel_1_7_6/docs/build/offline.rst000066400000000000000000000127641417624537100203520ustar00rootroot00000000000000Generating SQL Scripts (a.k.a. "Offline Mode") ============================================== A major capability of Alembic is to generate migrations as SQL scripts, instead of running them against the database - this is also referred to as *offline mode*. This is a critical feature when working in large organizations where access to DDL is restricted, and SQL scripts must be handed off to DBAs. Alembic makes this easy via the ``--sql`` option passed to any ``upgrade`` or ``downgrade`` command. We can, for example, generate a script that revises up to rev ``ae1027a6acf``:: $ alembic upgrade ae1027a6acf --sql INFO [alembic.context] Context class PostgresqlContext. INFO [alembic.context] Will assume transactional DDL. 
BEGIN; CREATE TABLE alembic_version ( version_num VARCHAR(32) NOT NULL ); INFO [alembic.context] Running upgrade None -> 1975ea83b712 CREATE TABLE account ( id SERIAL NOT NULL, name VARCHAR(50) NOT NULL, description VARCHAR(200), PRIMARY KEY (id) ); INFO [alembic.context] Running upgrade 1975ea83b712 -> ae1027a6acf ALTER TABLE account ADD COLUMN last_transaction_date TIMESTAMP WITHOUT TIME ZONE; INSERT INTO alembic_version (version_num) VALUES ('ae1027a6acf'); COMMIT; While the logging configuration dumped to standard error, the actual script was dumped to standard output - so in the absence of further configuration (described later in this section), we'd at first be using output redirection to generate a script:: $ alembic upgrade ae1027a6acf --sql > migration.sql Getting the Start Version -------------------------- Notice that our migration script started at the base - this is the default when using offline mode, as no database connection is present and there's no ``alembic_version`` table to read from. One way to provide a starting version in offline mode is to provide a range to the command line. This is accomplished by providing the "version" in ``start:end`` syntax:: $ alembic upgrade 1975ea83b712:ae1027a6acf --sql > migration.sql The ``start:end`` syntax is only allowed in offline mode; in "online" mode, the ``alembic_version`` table is always used to get at the current version. It's also possible to have the ``env.py`` script retrieve the "last" version from the local environment, such as from a local file. A scheme like this would basically treat a local file in the same way ``alembic_version`` works:: if context.is_offline_mode(): version_file = os.path.join(os.path.dirname(config.config_file_name), "version.txt") if os.path.exists(version_file): current_version = open(version_file).read() else: current_version = None context.configure(dialect_name=engine.name, starting_rev=current_version) context.run_migrations() end_version = context.get_revision_argument() if end_version and end_version != current_version: open(version_file, 'w').write(end_version) Writing Migration Scripts to Support Script Generation ------------------------------------------------------ The challenge of SQL script generation is that the scripts we generate can't rely upon any client/server database access. This means a migration script that pulls some rows into memory via a ``SELECT`` statement will not work in ``--sql`` mode. It's also important that the Alembic directives, all of which are designed specifically to work in both "live execution" as well as "offline SQL generation" mode, are used. Customizing the Environment --------------------------- Users of the ``--sql`` option are encouraged to hack their ``env.py`` files to suit their needs. The ``env.py`` script as provided is broken into two sections: ``run_migrations_online()`` and ``run_migrations_offline()``. Which function is run is determined at the bottom of the script by reading :meth:`.EnvironmentContext.is_offline_mode`, which basically determines if the ``--sql`` flag was enabled. For example, a multiple database configuration may want to run through each database and set the output of the migrations to different named files - the :meth:`.EnvironmentContext.configure` function accepts a parameter ``output_buffer`` for this purpose. 
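In the simplest single-database case, any writable text stream may be passed as ``output_buffer`` so that the generated SQL lands in a file of our choosing rather than on standard output; a minimal sketch of such an ``env.py`` fragment (the output filename is arbitrary)::

    from alembic import context

    config = context.config

    def run_migrations_offline():
        """Write the offline SQL for a single database to a file."""
        with open("migration.sql", "w") as buffer:
            context.configure(
                url=config.get_main_option("sqlalchemy.url"),
                output_buffer=buffer,
            )
            with context.begin_transaction():
                context.run_migrations()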
Below, we illustrate the multiple-database case within the ``run_migrations_offline()`` function:: from alembic import context import myapp import sys db_1 = myapp.db_1 db_2 = myapp.db_2 def run_migrations_offline(): """Run migrations *without* a SQL connection.""" for name, engine, file_ in [ ("db1", db_1, "db1.sql"), ("db2", db_2, "db2.sql"), ]: context.configure( url=engine.url, transactional_ddl=False, output_buffer=open(file_, 'w')) context.execute("-- running migrations for '%s'" % name) context.run_migrations(name=name) sys.stderr.write("Wrote file '%s'" % file_) def run_migrations_online(): """Run migrations *with* a SQL connection.""" for name, engine in [ ("db1", db_1), ("db2", db_2), ]: with engine.connect() as connection: context.configure(connection=connection) with context.begin_transaction(): context.run_migrations(name=name) if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() alembic-rel_1_7_6/docs/build/ops.rst000066400000000000000000000036511417624537100175260ustar00rootroot00000000000000.. _ops: =================== Operation Reference =================== This file provides documentation on Alembic migration directives. The directives here are used within user-defined migration files, within the ``upgrade()`` and ``downgrade()`` functions, as well as any functions further invoked by those. All directives exist as methods on a class called :class:`.Operations`. When migration scripts are run, this object is made available to the script via the ``alembic.op`` datamember, which is a *proxy* to an actual instance of :class:`.Operations`. Currently, ``alembic.op`` is a real Python module, populated with individual proxies for each method on :class:`.Operations`, so symbols can be imported safely from the ``alembic.op`` namespace. The :class:`.Operations` system is also fully extensible. See :ref:`operation_plugins` for details on this. A key design philosophy of the :ref:`alembic.operations.toplevel` methods is that to the greatest degree possible, they internally generate the appropriate SQLAlchemy metadata, typically involving :class:`~sqlalchemy.schema.Table` and :class:`~sqlalchemy.schema.Constraint` objects. This is so that migration instructions can be given in terms of just the string names and/or flags involved. The exceptions to this rule include the :meth:`~.Operations.add_column` and :meth:`~.Operations.create_table` directives, which require full :class:`~sqlalchemy.schema.Column` objects, though the table metadata is still generated here. The functions here all require that a :class:`.MigrationContext` has been configured within the ``env.py`` script first, which is typically via :meth:`.EnvironmentContext.configure`. Under normal circumstances they are called from an actual migration script, which itself would be invoked by the :meth:`.EnvironmentContext.run_migrations` method. .. module:: alembic.operations .. autoclass:: Operations :members: .. 
autoclass:: BatchOperations :members:alembic-rel_1_7_6/docs/build/requirements.txt000066400000000000000000000005251417624537100214540ustar00rootroot00000000000000git+https://github.com/sqlalchemyorg/changelog.git#egg=changelog git+https://github.com/sqlalchemyorg/sphinx-paramlinks.git#egg=sphinx-paramlinks git+https://github.com/sqlalchemy/sqlalchemy.git python-dateutil # because there's a dependency in pyfiles.py Mako importlib-metadata;python_version<"3.8" importlib-resources;python_version<"3.9"alembic-rel_1_7_6/docs/build/tutorial.rst000066400000000000000000000601731417624537100205720ustar00rootroot00000000000000======== Tutorial ======== Alembic provides for the creation, management, and invocation of *change management* scripts for a relational database, using SQLAlchemy as the underlying engine. This tutorial will provide a full introduction to the theory and usage of this tool. To begin, make sure Alembic is installed as described at :ref:`installation`. As stated in the linked document, it is usually preferable that Alembic is installed in the **same module / Python path as that of the target project**, usually using a `Python virtual environment `_, so that when the ``alembic`` command is run, the Python script which is invoked by ``alembic``, namely your project's ``env.py`` script, will have access to your application's models. This is not strictly necessary in all cases, however in the vast majority of cases is usually preferred. The tutorial below assumes the ``alembic`` command line utility is present in the local path and when invoked, will have access to the same Python module environment as that of the target project. The Migration Environment ========================== Usage of Alembic starts with creation of the *Migration Environment*. This is a directory of scripts that is specific to a particular application. The migration environment is created just once, and is then maintained along with the application's source code itself. The environment is created using the ``init`` command of Alembic, and is then customizable to suit the specific needs of the application. The structure of this environment, including some generated migration scripts, looks like:: yourproject/ alembic/ env.py README script.py.mako versions/ 3512b954651e_add_account.py 2b1ae634e5cd_add_order_id.py 3adcc9a56557_rename_username_field.py The directory includes these directories/files: * ``yourproject`` - this is the root of your application's source code, or some directory within it. * ``alembic`` - this directory lives within your application's source tree and is the home of the migration environment. It can be named anything, and a project that uses multiple databases may even have more than one. * ``env.py`` - This is a Python script that is run whenever the alembic migration tool is invoked. At the very least, it contains instructions to configure and generate a SQLAlchemy engine, procure a connection from that engine along with a transaction, and then invoke the migration engine, using the connection as a source of database connectivity. The ``env.py`` script is part of the generated environment so that the way migrations run is entirely customizable. The exact specifics of how to connect are here, as well as the specifics of how the migration environment are invoked. The script can be modified so that multiple engines can be operated upon, custom arguments can be passed into the migration environment, application-specific libraries and models can be loaded in and made available. 
Alembic includes a set of initialization templates which feature different varieties of ``env.py`` for different use cases. * ``README`` - included with the various environment templates, should have something informative. * ``script.py.mako`` - This is a `Mako `_ template file which is used to generate new migration scripts. Whatever is here is used to generate new files within ``versions/``. This is scriptable so that the structure of each migration file can be controlled, including standard imports to be within each, as well as changes to the structure of the ``upgrade()`` and ``downgrade()`` functions. For example, the ``multidb`` environment allows for multiple functions to be generated using a naming scheme ``upgrade_engine1()``, ``upgrade_engine2()``. * ``versions/`` - This directory holds the individual version scripts. Users of other migration tools may notice that the files here don't use ascending integers, and instead use a partial GUID approach. In Alembic, the ordering of version scripts is relative to directives within the scripts themselves, and it is theoretically possible to "splice" version files in between others, allowing migration sequences from different branches to be merged, albeit carefully by hand. Creating an Environment ======================= With a basic understanding of what the environment is, we can create one using ``alembic init``. This will create an environment using the "generic" template:: $ cd /path/to/yourproject $ source /path/to/yourproject/.venv/bin/activate # assuming a local virtualenv $ alembic init alembic Where above, the ``init`` command was called to generate a migrations directory called ``alembic``:: Creating directory /path/to/yourproject/alembic...done Creating directory /path/to/yourproject/alembic/versions...done Generating /path/to/yourproject/alembic.ini...done Generating /path/to/yourproject/alembic/env.py...done Generating /path/to/yourproject/alembic/README...done Generating /path/to/yourproject/alembic/script.py.mako...done Please edit configuration/connection/logging settings in '/path/to/yourproject/alembic.ini' before proceeding. Alembic also includes other environment templates. These can be listed out using the ``list_templates`` command:: $ alembic list_templates Available templates: generic - Generic single-database configuration. async - Generic single-database configuration with an async dbapi. multidb - Rudimentary multi-database configuration. pylons - Configuration that reads from a Pylons project environment. Templates are used via the 'init' command, e.g.: alembic init --template pylons ./scripts Editing the .ini File ===================== Alembic placed a file ``alembic.ini`` into the current directory. This is a file that the ``alembic`` script looks for when invoked. This file can exist in a different directory, with the location to it specified by either the ``--config`` option for the ``alembic`` runner or the ``ALEMBIC_CONFIG`` environment variable (the former takes precedence). The file generated with the "generic" configuration looks like:: # A generic, single database configuration. [alembic] # path to migration scripts script_location = alembic # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # sys.path path, will be prepended to sys.path if present. # defaults to the current working directory. # (new in 1.5.5) prepend_sys_path = . # timezone to use when rendering the date within the migration file # as well as the filename. 
# If specified, requires the python-dateutil library that can be # installed by adding `alembic[tz]` to the pip requirements # string value is passed to dateutil.tz.gettz() # leave blank for localtime # timezone = # max length of characters to apply to the # "slug" field # truncate_slug_length = 40 # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false # set to 'true' to allow .pyc and .pyo files without # a source .py file to be detected as revisions in the # versions/ directory # sourceless = false # version location specification; This defaults # to ${script_location}/versions. When using multiple version # directories, initial revisions must be specified with --version-path. # The path separator used here should be the separator specified by "version_path_separator" below. # version_locations = %(here)s/bar:%(here)s/bat:${script_location}/versions # version path separator; As mentioned above, this is the character used to split # version_locations. The default within new alembic.ini files is "os", which uses os.pathsep. # If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas. # Valid values for version_path_separator are: # # version_path_separator = : # version_path_separator = ; # version_path_separator = space version_path_separator = os # Use os.pathsep. Default configuration used for new projects. # the output encoding used when revision files # are written from script.py.mako # output_encoding = utf-8 sqlalchemy.url = driver://user:pass@localhost/dbname # [post_write_hooks] # This section defines scripts or Python functions that are run # on newly generated revision scripts. See the documentation for further # detail and examples # format using "black" - use the console_scripts runner, # against the "black" entrypoint # hooks = black # black.type = console_scripts # black.entrypoint = black # black.options = -l 79 REVISION_SCRIPT_FILENAME # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S The file is read using Python's :class:`ConfigParser.SafeConfigParser` object. The ``%(here)s`` variable is provided as a substitution variable, which can be used to produce absolute pathnames to directories and files, as we do above with the path to the Alembic script location. This file contains the following features: * ``[alembic]`` - this is the section read by Alembic to determine configuration. Alembic itself does not directly read any other areas of the file. The name "alembic" can be customized using the ``--name`` commandline flag; see :ref:`multiple_environments` for a basic example of this. * ``script_location`` - this is the location of the Alembic environment. It is normally specified as a filesystem location, either relative or absolute. If the location is a relative path, it's interpreted as relative to the current directory. This is the only key required by Alembic in all cases. The generation of the .ini file by the command ``alembic init alembic`` automatically placed the directory name ``alembic`` here. 
The special variable ``%(here)s`` can also be used, as in ``%(here)s/alembic``. For support of applications that package themselves into .egg files, the value can also be specified as a `package resource `_, in which case ``resource_filename()`` is used to find the file (new in 0.2.2). Any non-absolute URI which contains colons is interpreted here as a resource name, rather than a straight filename. * ``file_template`` - this is the naming scheme used to generate new migration files. The value present is the default, so is commented out. Tokens available include: * ``%%(rev)s`` - revision id * ``%%(slug)s`` - a truncated string derived from the revision message * ``%%(year)d``, ``%%(month).2d``, ``%%(day).2d``, ``%%(hour).2d``, ``%%(minute).2d``, ``%%(second).2d`` - components of the create date, by default ``datetime.datetime.now()`` unless the ``timezone`` configuration option is also used. * ``timezone`` - an optional timezone name (e.g. ``UTC``, ``EST5EDT``, etc.) that will be applied to the timestamp which renders inside the migration file's comment as well as within the filename. This option requires installing the ``python-dateutil`` library. If ``timezone`` is specified, the create date object is no longer derived from ``datetime.datetime.now()`` and is instead generated as:: datetime.datetime.utcnow().replace( tzinfo=dateutil.tz.tzutc() ).astimezone( dateutil.tz.gettz() ) * ``truncate_slug_length`` - defaults to 40, the max number of characters to include in the "slug" field. * ``sqlalchemy.url`` - A URL to connect to the database via SQLAlchemy. This configuration value is only used if the ``env.py`` file calls upon them; in the "generic" template, the call to ``config.get_main_option("sqlalchemy.url")`` in the ``run_migrations_offline()`` function and the call to ``engine_from_config(prefix="sqlalchemy.")`` in the ``run_migrations_online()`` function are where this key is referenced. If the SQLAlchemy URL should come from some other source, such as from environment variables or a global registry, or if the migration environment makes use of multiple database URLs, the developer is encouraged to alter the ``env.py`` file to use whatever methods are appropriate in order to acquire the database URL or URLs. * ``revision_environment`` - this is a flag which when set to the value 'true', will indicate that the migration environment script ``env.py`` should be run unconditionally when generating new revision files, as well as when running the ``alembic history`` command. * ``sourceless`` - when set to 'true', revision files that only exist as .pyc or .pyo files in the versions directory will be used as versions, allowing "sourceless" versioning folders. When left at the default of 'false', only .py files are consumed as version files. * ``version_locations`` - an optional list of revision file locations, to allow revisions to exist in multiple directories simultaneously. See :ref:`multiple_bases` for examples. * ``version_path_separator`` - a separator of ``version_locations`` paths. It should be defined if multiple ``version_locations`` is used. See :ref:`multiple_bases` for examples. * ``output_encoding`` - the encoding to use when Alembic writes the ``script.py.mako`` file into a new migration file. Defaults to ``'utf-8'``. * ``[loggers]``, ``[handlers]``, ``[formatters]``, ``[logger_*]``, ``[handler_*]``, ``[formatter_*]`` - these sections are all part of Python's standard logging configuration, the mechanics of which are documented at `Configuration File Format `_. 
As is the case with the database connection, these directives are used directly as the result of the ``logging.config.fileConfig()`` call present in the ``env.py`` script, which you're free to modify. For starting up with just a single database and the generic configuration, setting up the SQLAlchemy URL is all that's needed:: sqlalchemy.url = postgresql://scott:tiger@localhost/test .. _create_migration: Create a Migration Script ========================= With the environment in place we can create a new revision, using ``alembic revision``:: $ alembic revision -m "create account table" Generating /path/to/yourproject/alembic/versions/1975ea83b712_create_accoun t_table.py...done A new file ``1975ea83b712_create_account_table.py`` is generated. Looking inside the file:: """create account table Revision ID: 1975ea83b712 Revises: Create Date: 2011-11-08 11:40:27.089406 """ # revision identifiers, used by Alembic. revision = '1975ea83b712' down_revision = None branch_labels = None from alembic import op import sqlalchemy as sa def upgrade(): pass def downgrade(): pass The file contains some header information, identifiers for the current revision and a "downgrade" revision, an import of basic Alembic directives, and empty ``upgrade()`` and ``downgrade()`` functions. Our job here is to populate the ``upgrade()`` and ``downgrade()`` functions with directives that will apply a set of changes to our database. Typically, ``upgrade()`` is required while ``downgrade()`` is only needed if down-revision capability is desired, though it's probably a good idea. Another thing to notice is the ``down_revision`` variable. This is how Alembic knows the correct order in which to apply migrations. When we create the next revision, the new file's ``down_revision`` identifier would point to this one:: # revision identifiers, used by Alembic. revision = 'ae1027a6acf' down_revision = '1975ea83b712' Every time Alembic runs an operation against the ``versions/`` directory, it reads all the files in, and composes a list based on how the ``down_revision`` identifiers link together, with the ``down_revision`` of ``None`` representing the first file. In theory, if a migration environment had thousands of migrations, this could begin to add some latency to startup, but in practice a project should probably prune old migrations anyway (see the section :ref:`building_uptodate` for a description on how to do this, while maintaining the ability to build the current database fully). We can then add some directives to our script, suppose adding a new table ``account``:: def upgrade(): op.create_table( 'account', sa.Column('id', sa.Integer, primary_key=True), sa.Column('name', sa.String(50), nullable=False), sa.Column('description', sa.Unicode(200)), ) def downgrade(): op.drop_table('account') :meth:`~.Operations.create_table` and :meth:`~.Operations.drop_table` are Alembic directives. Alembic provides all the basic database migration operations via these directives, which are designed to be as simple and minimalistic as possible; there's no reliance upon existing table metadata for most of these directives. They draw upon a global "context" that indicates how to get at a database connection (if any; migrations can dump SQL/DDL directives to files as well) in order to invoke the command. This global context is set up, like everything else, in the ``env.py`` script. An overview of all Alembic directives is at :ref:`ops`. Running our First Migration =========================== We now want to run our migration. 
Assuming our database is totally clean, it's as yet unversioned. The ``alembic upgrade`` command will run upgrade operations, proceeding from the current database revision, in this example ``None``, to the given target revision. We can specify ``1975ea83b712`` as the revision we'd like to upgrade to, but it's easier in most cases just to tell it "the most recent", in this case ``head``:: $ alembic upgrade head INFO [alembic.context] Context class PostgresqlContext. INFO [alembic.context] Will assume transactional DDL. INFO [alembic.context] Running upgrade None -> 1975ea83b712 Wow that rocked! Note that the information we see on the screen is the result of the logging configuration set up in ``alembic.ini`` - logging the ``alembic`` stream to the console (standard error, specifically). The process which occurred here included that Alembic first checked if the database had a table called ``alembic_version``, and if not, created it. It looks in this table for the current version, if any, and then calculates the path from this version to the version requested, in this case ``head``, which is known to be ``1975ea83b712``. It then invokes the ``upgrade()`` method in each file to get to the target revision. Running our Second Migration ============================= Let's do another one so we have some things to play with. We again create a revision file:: $ alembic revision -m "Add a column" Generating /path/to/yourapp/alembic/versions/ae1027a6acf_add_a_column.py... done Let's edit this file and add a new column to the ``account`` table:: """Add a column Revision ID: ae1027a6acf Revises: 1975ea83b712 Create Date: 2011-11-08 12:37:36.714947 """ # revision identifiers, used by Alembic. revision = 'ae1027a6acf' down_revision = '1975ea83b712' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('account', sa.Column('last_transaction_date', sa.DateTime)) def downgrade(): op.drop_column('account', 'last_transaction_date') Running again to ``head``:: $ alembic upgrade head INFO [alembic.context] Context class PostgresqlContext. INFO [alembic.context] Will assume transactional DDL. INFO [alembic.context] Running upgrade 1975ea83b712 -> ae1027a6acf We've now added the ``last_transaction_date`` column to the database. Partial Revision Identifiers ============================= Any time we need to refer to a revision number explicitly, we have the option to use a partial number. As long as this number uniquely identifies the version, it may be used in any command in any place that version numbers are accepted:: $ alembic upgrade ae1 Above, we use ``ae1`` to refer to revision ``ae1027a6acf``. Alembic will stop and let you know if more than one version starts with that prefix. .. _relative_migrations: Relative Migration Identifiers ============================== Relative upgrades/downgrades are also supported. To move two versions from the current, a decimal value "+N" can be supplied:: $ alembic upgrade +2 Negative values are accepted for downgrades:: $ alembic downgrade -1 Relative identifiers may also be in terms of a specific revision. For example, to upgrade to revision ``ae1027a6acf`` plus two additional steps:: $ alembic upgrade ae10+2 Getting Information =================== With a few revisions present we can get some information about the state of things. First we can view the current revision:: $ alembic current INFO [alembic.context] Context class PostgresqlContext. INFO [alembic.context] Will assume transactional DDL. 
Current revision for postgresql://scott:XXXXX@localhost/test: 1975ea83b712 -> ae1027a6acf (head), Add a column ``head`` is displayed only if the revision identifier for this database matches the head revision. We can also view history with ``alembic history``; the ``--verbose`` option (accepted by several commands, including ``history``, ``current``, ``heads`` and ``branches``) will show us full information about each revision:: $ alembic history --verbose Rev: ae1027a6acf (head) Parent: 1975ea83b712 Path: /path/to/yourproject/alembic/versions/ae1027a6acf_add_a_column.py add a column Revision ID: ae1027a6acf Revises: 1975ea83b712 Create Date: 2014-11-20 13:02:54.849677 Rev: 1975ea83b712 Parent: Path: /path/to/yourproject/alembic/versions/1975ea83b712_add_account_table.py create account table Revision ID: 1975ea83b712 Revises: Create Date: 2014-11-20 13:02:46.257104 Viewing History Ranges ---------------------- Using the ``-r`` option to ``alembic history``, we can also view various slices of history. The ``-r`` argument accepts an argument ``[start]:[end]``, where either may be a revision number, symbols like ``head``, ``heads`` or ``base``, ``current`` to specify the current revision(s), as well as negative relative ranges for ``[start]`` and positive relative ranges for ``[end]``:: $ alembic history -r1975ea:ae1027 A relative range starting from three revs ago up to current migration, which will invoke the migration environment against the database to get the current migration:: $ alembic history -r-3:current View all revisions from 1975 to the head:: $ alembic history -r1975ea: Downgrading =========== We can illustrate a downgrade back to nothing, by calling ``alembic downgrade`` back to the beginning, which in Alembic is called ``base``:: $ alembic downgrade base INFO [alembic.context] Context class PostgresqlContext. INFO [alembic.context] Will assume transactional DDL. INFO [alembic.context] Running downgrade ae1027a6acf -> 1975ea83b712 INFO [alembic.context] Running downgrade 1975ea83b712 -> None Back to nothing - and up again:: $ alembic upgrade head INFO [alembic.context] Context class PostgresqlContext. INFO [alembic.context] Will assume transactional DDL. INFO [alembic.context] Running upgrade None -> 1975ea83b712 INFO [alembic.context] Running upgrade 1975ea83b712 -> ae1027a6acf Next Steps ========== The vast majority of Alembic environments make heavy use of the "autogenerate" feature. Continue onto the next section, :doc:`autogenerate`. alembic-rel_1_7_6/docs/build/unreleased/000077500000000000000000000000001417624537100203155ustar00rootroot00000000000000alembic-rel_1_7_6/docs/build/unreleased/README.txt000066400000000000000000000006351417624537100220170ustar00rootroot00000000000000Individual per-changelog files go here in .rst format, which are pulled in by changelog (version 0.4.0 or higher) to be rendered into the changelog_xx.rst file. At release time, the files here are removed and written directly into the changelog. Rationale is so that multiple changes being merged into gerrit don't produce conflicts. Note that gerrit does not support custom merge handlers unlike git itself. alembic-rel_1_7_6/pyproject.toml000066400000000000000000000000361417624537100170520ustar00rootroot00000000000000[tool.black] line-length = 79 alembic-rel_1_7_6/reap_dbs.py000066400000000000000000000013041417624537100162660ustar00rootroot00000000000000"""Drop Oracle, SQL Server databases that are left over from a multiprocessing test run. 
Currently the cx_Oracle driver seems to sometimes not release a TCP connection even if close() is called, which prevents the provisioning system from dropping a database in-process. For SQL Server, databases still remain in use after tests run and running a kill of all detected sessions does not seem to release the database in process. """ import logging import sys from sqlalchemy.testing import provision logging.basicConfig() logging.getLogger(provision.__name__).setLevel(logging.INFO) if hasattr(provision, "reap_dbs"): provision.reap_dbs(sys.argv[1]) else: provision.reap_oracle_dbs(sys.argv[1]) alembic-rel_1_7_6/setup.cfg000066400000000000000000000061031417624537100157600ustar00rootroot00000000000000[metadata] name = alembic # version comes from setup.py; setuptools # can't read the "attr:" here without importing # until version 47.0.0 which is too recent description = A database migration tool for SQLAlchemy. long_description = file: README.rst long_description_content_type = text/x-rst url=https://alembic.sqlalchemy.org author = Mike Bayer author_email = mike_mp@zzzcomputing.com license = MIT license_file = LICENSE classifiers = Development Status :: 5 - Production/Stable Intended Audience :: Developers Environment :: Console License :: OSI Approved :: MIT License Operating System :: OS Independent Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: Implementation :: PyPy Topic :: Database :: Front-Ends [options] packages = find: include_package_data = true zip_safe = false python_requires = >=3.6 install_requires = SQLAlchemy>=1.3.0 Mako importlib-metadata;python_version<"3.9" importlib-resources;python_version<"3.9" [options.extras_require] tz = python-dateutil [options.package_data] alembic = *.pyi, py.typed [options.packages.find] exclude = test* examples* [options.exclude_package_data] '' = test* [options.entry_points] console_scripts = alembic = alembic.config:main [egg_info] tag_build=dev [upload_docs] upload-dir = docs/build/output/html [upload] sign = 1 identity = C4DAFEE1 [nosetests] with-sqla_testing = true where = tests [flake8] enable-extensions = G # E203 is due to https://github.com/PyCQA/pycodestyle/issues/373 ignore = A003, D, E203,E305,E711,E712,E721,E722,E741, N801,N802,N806, RST304,RST303,RST299,RST399, W503,W504 exclude = .venv,.git,.tox,dist,doc,*egg,build import-order-style = google application-import-names = alembic,tests per-file-ignores = **/__init__.py:F401 max-line-length = 79 [sqla_testing] requirement_cls=tests.requirements:DefaultRequirements profile_file=tests/profiles.txt [db] default=sqlite:///:memory: sqlite=sqlite:///:memory: sqlite_file=sqlite:///querytest.db postgresql=postgresql://scott:tiger@127.0.0.1:5432/test mysql=mysql://scott:tiger@127.0.0.1:3306/test?charset=utf8mb4 mariadb = mariadb://scott:tiger@127.0.0.1:3306/test?charset=utf8mb4 mssql = mssql+pyodbc://scott:tiger^5HHH@mssql2017:1433/test?driver=ODBC+Driver+13+for+SQL+Server oracle=oracle://scott:tiger@127.0.0.1:1521 oracle8=oracle://scott:tiger@127.0.0.1:1521/?use_ansi=0 [alembic] [tool:pytest] addopts= --tb native -v -r sfxX -p no:warnings -p no:logging --maxfail=25 python_files=tests/test_*.py [mypy] show_error_codes = True allow_redefinition = True [mypy-mako.*] ignore_missing_imports = True [mypy-sqlalchemy.testing.*] 
ignore_missing_imports = True [mypy-importlib_resources.*] ignore_missing_imports = True [mypy-importlib_metadata.*] ignore_missing_imports = True alembic-rel_1_7_6/setup.py000066400000000000000000000014331417624537100156520ustar00rootroot00000000000000import os import re import sys from setuptools import setup from setuptools.command.test import test as TestCommand v = open(os.path.join(os.path.dirname(__file__), "alembic", "__init__.py")) VERSION = ( re.compile(r""".*__version__ = ["'](.*?)["']""", re.S) .match(v.read()) .group(1) ) v.close() class UseTox(TestCommand): RED = 31 RESET_SEQ = "\033[0m" BOLD_SEQ = "\033[1m" COLOR_SEQ = "\033[1;%dm" def run_tests(self): sys.stderr.write( "%s%spython setup.py test is deprecated by pypa. Please invoke " "'tox' with no arguments for a basic test run.\n%s" % (self.COLOR_SEQ % self.RED, self.BOLD_SEQ, self.RESET_SEQ) ) sys.exit(1) setup( version=VERSION, cmdclass={"test": UseTox}, ) alembic-rel_1_7_6/tests/000077500000000000000000000000001417624537100153015ustar00rootroot00000000000000alembic-rel_1_7_6/tests/__init__.py000066400000000000000000000000001417624537100174000ustar00rootroot00000000000000alembic-rel_1_7_6/tests/_large_map.py000066400000000000000000000162251417624537100177470ustar00rootroot00000000000000from alembic.script.revision import Revision from alembic.script.revision import RevisionMap data = [ Revision("3fc8a578bc0a", ("4878cb1cb7f6", "454a0529f84e")), Revision("69285b0faaa", ("36c31e4e1c37", "3a3b24a31b57")), Revision("3b0452c64639", "2f1a0f3667f3"), Revision("2d9d787a496", "135b5fd31062"), Revision("184f65ed83af", "3b0452c64639"), Revision("430074f99c29", "54f871bfe0b0"), Revision("3ffb59981d9a", "519c9f3ce294"), Revision("454a0529f84e", ("40f6508e4373", "38a936c6ab11")), Revision("24c2620b2e3f", ("430074f99c29", "1f5ceb1ec255")), Revision("169a948471a9", "247ad6880f93"), Revision("2f1a0f3667f3", "17dd0f165262"), Revision("27227dc4fda8", "2a66d7c4d8a1"), Revision("4b2ad1ffe2e7", ("3b409f268da4", "4f8a9b79a063")), Revision("124ef6a17781", "2529684536da"), Revision("4789d9c82ca7", "593b8076fb2c"), Revision("64ed798bcc3", ("44ed1bf512a0", "169a948471a9")), Revision("2588a3c36a0f", "50c7b21c9089"), Revision("359329c2ebb", ("5810e9eff996", "339faa12616")), Revision("540bc5634bd", "3a5db5f31209"), Revision("20fe477817d2", "53d5ff905573"), Revision("4f8a9b79a063", ("3cf34fcd6473", "300209d8594")), Revision("6918589deaf", "3314c17f6e35"), Revision("1755e3b1481c", ("17b66754be21", "31b1d4b7fc95")), Revision("58c988e1aa4e", ("219240032b88", "f067f0b825c")), Revision("593b8076fb2c", "1d94175d221b"), Revision("38d069994064", ("46b70a57edc0", "3ed56beabfb7")), Revision("3e2f6c6d1182", "7f96a01461b"), Revision("1f6969597fe7", "1811bdae9e63"), Revision("17dd0f165262", "3cf02a593a68"), Revision("3cf02a593a68", "25a7ef58d293"), Revision("34dfac7edb2d", "28f4dd53ad3a"), Revision("4009c533e05d", "42ded7355da2"), Revision("5a0003c3b09c", ("3ed56beabfb7", "2028d94d3863")), Revision("38a936c6ab11", "2588a3c36a0f"), Revision("59223c5b7b36", "2f93dd880bae"), Revision("4121bd6e99e9", "540bc5634bd"), Revision("260714a3f2de", "6918589deaf"), Revision("ae77a2ed69b", "274fd2642933"), Revision("18ff1ab3b4c4", "430133b6d46c"), Revision("2b9a327527a9", ("359329c2ebb", "593b8076fb2c")), Revision("4e6167c75ed0", "325b273d61bd"), Revision("21ab11a7c5c4", ("3da31f3323ec", "22f26011d635")), Revision("3b93e98481b1", "4e28e2f4fe2f"), Revision("145d8f1e334d", "b4143d129e"), Revision("135b5fd31062", "1d94175d221b"), Revision("300209d8594", ("52804033910e", 
"593b8076fb2c")), Revision("8dca95cce28", "f034666cd80"), Revision("46b70a57edc0", ("145d8f1e334d", "4cc2960cbe19")), Revision("4d45e479fbb9", "2d9d787a496"), Revision("22f085bf8bbd", "540bc5634bd"), Revision("263e91fd17d8", "2b9a327527a9"), Revision("219240032b88", ("300209d8594", "2b9a327527a9")), Revision("325b273d61bd", "4b2ad1ffe2e7"), Revision("199943ccc774", "1aa674ccfa4e"), Revision("247ad6880f93", "1f6969597fe7"), Revision("4878cb1cb7f6", "28f4dd53ad3a"), Revision("2a66d7c4d8a1", "23f1ccb18d6d"), Revision("42b079245b55", "593b8076fb2c"), Revision("1cccf82219cb", ("20fe477817d2", "915c67915c2")), Revision("b4143d129e", ("159331d6f484", "504d5168afe1")), Revision("53d5ff905573", "3013877bf5bd"), Revision("1f5ceb1ec255", "3ffb59981d9a"), Revision("ef1c1c1531f", "4738812e6ece"), Revision("1f6963d1ae02", "247ad6880f93"), Revision("44d58f1d31f0", "18ff1ab3b4c4"), Revision("c3ebe64dfb5", ("3409c57b0da", "31f352e77045")), Revision("f067f0b825c", "359329c2ebb"), Revision("52ab2d3b57ce", "96d590bd82e"), Revision("3b409f268da4", ("20e90eb3eeb6", "263e91fd17d8")), Revision("5a4ca8889674", "4e6167c75ed0"), Revision("5810e9eff996", ("2d30d79c4093", "52804033910e")), Revision("40f6508e4373", "4ed16fad67a7"), Revision("1811bdae9e63", "260714a3f2de"), Revision("3013877bf5bd", ("8dca95cce28", "3fc8a578bc0a")), Revision("16426dbea880", "28f4dd53ad3a"), Revision("22f26011d635", ("4c93d063d2ba", "3b93e98481b1")), Revision("3409c57b0da", "17b66754be21"), Revision("44373001000f", ("42b079245b55", "219240032b88")), Revision("28f4dd53ad3a", "2e71fd90eb9d"), Revision("4cc2960cbe19", "504d5168afe1"), Revision("31f352e77045", ("17b66754be21", "22f085bf8bbd")), Revision("4ed16fad67a7", "f034666cd80"), Revision("3da31f3323ec", "4c93d063d2ba"), Revision("31b1d4b7fc95", "1cc4459fd115"), Revision("11bc0ff42f87", "28f4dd53ad3a"), Revision("3a5db5f31209", "59742a546b84"), Revision("20e90eb3eeb6", ("58c988e1aa4e", "44373001000f")), Revision("23f1ccb18d6d", "52ab2d3b57ce"), Revision("1d94175d221b", "21ab11a7c5c4"), Revision("36f1a410ed", "54f871bfe0b0"), Revision("181a149173e", "2ee35cac4c62"), Revision("171ad2f0c672", "4a4e0838e206"), Revision("2f93dd880bae", "540bc5634bd"), Revision("25a7ef58d293", None), Revision("7f96a01461b", "184f65ed83af"), Revision("b21f22233f", "3e2f6c6d1182"), Revision("52804033910e", "1d94175d221b"), Revision("1e6240aba5b3", ("4121bd6e99e9", "2c50d8bab6ee")), Revision("1cc4459fd115", "1e6240aba5b3"), Revision("274fd2642933", "4009c533e05d"), Revision("1aa674ccfa4e", ("59223c5b7b36", "42050bf030fd")), Revision("4e28e2f4fe2f", "596d7b9e11"), Revision("49ddec8c7a5e", ("124ef6a17781", "47578179e766")), Revision("3e9bb349cc46", "ef1c1c1531f"), Revision("2028d94d3863", "504d5168afe1"), Revision("159331d6f484", "34dfac7edb2d"), Revision("596d7b9e11", "171ad2f0c672"), Revision("3b96bcc8da76", "f034666cd80"), Revision("4738812e6ece", "78982bf5499"), Revision("3314c17f6e35", "27227dc4fda8"), Revision("30931c545bf", "2e71fd90eb9d"), Revision("2e71fd90eb9d", ("c3ebe64dfb5", "1755e3b1481c")), Revision("3ed56beabfb7", ("11bc0ff42f87", "69285b0faaa")), Revision("96d590bd82e", "3e9bb349cc46"), Revision("339faa12616", "4d45e479fbb9"), Revision("47578179e766", "2529684536da"), Revision("2ee35cac4c62", "b21f22233f"), Revision("50c7b21c9089", ("4ed16fad67a7", "3b96bcc8da76")), Revision("78982bf5499", "ae77a2ed69b"), Revision("519c9f3ce294", "2c50d8bab6ee"), Revision("2720fc75e5fd", "1cccf82219cb"), Revision("21638ec787ba", "44d58f1d31f0"), Revision("59742a546b84", "49ddec8c7a5e"), Revision("2d30d79c4093", 
"135b5fd31062"), Revision("f034666cd80", ("5a0003c3b09c", "38d069994064")), Revision("430133b6d46c", "181a149173e"), Revision("3a3b24a31b57", ("16426dbea880", "4cc2960cbe19")), Revision("2529684536da", ("64ed798bcc3", "1f6963d1ae02")), Revision("17b66754be21", ("19e0db9d806a", "24c2620b2e3f")), Revision("3cf34fcd6473", ("52804033910e", "4789d9c82ca7")), Revision("36c31e4e1c37", "504d5168afe1"), Revision("54f871bfe0b0", "519c9f3ce294"), Revision("4a4e0838e206", "2a7f37cf7770"), Revision("19e0db9d806a", ("430074f99c29", "36f1a410ed")), Revision("44ed1bf512a0", "247ad6880f93"), Revision("42050bf030fd", "2f93dd880bae"), Revision("2c50d8bab6ee", "199943ccc774"), Revision("504d5168afe1", ("28f4dd53ad3a", "30931c545bf")), Revision("915c67915c2", "3fc8a578bc0a"), Revision("2a7f37cf7770", "2720fc75e5fd"), Revision("4c93d063d2ba", "4e28e2f4fe2f"), Revision("42ded7355da2", "21638ec787ba"), ] map_ = RevisionMap(lambda: data) alembic-rel_1_7_6/tests/conftest.py000077500000000000000000000023451417624537100175070ustar00rootroot00000000000000#!/usr/bin/env python """ pytest plugin script. This script is an extension to py.test which installs SQLAlchemy's testing plugin into the local environment. """ import os import pytest os.environ["SQLALCHEMY_WARN_20"] = "true" pytest.register_assert_rewrite("sqlalchemy.testing.assertions") # ideally, SQLAlchemy would allow us to just import bootstrap, # but for now we have to use its "load from a file" approach # use bootstrapping so that test plugins are loaded # without touching the main library before coverage starts bootstrap_file = os.path.join( os.path.dirname(__file__), "..", "alembic", "testing", "plugin", "bootstrap.py", ) with open(bootstrap_file) as f: code = compile(f.read(), "bootstrap.py", "exec") to_bootstrap = "pytest" exec(code, globals(), locals()) try: from sqlalchemy.testing import asyncio except ImportError: pass else: asyncio.ENABLE_ASYNCIO = False from sqlalchemy.testing.plugin.pytestplugin import * # noqa wrap_pytest_sessionstart = pytest_sessionstart # noqa def pytest_sessionstart(session): wrap_pytest_sessionstart(session) from alembic.testing import warnings warnings.setup_filters() alembic-rel_1_7_6/tests/requirements.py000066400000000000000000000307251417624537100204050ustar00rootroot00000000000000from sqlalchemy import exc as sqla_exc from sqlalchemy import text from alembic.testing import exclusions from alembic.testing.requirements import SuiteRequirements from alembic.util import compat from alembic.util import sqla_compat class DefaultRequirements(SuiteRequirements): @property def unicode_string(self): return exclusions.skip_if(["oracle"]) @property def alter_column(self): return exclusions.skip_if(["sqlite"], "no ALTER COLUMN support") @property def schemas(self): """Target database must support external schemas, and have one named 'test_schema'.""" return exclusions.skip_if(["sqlite", "firebird"], "no schema support") @property def no_referential_integrity(self): """test will fail if referential integrity is enforced""" return exclusions.fails_on_everything_except("sqlite") @property def non_native_boolean(self): """test will fail if native boolean is provided""" return exclusions.fails_if( exclusions.LambdaPredicate( lambda config: config.db.dialect.supports_native_boolean ) ) @property def non_native_boolean_check_constraint(self): """backend creates a check constraint for booleans if enabled""" return exclusions.only_on( exclusions.LambdaPredicate( lambda config: not config.db.dialect.supports_native_boolean and 
config.db.dialect.non_native_boolean_check_constraint ) ) @property def check_constraints_w_enforcement(self): return exclusions.fails_on(["mysql", "mariadb"]) @property def unnamed_constraints(self): """constraints without names are supported.""" return exclusions.only_on(["sqlite"]) @property def fk_names(self): """foreign key constraints always have names in the DB""" return exclusions.fails_on("sqlite") @property def reflects_fk_options(self): return exclusions.open() @property def fk_initially(self): """backend supports INITIALLY option in foreign keys""" return exclusions.only_on(["postgresql"]) @property def fk_deferrable(self): """backend supports DEFERRABLE option in foreign keys""" return exclusions.only_on(["postgresql", "oracle"]) @property def fk_deferrable_is_reflected(self): return self.fk_deferrable + exclusions.fails_on("oracle") @property def fk_ondelete_restrict(self): return exclusions.only_on(["postgresql", "sqlite", "mysql"]) @property def fk_onupdate_restrict(self): return self.fk_onupdate + exclusions.fails_on(["mssql"]) @property def fk_ondelete_noaction(self): return exclusions.only_on( ["postgresql", "mysql", "mariadb", "sqlite", "mssql"] ) @property def fk_ondelete_is_reflected(self): def go(config): if exclusions.against(config, "mssql"): return not sqla_compat.sqla_14_26 else: return False return exclusions.fails_if(go) @property def fk_onupdate_is_reflected(self): def go(config): if exclusions.against(config, "mssql"): return not sqla_compat.sqla_14_26 else: return False return self.fk_onupdate + exclusions.fails_if(go) @property def fk_onupdate(self): return exclusions.only_on( ["postgresql", "mysql", "mariadb", "sqlite", "mssql"] ) @property def reflects_unique_constraints_unambiguously(self): return exclusions.fails_on(["mysql", "mariadb", "oracle"]) @property def reflects_indexes_w_sorting(self): # TODO: figure out what's happening on the SQLAlchemy side # when we reflect an index that has asc() / desc() on the column return exclusions.fails_on(["oracle"]) @property def long_names(self): if sqla_compat.sqla_14: return exclusions.skip_if("oracle<18") else: return exclusions.skip_if("oracle") @property def reflects_pk_names(self): """Target driver reflects the name of primary key constraints.""" return exclusions.fails_on_everything_except( "postgresql", "oracle", "mssql", "sybase", "sqlite" ) @property def datetime_timezone(self): """target dialect supports timezone with datetime types.""" return exclusions.only_on(["postgresql"]) @property def postgresql(self): return exclusions.only_on(["postgresql"]) @property def mysql(self): return exclusions.only_on(["mysql", "mariadb"]) @property def oracle(self): return exclusions.only_on(["oracle"]) @property def mssql(self): return exclusions.only_on(["mssql"]) @property def postgresql_uuid_ossp(self): def check_uuid_ossp(config): if not exclusions.against(config, "postgresql"): return False try: config.db.execute("SELECT uuid_generate_v4()") return True except: return False return exclusions.only_if(check_uuid_ossp) def _has_pg_extension(self, name): def check(config): if not exclusions.against(config, "postgresql"): return False with config.db.connect() as conn: count = conn.scalar( text( "SELECT count(*) FROM pg_extension " "WHERE extname='%s'" % name ) ) return bool(count) return exclusions.only_if(check, "needs %s extension" % name) @property def hstore(self): return self._has_pg_extension("hstore") @property def btree_gist(self): return self._has_pg_extension("btree_gist") @property def 
autoincrement_on_composite_pk(self): return exclusions.skip_if(["sqlite"], "not supported by database") @property def integer_subtype_comparisons(self): """if a compare of Integer and BigInteger is supported yet.""" return exclusions.skip_if(["oracle"], "not supported by alembic impl") @property def autocommit_isolation(self): """target database should support 'AUTOCOMMIT' isolation level""" return exclusions.only_on(["postgresql", "mysql", "mariadb"]) @property def computed_columns(self): # TODO: in theory if these could come from SQLAlchemy dialects # that would be helpful return self.computed_columns_api + exclusions.skip_if( ["postgresql < 12", "sqlite < 3.31", "mysql < 5.7"] ) @property def computed_reflects_as_server_default(self): # note that this rule will go away when SQLAlchemy correctly # supports reflection of the "computed" construct; the element # will consistently be present as both column.computed and # column.server_default for all supported backends. return ( self.computed_columns + exclusions.only_if( ["postgresql", "oracle"], "backend reflects computed construct as a server default", ) + exclusions.skip_if(self.computed_reflects_normally) ) @property def computed_doesnt_reflect_as_server_default(self): # note that this rule will go away when SQLAlchemy correctly # supports reflection of the "computed" construct; the element # will consistently be present as both column.computed and # column.server_default for all supported backends. return ( self.computed_columns + exclusions.skip_if( ["postgresql", "oracle"], "backend reflects computed construct as a server default", ) + exclusions.skip_if(self.computed_reflects_normally) ) @property def check_constraint_reflection(self): return exclusions.fails_on_everything_except( "postgresql", "sqlite", "oracle", self._mysql_and_check_constraints_exist, ) def mysql_check_col_name_change(self, config): # MySQL has check constraints that enforce an reflect, however # they prevent a column's name from being changed due to a bug in # MariaDB 10.2 as well as MySQL 8.0.16 if exclusions.against(config, ["mysql", "mariadb"]): if sqla_compat._is_mariadb(config.db.dialect): mnvi = sqla_compat._mariadb_normalized_version_info norm_version_info = mnvi(config.db.dialect) return norm_version_info >= (10, 2) and norm_version_info < ( 10, 2, 22, ) else: norm_version_info = config.db.dialect.server_version_info return norm_version_info >= (8, 0, 16) else: return True def _mysql_and_check_constraints_exist(self, config): # 1. we have mysql / mariadb and # 2. 
it enforces check constraints if exclusions.against(config, ["mysql", "mariadb"]): if sqla_compat._is_mariadb(config.db.dialect): mnvi = sqla_compat._mariadb_normalized_version_info norm_version_info = mnvi(config.db.dialect) return norm_version_info >= (10, 2) else: norm_version_info = config.db.dialect.server_version_info return norm_version_info >= (8, 0, 16) else: return False @property def json_type(self): return exclusions.only_on( [ lambda config: exclusions.against(config, "mysql") and ( ( not config.db.dialect._is_mariadb and exclusions.against(config, "mysql >= 5.7") ) or ( config.db.dialect._mariadb_normalized_version_info >= (10, 2, 7) ) ), "mariadb>=10.2.7", "postgresql >= 9.3", self._sqlite_json, self._mssql_json, ] ) def _mssql_json(self, config): if not sqla_compat.sqla_14: return False else: return exclusions.against(config, "mssql") def _sqlite_json(self, config): if not sqla_compat.sqla_14: return False elif not exclusions.against(config, "sqlite >= 3.9"): return False else: with config.db.connect() as conn: try: return ( conn.execute( text( """select json_extract('{"foo": "bar"}', """ """'$."foo"')""" ) ).scalar() == "bar" ) except sqla_exc.DBAPIError: return False @property def identity_columns(self): # TODO: in theory if these could come from SQLAlchemy dialects # that would be helpful return self.identity_columns_api + exclusions.only_on( ["postgresql >= 10", "oracle >= 12", "mssql"] ) @property def identity_columns_alter(self): # TODO: in theory if these could come from SQLAlchemy dialects # that would be helpful return self.identity_columns_api + exclusions.only_on( ["postgresql >= 10", "oracle >= 12"] ) @property def supports_identity_on_null(self): return self.identity_columns + exclusions.only_on(["oracle"]) @property def legacy_engine(self): return exclusions.only_if( lambda config: not getattr(config.db, "_is_future", False) ) @property def stubs_test(self): def requirements(): try: import black # noqa import zimports # noqa return False except Exception: return True imports = exclusions.skip_if( requirements, "black and zimports are required for this test" ) version = exclusions.only_if( lambda _: compat.py39, "python 3.9 is required" ) return imports + version alembic-rel_1_7_6/tests/test_autogen_composition.py000066400000000000000000000402351417624537100230030ustar00rootroot00000000000000import re from sqlalchemy import Column from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import String from sqlalchemy import Table from sqlalchemy.sql.sqltypes import DateTime from alembic import autogenerate from alembic.migration import MigrationContext from alembic.testing import eq_ from alembic.testing import TestBase from alembic.testing.suite._autogen_fixtures import _default_include_object from alembic.testing.suite._autogen_fixtures import AutogenTest from alembic.testing.suite._autogen_fixtures import ModelOne class AutogenerateDiffTest(ModelOne, AutogenTest, TestBase): __only_on__ = "sqlite" def test_render_nothing(self): context = MigrationContext.configure( connection=self.bind.connect(), opts={ "compare_type": True, "compare_server_default": True, "target_metadata": self.m1, "upgrade_token": "upgrades", "downgrade_token": "downgrades", }, ) template_args = {} autogenerate._render_migration_diffs(context, template_args) eq_( re.sub(r"u'", "'", template_args["upgrades"]), """# ### commands auto generated by Alembic - please adjust! 
### pass # ### end Alembic commands ###""", ) eq_( re.sub(r"u'", "'", template_args["downgrades"]), """# ### commands auto generated by Alembic - please adjust! ### pass # ### end Alembic commands ###""", ) def test_render_nothing_batch(self): context = MigrationContext.configure( connection=self.bind.connect(), opts={ "compare_type": True, "compare_server_default": True, "target_metadata": self.m1, "upgrade_token": "upgrades", "downgrade_token": "downgrades", "alembic_module_prefix": "op.", "sqlalchemy_module_prefix": "sa.", "render_as_batch": True, "include_symbol": lambda name, schema: False, }, ) template_args = {} autogenerate._render_migration_diffs(context, template_args) eq_( re.sub(r"u'", "'", template_args["upgrades"]), """# ### commands auto generated by Alembic - please adjust! ### pass # ### end Alembic commands ###""", ) eq_( re.sub(r"u'", "'", template_args["downgrades"]), """# ### commands auto generated by Alembic - please adjust! ### pass # ### end Alembic commands ###""", ) def test_render_diffs_standard(self): """test a full render including indentation""" template_args = {} autogenerate._render_migration_diffs(self.context, template_args) eq_( re.sub(r"u'", "'", template_args["upgrades"]), """# ### commands auto generated by Alembic - please adjust! ### op.create_table('item', sa.Column('id', sa.Integer(), nullable=False), sa.Column('description', sa.String(length=100), nullable=True), sa.Column('order_id', sa.Integer(), nullable=True), sa.CheckConstraint('len(description) > 5'), sa.ForeignKeyConstraint(['order_id'], ['order.order_id'], ), sa.PrimaryKeyConstraint('id') ) op.drop_table('extra') op.add_column('address', sa.Column('street', sa.String(length=50), \ nullable=True)) op.create_unique_constraint('uq_email', 'address', ['email_address']) op.add_column('order', sa.Column('user_id', sa.Integer(), nullable=True)) op.alter_column('order', 'amount', existing_type=sa.NUMERIC(precision=8, scale=2), type_=sa.Numeric(precision=10, scale=2), nullable=True, existing_server_default=sa.text('0')) op.create_foreign_key(None, 'order', 'user', ['user_id'], ['id']) op.alter_column('user', 'name', existing_type=sa.VARCHAR(length=50), nullable=False) op.alter_column('user', 'a1', existing_type=sa.TEXT(), server_default='x', existing_nullable=True) op.drop_index('pw_idx', table_name='user') op.drop_column('user', 'pw') # ### end Alembic commands ###""", ) eq_( re.sub(r"u'", "'", template_args["downgrades"]), """# ### commands auto generated by Alembic - please adjust! 
### op.add_column('user', sa.Column('pw', sa.VARCHAR(length=50), \ nullable=True)) op.create_index('pw_idx', 'user', ['pw'], unique=False) op.alter_column('user', 'a1', existing_type=sa.TEXT(), server_default=None, existing_nullable=True) op.alter_column('user', 'name', existing_type=sa.VARCHAR(length=50), nullable=True) op.drop_constraint(None, 'order', type_='foreignkey') op.alter_column('order', 'amount', existing_type=sa.Numeric(precision=10, scale=2), type_=sa.NUMERIC(precision=8, scale=2), nullable=False, existing_server_default=sa.text('0')) op.drop_column('order', 'user_id') op.drop_constraint('uq_email', 'address', type_='unique') op.drop_column('address', 'street') op.create_table('extra', sa.Column('x', sa.CHAR(), nullable=True), sa.Column('uid', sa.INTEGER(), nullable=True), sa.ForeignKeyConstraint(['uid'], ['user.id'], ) ) op.drop_table('item') # ### end Alembic commands ###""", ) def test_render_diffs_batch(self): """test a full render in batch mode including indentation""" template_args = {} self.context.opts["render_as_batch"] = True autogenerate._render_migration_diffs(self.context, template_args) eq_( re.sub(r"u'", "'", template_args["upgrades"]), """# ### commands auto generated by Alembic - please adjust! ### op.create_table('item', sa.Column('id', sa.Integer(), nullable=False), sa.Column('description', sa.String(length=100), nullable=True), sa.Column('order_id', sa.Integer(), nullable=True), sa.CheckConstraint('len(description) > 5'), sa.ForeignKeyConstraint(['order_id'], ['order.order_id'], ), sa.PrimaryKeyConstraint('id') ) op.drop_table('extra') with op.batch_alter_table('address', schema=None) as batch_op: batch_op.add_column(sa.Column('street', sa.String(length=50), nullable=True)) batch_op.create_unique_constraint('uq_email', ['email_address']) with op.batch_alter_table('order', schema=None) as batch_op: batch_op.add_column(sa.Column('user_id', sa.Integer(), nullable=True)) batch_op.alter_column('amount', existing_type=sa.NUMERIC(precision=8, scale=2), type_=sa.Numeric(precision=10, scale=2), nullable=True, existing_server_default=sa.text('0')) batch_op.create_foreign_key(None, 'user', ['user_id'], ['id']) with op.batch_alter_table('user', schema=None) as batch_op: batch_op.alter_column('name', existing_type=sa.VARCHAR(length=50), nullable=False) batch_op.alter_column('a1', existing_type=sa.TEXT(), server_default='x', existing_nullable=True) batch_op.drop_index('pw_idx') batch_op.drop_column('pw') # ### end Alembic commands ###""", # noqa, ) eq_( re.sub(r"u'", "'", template_args["downgrades"]), """# ### commands auto generated by Alembic - please adjust! 
### with op.batch_alter_table('user', schema=None) as batch_op: batch_op.add_column(sa.Column('pw', sa.VARCHAR(length=50), nullable=True)) batch_op.create_index('pw_idx', ['pw'], unique=False) batch_op.alter_column('a1', existing_type=sa.TEXT(), server_default=None, existing_nullable=True) batch_op.alter_column('name', existing_type=sa.VARCHAR(length=50), nullable=True) with op.batch_alter_table('order', schema=None) as batch_op: batch_op.drop_constraint(None, type_='foreignkey') batch_op.alter_column('amount', existing_type=sa.Numeric(precision=10, scale=2), type_=sa.NUMERIC(precision=8, scale=2), nullable=False, existing_server_default=sa.text('0')) batch_op.drop_column('user_id') with op.batch_alter_table('address', schema=None) as batch_op: batch_op.drop_constraint('uq_email', type_='unique') batch_op.drop_column('street') op.create_table('extra', sa.Column('x', sa.CHAR(), nullable=True), sa.Column('uid', sa.INTEGER(), nullable=True), sa.ForeignKeyConstraint(['uid'], ['user.id'], ) ) op.drop_table('item') # ### end Alembic commands ###""", # noqa, ) def test_imports_maintined(self): template_args = {} self.context.opts["render_as_batch"] = True def render_item(type_, col, autogen_context): autogen_context.imports.add( "from mypackage import my_special_import" ) autogen_context.imports.add("from foobar import bat") self.context.opts["render_item"] = render_item autogenerate._render_migration_diffs(self.context, template_args) eq_( set(template_args["imports"].split("\n")), set( [ "from foobar import bat", "from mypackage import my_special_import", ] ), ) class AddColumnOrderTest(AutogenTest, TestBase): @classmethod def _get_db_schema(cls): m = MetaData() Table( "user", m, Column("id", Integer, primary_key=True), Column("name", String(50)), ) return m @classmethod def _get_model_schema(cls): m = MetaData() Table( "user", m, Column("id", Integer, primary_key=True), Column("name", String(50)), Column("username", String(50)), Column("password_hash", String(32)), Column("timestamp", DateTime), ) return m def test_render_add_columns(self): """test #827""" template_args = {} autogenerate._render_migration_diffs(self.context, template_args) eq_( re.sub(r"u'", "'", template_args["upgrades"]), """# ### commands auto generated by Alembic - please adjust! ### op.add_column('user', sa.Column('username', sa.String(length=50), nullable=True)) op.add_column('user', sa.Column('password_hash', sa.String(length=32), nullable=True)) op.add_column('user', sa.Column('timestamp', sa.DateTime(), nullable=True)) # ### end Alembic commands ###""", # noqa E501 ) eq_( re.sub(r"u'", "'", template_args["downgrades"]), """# ### commands auto generated by Alembic - please adjust! ### op.drop_column('user', 'timestamp') op.drop_column('user', 'password_hash') op.drop_column('user', 'username') # ### end Alembic commands ###""", ) class AutogenerateDiffTestWSchema(ModelOne, AutogenTest, TestBase): __only_on__ = "postgresql" schema = "test_schema" def test_render_nothing(self): context = MigrationContext.configure( connection=self.bind.connect(), opts={ "compare_type": True, "compare_server_default": True, "target_metadata": self.m1, "upgrade_token": "upgrades", "downgrade_token": "downgrades", "alembic_module_prefix": "op.", "sqlalchemy_module_prefix": "sa.", "include_object": lambda name, *args: False, }, ) template_args = {} autogenerate._render_migration_diffs(context, template_args) eq_( re.sub(r"u'", "'", template_args["upgrades"]), """# ### commands auto generated by Alembic - please adjust! 
### pass # ### end Alembic commands ###""", ) eq_( re.sub(r"u'", "'", template_args["downgrades"]), """# ### commands auto generated by Alembic - please adjust! ### pass # ### end Alembic commands ###""", ) def test_render_diffs_extras(self): """test a full render including indentation (include and schema)""" template_args = {} self.context.opts.update( { "include_object": _default_include_object, "include_schemas": True, } ) autogenerate._render_migration_diffs(self.context, template_args) eq_( re.sub(r"u'", "'", template_args["upgrades"]), """# ### commands auto generated by Alembic - please adjust! ### op.create_table('item', sa.Column('id', sa.Integer(), nullable=False), sa.Column('description', sa.String(length=100), nullable=True), sa.Column('order_id', sa.Integer(), nullable=True), sa.CheckConstraint('len(description) > 5'), sa.ForeignKeyConstraint(['order_id'], ['%(schema)s.order.order_id'], ), sa.PrimaryKeyConstraint('id'), schema='%(schema)s' ) op.drop_table('extra', schema='%(schema)s') op.add_column('address', sa.Column('street', sa.String(length=50), \ nullable=True), schema='%(schema)s') op.create_unique_constraint('uq_email', 'address', ['email_address'], \ schema='test_schema') op.add_column('order', sa.Column('user_id', sa.Integer(), nullable=True), \ schema='%(schema)s') op.alter_column('order', 'amount', existing_type=sa.NUMERIC(precision=8, scale=2), type_=sa.Numeric(precision=10, scale=2), nullable=True, existing_server_default=sa.text('0'), schema='%(schema)s') op.create_foreign_key(None, 'order', 'user', ['user_id'], ['id'], \ source_schema='%(schema)s', referent_schema='%(schema)s') op.alter_column('user', 'name', existing_type=sa.VARCHAR(length=50), nullable=False, schema='%(schema)s') op.alter_column('user', 'a1', existing_type=sa.TEXT(), server_default='x', existing_nullable=True, schema='%(schema)s') op.drop_index('pw_idx', table_name='user', schema='test_schema') op.drop_column('user', 'pw', schema='%(schema)s') # ### end Alembic commands ###""" % {"schema": self.schema}, ) eq_( re.sub(r"u'", "'", template_args["downgrades"]), """# ### commands auto generated by Alembic - please adjust! 
### op.add_column('user', sa.Column('pw', sa.VARCHAR(length=50), \ autoincrement=False, nullable=True), schema='%(schema)s') op.create_index('pw_idx', 'user', ['pw'], unique=False, schema='%(schema)s') op.alter_column('user', 'a1', existing_type=sa.TEXT(), server_default=None, existing_nullable=True, schema='%(schema)s') op.alter_column('user', 'name', existing_type=sa.VARCHAR(length=50), nullable=True, schema='%(schema)s') op.drop_constraint(None, 'order', schema='%(schema)s', type_='foreignkey') op.alter_column('order', 'amount', existing_type=sa.Numeric(precision=10, scale=2), type_=sa.NUMERIC(precision=8, scale=2), nullable=False, existing_server_default=sa.text('0'), schema='%(schema)s') op.drop_column('order', 'user_id', schema='%(schema)s') op.drop_constraint('uq_email', 'address', schema='test_schema', type_='unique') op.drop_column('address', 'street', schema='%(schema)s') op.create_table('extra', sa.Column('x', sa.CHAR(length=1), autoincrement=False, nullable=True), sa.Column('uid', sa.INTEGER(), autoincrement=False, nullable=True), sa.ForeignKeyConstraint(['uid'], ['%(schema)s.user.id'], \ name='extra_uid_fkey'), schema='%(schema)s' ) op.drop_table('item', schema='%(schema)s') # ### end Alembic commands ###""" # noqa % {"schema": self.schema}, ) alembic-rel_1_7_6/tests/test_autogen_diffs.py000066400000000000000000001606011417624537100215330ustar00rootroot00000000000000from sqlalchemy import BIGINT from sqlalchemy import BigInteger from sqlalchemy import Boolean from sqlalchemy import CHAR from sqlalchemy import CheckConstraint from sqlalchemy import Column from sqlalchemy import DATE from sqlalchemy import DateTime from sqlalchemy import DECIMAL from sqlalchemy import Enum from sqlalchemy import FLOAT from sqlalchemy import ForeignKey from sqlalchemy import ForeignKeyConstraint from sqlalchemy import Index from sqlalchemy import inspect from sqlalchemy import INTEGER from sqlalchemy import Integer from sqlalchemy import JSON from sqlalchemy import LargeBinary from sqlalchemy import MetaData from sqlalchemy import Numeric from sqlalchemy import PrimaryKeyConstraint from sqlalchemy import SmallInteger from sqlalchemy import String from sqlalchemy import Table from sqlalchemy import Text from sqlalchemy import text from sqlalchemy import TIMESTAMP from sqlalchemy import TypeDecorator from sqlalchemy import Unicode from sqlalchemy import UniqueConstraint from sqlalchemy import VARCHAR from sqlalchemy.dialects import mysql from sqlalchemy.dialects import sqlite from sqlalchemy.types import NULLTYPE from sqlalchemy.types import VARBINARY from alembic import autogenerate from alembic import testing from alembic.autogenerate import api from alembic.migration import MigrationContext from alembic.operations import ops from alembic.testing import assert_raises_message from alembic.testing import config from alembic.testing import eq_ from alembic.testing import is_ from alembic.testing import is_not_ from alembic.testing import mock from alembic.testing import schemacompare from alembic.testing import TestBase from alembic.testing.env import clear_staging_env from alembic.testing.env import staging_env from alembic.testing.suite._autogen_fixtures import _default_name_filters from alembic.testing.suite._autogen_fixtures import _default_object_filters from alembic.testing.suite._autogen_fixtures import AutogenFixtureTest from alembic.testing.suite._autogen_fixtures import AutogenTest from alembic.util import CommandError # TODO: we should make an adaptation of CompareMetadataToInspectorTest 
that is # more well suited towards generic backends (2021-06-10) class AutogenCrossSchemaTest(AutogenTest, TestBase): __only_on__ = "postgresql" __backend__ = True @classmethod def _get_db_schema(cls): m = MetaData() Table("t1", m, Column("x", Integer)) Table("t2", m, Column("y", Integer), schema=config.test_schema) Table("t6", m, Column("u", Integer)) Table("t7", m, Column("v", Integer), schema=config.test_schema) return m @classmethod def _get_model_schema(cls): m = MetaData() Table("t3", m, Column("q", Integer)) Table("t4", m, Column("z", Integer), schema=config.test_schema) Table("t6", m, Column("u", Integer)) Table("t7", m, Column("v", Integer), schema=config.test_schema) return m def test_default_schema_omitted_upgrade(self): def include_object(obj, name, type_, reflected, compare_to): if type_ == "table": return name == "t3" else: return True self._update_context( object_filters=include_object, include_schemas=True ) uo = ops.UpgradeOps(ops=[]) autogenerate._produce_net_changes(self.autogen_context, uo) diffs = uo.as_diffs() eq_(diffs[0][0], "add_table") eq_(diffs[0][1].schema, None) def test_default_schema_omitted_by_table_name_upgrade(self): def include_name(name, type_, parent_names): if type_ == "table": retval = name in ["t1", "t6"] if retval: eq_(parent_names["schema_name"], None) eq_(parent_names["schema_qualified_table_name"], name) else: eq_(parent_names["schema_name"], config.test_schema) eq_( parent_names["schema_qualified_table_name"], "%s.%s" % (config.test_schema, name), ) return retval else: return True self._update_context(name_filters=include_name, include_schemas=True) uo = ops.UpgradeOps(ops=[]) autogenerate._produce_net_changes(self.autogen_context, uo) diffs = uo.as_diffs() eq_( {(d[0], d[1].name) for d in diffs}, { ("add_table", "t3"), ("add_table", "t4"), ("remove_table", "t1"), ("add_table", "t7"), }, ) def test_default_schema_omitted_by_schema_name_upgrade(self): def include_name(name, type_, parent_names): if type_ == "schema": assert not parent_names return name is None else: return True self._update_context(name_filters=include_name, include_schemas=True) uo = ops.UpgradeOps(ops=[]) autogenerate._produce_net_changes(self.autogen_context, uo) diffs = uo.as_diffs() eq_( {(d[0], d[1].name) for d in diffs}, { ("add_table", "t3"), ("add_table", "t4"), ("remove_table", "t1"), ("add_table", "t7"), }, ) def test_alt_schema_included_upgrade(self): def include_object(obj, name, type_, reflected, compare_to): if type_ == "table": return name == "t4" else: return True self._update_context( object_filters=include_object, include_schemas=True ) uo = ops.UpgradeOps(ops=[]) autogenerate._produce_net_changes(self.autogen_context, uo) diffs = uo.as_diffs() eq_(diffs[0][0], "add_table") eq_(diffs[0][1].schema, config.test_schema) def test_alt_schema_included_by_schema_name(self): def include_name(name, type_, parent_names): if type_ == "schema": assert not parent_names return name == config.test_schema else: return True self._update_context(name_filters=include_name, include_schemas=True) uo = ops.UpgradeOps(ops=[]) autogenerate._produce_net_changes(self.autogen_context, uo) # does not include "t1" in drops because t1 is in default schema # includes "t6" in adds because t6 is in default schema, was omitted, # so reflection added it diffs = uo.as_diffs() eq_( {(d[0], d[1].name) for d in diffs}, { ("add_table", "t3"), ("add_table", "t6"), ("add_table", "t4"), ("remove_table", "t2"), }, ) def test_default_schema_omitted_downgrade(self): def include_object(obj, name, type_, 
reflected, compare_to): if type_ == "table": return name == "t1" else: return True self._update_context( object_filters=include_object, include_schemas=True ) uo = ops.UpgradeOps(ops=[]) autogenerate._produce_net_changes(self.autogen_context, uo) diffs = uo.as_diffs() eq_(diffs[0][0], "remove_table") eq_(diffs[0][1].schema, None) def test_alt_schema_included_downgrade(self): def include_object(obj, name, type_, reflected, compare_to): if type_ == "table": return name == "t2" else: return True self._update_context( object_filters=include_object, include_schemas=True ) uo = ops.UpgradeOps(ops=[]) autogenerate._produce_net_changes(self.autogen_context, uo) diffs = uo.as_diffs() eq_(diffs[0][0], "remove_table") eq_(diffs[0][1].schema, config.test_schema) class AutogenDefaultSchemaTest(AutogenFixtureTest, TestBase): __only_on__ = "postgresql" __backend__ = True def test_uses_explcit_schema_in_default_one(self): default_schema = self.bind.dialect.default_schema_name m1 = MetaData() m2 = MetaData() Table("a", m1, Column("x", String(50))) Table("a", m2, Column("x", String(50)), schema=default_schema) diffs = self._fixture(m1, m2, include_schemas=True) eq_(diffs, []) def test_uses_explcit_schema_in_default_two(self): default_schema = self.bind.dialect.default_schema_name m1 = MetaData() m2 = MetaData() Table("a", m1, Column("x", String(50))) Table("a", m2, Column("x", String(50)), schema=default_schema) Table("a", m2, Column("y", String(50)), schema="test_schema") diffs = self._fixture(m1, m2, include_schemas=True) eq_(len(diffs), 1) eq_(diffs[0][0], "add_table") eq_(diffs[0][1].schema, "test_schema") eq_(diffs[0][1].c.keys(), ["y"]) def test_uses_explcit_schema_in_default_three(self): default_schema = self.bind.dialect.default_schema_name m1 = MetaData() m2 = MetaData() Table("a", m1, Column("y", String(50)), schema="test_schema") Table("a", m2, Column("x", String(50)), schema=default_schema) Table("a", m2, Column("y", String(50)), schema="test_schema") diffs = self._fixture(m1, m2, include_schemas=True) eq_(len(diffs), 1) eq_(diffs[0][0], "add_table") eq_(diffs[0][1].schema, default_schema) eq_(diffs[0][1].c.keys(), ["x"]) class AutogenDefaultSchemaIsNoneTest(AutogenFixtureTest, TestBase): __only_on__ = "sqlite" def setUp(self): super(AutogenDefaultSchemaIsNoneTest, self).setUp() # in SQLAlchemy 1.4, SQLite dialect is setting this name # to "main" as is the actual default schema name for SQLite. 
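# forcing it back to None below lets this fixture mimic a dialect that reports no default schema at all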
self.bind.dialect.default_schema_name = None # prerequisite eq_(self.bind.dialect.default_schema_name, None) def test_no_default_schema(self): m1 = MetaData() m2 = MetaData() Table("a", m1, Column("x", String(50))) Table("a", m2, Column("x", String(50))) def _include_object(obj, name, type_, reflected, compare_to): if type_ == "table": return name in "a" and obj.schema != "main" else: return True diffs = self._fixture( m1, m2, include_schemas=True, object_filters=_include_object ) eq_(len(diffs), 0) class ModelOne: __requires__ = ("unique_constraint_reflection",) schema = None @classmethod def _get_db_schema(cls): schema = cls.schema m = MetaData(schema=schema) Table( "user", m, Column("id", Integer, primary_key=True), Column("name", String(50)), Column("a1", Text), Column("pw", String(50)), Index("pw_idx", "pw"), ) Table( "address", m, Column("id", Integer, primary_key=True), Column("email_address", String(100), nullable=False), ) Table( "order", m, Column("order_id", Integer, primary_key=True), Column( "amount", Numeric(8, 2), nullable=False, server_default=text("0"), ), CheckConstraint("amount >= 0", name="ck_order_amount"), ) Table( "extra", m, Column("x", CHAR), Column("uid", Integer, ForeignKey("user.id")), ) return m @classmethod def _get_model_schema(cls): schema = cls.schema m = MetaData(schema=schema) Table( "user", m, Column("id", Integer, primary_key=True), Column("name", String(50), nullable=False), Column("a1", Text, server_default="x"), ) Table( "address", m, Column("id", Integer, primary_key=True), Column("email_address", String(100), nullable=False), Column("street", String(50)), UniqueConstraint("email_address", name="uq_email"), ) Table( "order", m, Column("order_id", Integer, primary_key=True), Column( "amount", Numeric(10, 2), nullable=True, server_default=text("0"), ), Column("user_id", Integer, ForeignKey("user.id")), CheckConstraint("amount > -1", name="ck_order_amount"), ) Table( "item", m, Column("id", Integer, primary_key=True), Column("description", String(100)), Column("order_id", Integer, ForeignKey("order.order_id")), CheckConstraint("len(description) > 5"), ) return m class AutogenerateDiffTest(ModelOne, AutogenTest, TestBase): __only_on__ = "sqlite" def test_diffs(self): """test generation of diff rules""" metadata = self.m2 uo = ops.UpgradeOps(ops=[]) ctx = self.autogen_context autogenerate._produce_net_changes(ctx, uo) diffs = uo.as_diffs() eq_( diffs[0], ("add_table", schemacompare.CompareTable(metadata.tables["item"])), ) eq_(diffs[1][0], "remove_table") eq_(diffs[1][1].name, "extra") eq_(diffs[2][0], "add_column") eq_(diffs[2][1], None) eq_(diffs[2][2], "address") eq_(diffs[2][3], metadata.tables["address"].c.street) eq_(diffs[3][0], "add_constraint") eq_(diffs[3][1].name, "uq_email") eq_(diffs[4][0], "add_column") eq_(diffs[4][1], None) eq_(diffs[4][2], "order") eq_(diffs[4][3], metadata.tables["order"].c.user_id) eq_(diffs[5][0][0], "modify_type") eq_(diffs[5][0][1], None) eq_(diffs[5][0][2], "order") eq_(diffs[5][0][3], "amount") eq_(repr(diffs[5][0][5]), "NUMERIC(precision=8, scale=2)") eq_(repr(diffs[5][0][6]), "Numeric(precision=10, scale=2)") self._assert_fk_diff( diffs[6], "add_fk", "order", ["user_id"], "user", ["id"] ) eq_(diffs[7][0][0], "modify_nullable") eq_(diffs[7][0][5], True) eq_(diffs[7][0][6], False) eq_(diffs[8][0][0], "modify_default") eq_(diffs[8][0][1], None) eq_(diffs[8][0][2], "user") eq_(diffs[8][0][3], "a1") eq_(diffs[8][0][6].arg, "x") eq_(diffs[9][0], "remove_index") eq_(diffs[9][1].name, "pw_idx") eq_(diffs[10][0], 
"remove_column") eq_(diffs[10][3].name, "pw") eq_(diffs[10][3].table.name, "user") assert isinstance(diffs[10][3].type, String) def test_include_object(self): def include_object(obj, name, type_, reflected, compare_to): assert obj.name == name if type_ == "table": if reflected: assert obj.metadata is not self.m2 else: assert obj.metadata is self.m2 return name in ("address", "order", "user") elif type_ == "column": if reflected: assert obj.table.metadata is not self.m2 else: assert obj.table.metadata is self.m2 return name != "street" else: return True context = MigrationContext.configure( connection=self.bind.connect(), opts={ "compare_type": True, "compare_server_default": True, "target_metadata": self.m2, "include_object": include_object, }, ) diffs = autogenerate.compare_metadata( context, context.opts["target_metadata"] ) alter_cols = ( set( [ d[2] for d in self._flatten_diffs(diffs) if d[0].startswith("modify") ] ) .union( d[3].name for d in self._flatten_diffs(diffs) if d[0] == "add_column" ) .union( d[1].name for d in self._flatten_diffs(diffs) if d[0] == "add_table" ) ) eq_(alter_cols, set(["user_id", "order", "user"])) def test_include_name(self): all_names = set() def include_name(name, type_, parent_names): all_names.add((name, type_, parent_names.get("table_name", None))) if type_ == "table": eq_( parent_names, {"schema_name": None, "schema_qualified_table_name": name}, ) return name in ("address", "order", "user") elif type_ == "column": return name != "street" else: return True context = MigrationContext.configure( connection=self.bind.connect(), opts={ "compare_type": True, "compare_server_default": True, "target_metadata": self.m2, "include_name": include_name, }, ) diffs = autogenerate.compare_metadata( context, context.opts["target_metadata"] ) eq_( all_names, { (None, "schema", None), ("user", "table", None), ("id", "column", "user"), ("name", "column", "user"), ("a1", "column", "user"), ("pw", "column", "user"), ("pw_idx", "index", "user"), ("order", "table", None), ("order_id", "column", "order"), ("amount", "column", "order"), ("address", "table", None), ("id", "column", "address"), ("email_address", "column", "address"), ("extra", "table", None), }, ) alter_cols = ( set( [ d[2] for d in self._flatten_diffs(diffs) if d[0].startswith("modify") ] ) .union( d[3].name for d in self._flatten_diffs(diffs) if d[0] == "add_column" ) .union( d[1].name for d in self._flatten_diffs(diffs) if d[0] == "add_table" ) ) eq_(alter_cols, {"user_id", "order", "user", "street", "item"}) def test_skip_null_type_comparison_reflected(self): ac = ops.AlterColumnOp("sometable", "somecol") autogenerate.compare._compare_type( self.autogen_context, ac, None, "sometable", "somecol", Column("somecol", NULLTYPE), Column("somecol", Integer()), ) diff = ac.to_diff_tuple() assert not diff def test_skip_null_type_comparison_local(self): ac = ops.AlterColumnOp("sometable", "somecol") autogenerate.compare._compare_type( self.autogen_context, ac, None, "sometable", "somecol", Column("somecol", Integer()), Column("somecol", NULLTYPE), ) diff = ac.to_diff_tuple() assert not diff def test_custom_type_compare(self): class MyType(TypeDecorator): impl = Integer def compare_against_backend(self, dialect, conn_type): return isinstance(conn_type, Integer) ac = ops.AlterColumnOp("sometable", "somecol") autogenerate.compare._compare_type( self.autogen_context, ac, None, "sometable", "somecol", Column("somecol", INTEGER()), Column("somecol", MyType()), ) assert not ac.has_changes() ac = 
ops.AlterColumnOp("sometable", "somecol") autogenerate.compare._compare_type( self.autogen_context, ac, None, "sometable", "somecol", Column("somecol", String()), Column("somecol", MyType()), ) diff = ac.to_diff_tuple() eq_(diff[0][0:4], ("modify_type", None, "sometable", "somecol")) def test_affinity_typedec(self): class MyType(TypeDecorator): impl = CHAR def load_dialect_impl(self, dialect): if dialect.name == "sqlite": return dialect.type_descriptor(Integer()) else: return dialect.type_descriptor(CHAR(32)) uo = ops.AlterColumnOp("sometable", "somecol") autogenerate.compare._compare_type( self.autogen_context, uo, None, "sometable", "somecol", Column("somecol", Integer, nullable=True), Column("somecol", MyType()), ) assert not uo.has_changes() def test_dont_barf_on_already_reflected(self): from sqlalchemy.util import OrderedSet inspector = inspect(self.bind) uo = ops.UpgradeOps(ops=[]) autogenerate.compare._compare_tables( OrderedSet([(None, "extra"), (None, "user")]), OrderedSet(), inspector, uo, self.autogen_context, ) eq_( [(rec[0], rec[1].name) for rec in uo.as_diffs()], [ ("remove_table", "extra"), ("remove_index", "pw_idx"), ("remove_table", "user"), ], ) class AutogenerateDiffTestWSchema(ModelOne, AutogenTest, TestBase): __only_on__ = "postgresql" __backend__ = True schema = "test_schema" def test_diffs(self): """test generation of diff rules""" metadata = self.m2 self._update_context(include_schemas=True) uo = ops.UpgradeOps(ops=[]) autogenerate._produce_net_changes(self.autogen_context, uo) diffs = uo.as_diffs() eq_( diffs[0], ( "add_table", schemacompare.CompareTable( metadata.tables["%s.item" % self.schema] ), ), ) eq_(diffs[1][0], "remove_table") eq_(diffs[1][1].name, "extra") eq_(diffs[2][0], "add_column") eq_(diffs[2][1], self.schema) eq_(diffs[2][2], "address") eq_( schemacompare.CompareColumn( metadata.tables["%s.address" % self.schema].c.street ), diffs[2][3], ) eq_(diffs[3][0], "add_constraint") eq_(diffs[3][1].name, "uq_email") eq_(diffs[4][0], "add_column") eq_(diffs[4][1], self.schema) eq_(diffs[4][2], "order") eq_( schemacompare.CompareColumn( metadata.tables["%s.order" % self.schema].c.user_id ), diffs[4][3], ) eq_(diffs[5][0][0], "modify_type") eq_(diffs[5][0][1], self.schema) eq_(diffs[5][0][2], "order") eq_(diffs[5][0][3], "amount") eq_(repr(diffs[5][0][5]), "NUMERIC(precision=8, scale=2)") eq_(repr(diffs[5][0][6]), "Numeric(precision=10, scale=2)") self._assert_fk_diff( diffs[6], "add_fk", "order", ["user_id"], "user", ["id"], source_schema=config.test_schema, ) eq_(diffs[7][0][0], "modify_nullable") eq_(diffs[7][0][5], True) eq_(diffs[7][0][6], False) eq_(diffs[8][0][0], "modify_default") eq_(diffs[8][0][1], self.schema) eq_(diffs[8][0][2], "user") eq_(diffs[8][0][3], "a1") eq_(diffs[8][0][6].arg, "x") eq_(diffs[9][0], "remove_index") eq_(diffs[9][1].name, "pw_idx") eq_(diffs[10][0], "remove_column") eq_(diffs[10][3].name, "pw") class CompareTypeSpecificityTest(TestBase): @testing.fixture def impl_fixture(self): from alembic.ddl import impl from sqlalchemy.engine import default return impl.DefaultImpl( default.DefaultDialect(), None, False, True, None, {} ) def test_typedec_to_nonstandard(self, impl_fixture): class PasswordType(TypeDecorator): impl = VARBINARY def copy(self, **kw): return PasswordType(self.impl.length) def load_dialect_impl(self, dialect): if dialect.name == "default": impl = sqlite.NUMERIC(self.length) else: impl = VARBINARY(self.length) return dialect.type_descriptor(impl) impl_fixture.compare_type( Column("x", sqlite.NUMERIC(50)), 
Column("x", PasswordType(50)) ) @testing.combinations( (VARCHAR(30), String(30), False), (VARCHAR(30), String(40), True), (VARCHAR(30), Integer(), True), (Text(), String(255), True), # insp + metadata types same number of # args but are different; they're different (DECIMAL(10, 5), DECIMAL(10, 6), True), # insp + metadata types, inspected type # has an additional arg; assume this is additional # default precision on the part of the DB, assume they are # equivalent (DECIMAL(10, 5), DECIMAL(10), False), # insp + metadata types, metadata type # has an additional arg; this can go either way, either the # metadata has extra precision, or the DB doesn't support the # element, go with consider them equivalent for now (DECIMAL(10), DECIMAL(10, 5), False), (DECIMAL(10, 2), Numeric(10), False), (DECIMAL(10, 5), Numeric(10, 5), False), (DECIMAL(10, 5), Numeric(12, 5), True), (DECIMAL(10, 5), DateTime(), True), (Numeric(), Numeric(scale=5), False), (INTEGER(), Integer(), False), (BIGINT(), Integer(), True), (BIGINT(), BigInteger(), False), (BIGINT(), SmallInteger(), True), (INTEGER(), SmallInteger(), True), (Integer(), String(), True), id_="ssa", argnames="inspected_type,metadata_type,expected", ) def test_compare_type( self, impl_fixture, inspected_type, metadata_type, expected ): is_( impl_fixture.compare_type( Column("x", inspected_type), Column("x", metadata_type) ), expected, ) class CompareMetadataToInspectorTest(TestBase): __backend__ = True @classmethod def _get_bind(cls): return config.db configure_opts = {} def setUp(self): staging_env() self.bind = self._get_bind() self.m1 = MetaData() def tearDown(self): self.m1.drop_all(self.bind) clear_staging_env() def _compare_columns(self, cola, colb): Table("sometable", self.m1, Column("col", cola)) self.m1.create_all(self.bind) m2 = MetaData() Table("sometable", m2, Column("col", colb)) ctx_opts = { "compare_type": True, "compare_server_default": True, "target_metadata": m2, "upgrade_token": "upgrades", "downgrade_token": "downgrades", "alembic_module_prefix": "op.", "sqlalchemy_module_prefix": "sa.", "include_object": _default_object_filters, "include_name": _default_name_filters, } if self.configure_opts: ctx_opts.update(self.configure_opts) with self.bind.connect() as conn: context = MigrationContext.configure( connection=conn, opts=ctx_opts ) autogen_context = api.AutogenContext(context, m2) uo = ops.UpgradeOps(ops=[]) autogenerate._produce_net_changes(autogen_context, uo) return bool(uo.as_diffs()) @testing.combinations( (INTEGER(),), (CHAR(),), (VARCHAR(32),), (Text(),), (FLOAT(),), (Numeric(),), (DECIMAL(),), (TIMESTAMP(),), (DateTime(),), (Boolean(),), (BigInteger(),), (SmallInteger(),), (DATE(),), (String(32),), (LargeBinary(),), (Unicode(32),), (JSON(), config.requirements.json_type), (mysql.LONGTEXT(), config.requirements.mysql), (Enum("one", "two", "three", name="the_enum"),), ) def test_introspected_columns_match_metadata_columns(self, cola): # this is ensuring false positives aren't generated for types # that have not changed. is_(self._compare_columns(cola, cola), False) # TODO: ideally the backend-specific types would be tested # within the test suites for those backends. 
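# each tuple below pairs a reflected (database-side) type with a metadata type, plus the boolean "changed" result the comparison is expected to report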
@testing.combinations( (String(32), VARCHAR(32), False), (VARCHAR(6), String(6), False), (CHAR(), String(1), True), (Text(), VARCHAR(255), True), (Unicode(32), String(32), False, config.requirements.unicode_string), (Unicode(32), VARCHAR(32), False, config.requirements.unicode_string), (VARCHAR(6), VARCHAR(12), True), (VARCHAR(6), String(12), True), (Integer(), String(10), True), (String(10), Integer(), True), ( Unicode(30, collation="en_US"), Unicode(30, collation="en_US"), False, # unfortunately dialects don't seem to consistently # reflect collations right now so we can't test for # positives here config.requirements.postgresql, ), ( mysql.VARCHAR(200, charset="utf8"), Unicode(200), False, config.requirements.mysql, ), ( mysql.VARCHAR(200, charset="latin1"), mysql.VARCHAR(200, charset="utf-8"), True, config.requirements.mysql, ), ( String(255, collation="utf8_bin"), String(255), False, config.requirements.mysql, ), ( String(255, collation="utf8_bin"), String(255, collation="latin1_bin"), True, config.requirements.mysql, ), ) def test_string_comparisons(self, cola, colb, expect_changes): is_(self._compare_columns(cola, colb), expect_changes) @testing.combinations( ( DateTime(), DateTime(timezone=False), False, config.requirements.datetime_timezone, ), ( DateTime(), DateTime(timezone=True), True, config.requirements.datetime_timezone, ), ( DateTime(timezone=True), DateTime(timezone=False), True, config.requirements.datetime_timezone, ), ) def test_datetime_comparisons(self, cola, colb, expect_changes): is_(self._compare_columns(cola, colb), expect_changes) @testing.combinations( (Integer(), Integer(), False), ( Integer(), Numeric(8, 0), True, config.requirements.integer_subtype_comparisons, ), (Numeric(8, 0), Numeric(8, 2), True), ( BigInteger(), Integer(), True, config.requirements.integer_subtype_comparisons, ), ( SmallInteger(), Integer(), True, config.requirements.integer_subtype_comparisons, ), ( # note that the mysql.INTEGER tests only use these params # if the dialect is "mysql". however we also test that their # dialect-agnostic representation compares by running this # against other dialects. 
mysql.INTEGER(unsigned=True, display_width=10), mysql.INTEGER(unsigned=True, display_width=10), False, ), (mysql.INTEGER(unsigned=True), mysql.INTEGER(unsigned=True), False), ( mysql.INTEGER(unsigned=True, display_width=10), mysql.INTEGER(unsigned=True), False, ), ( mysql.INTEGER(unsigned=True), mysql.INTEGER(unsigned=True, display_width=10), False, ), ) def test_numeric_comparisons(self, cola, colb, expect_changes): is_(self._compare_columns(cola, colb), expect_changes) class AutogenSystemColTest(AutogenTest, TestBase): __only_on__ = "postgresql" @classmethod def _get_db_schema(cls): m = MetaData() Table("sometable", m, Column("id", Integer, primary_key=True)) return m @classmethod def _get_model_schema(cls): m = MetaData() # 'xmin' is implicitly present, when added to a model should produce # no change Table( "sometable", m, Column("id", Integer, primary_key=True), Column("xmin", Integer, system=True), ) return m def test_dont_add_system(self): uo = ops.UpgradeOps(ops=[]) autogenerate._produce_net_changes(self.autogen_context, uo) diffs = uo.as_diffs() eq_(diffs, []) class AutogenerateVariantCompareTest(AutogenTest, TestBase): __backend__ = True @classmethod def _get_db_schema(cls): m = MetaData() Table( "sometable", m, Column( "id", BigInteger().with_variant(Integer, "sqlite"), primary_key=True, ), Column("value", String(50)), ) return m @classmethod def _get_model_schema(cls): m = MetaData() Table( "sometable", m, Column( "id", BigInteger().with_variant(Integer, "sqlite"), primary_key=True, ), Column("value", String(50)), ) return m def test_variant_no_issue(self): uo = ops.UpgradeOps(ops=[]) autogenerate._produce_net_changes(self.autogen_context, uo) diffs = uo.as_diffs() eq_(diffs, []) class AutogenerateCustomCompareTypeTest(AutogenTest, TestBase): __only_on__ = "sqlite" @classmethod def _get_db_schema(cls): m = MetaData() Table( "sometable", m, Column("id", Integer, primary_key=True), Column("value", Integer), ) return m @classmethod def _get_model_schema(cls): m = MetaData() Table( "sometable", m, Column("id", Integer, primary_key=True), Column("value", String), ) return m def test_uses_custom_compare_type_function(self): my_compare_type = mock.Mock() self.context._user_compare_type = my_compare_type uo = ops.UpgradeOps(ops=[]) ctx = self.autogen_context autogenerate._produce_net_changes(ctx, uo) first_table = self.m2.tables["sometable"] first_column = first_table.columns["id"] eq_(len(my_compare_type.mock_calls), 2) # We'll just test the first call _, args, _ = my_compare_type.mock_calls[0] ( context, inspected_column, metadata_column, inspected_type, metadata_type, ) = args eq_(context, self.context) eq_(metadata_column, first_column) eq_(metadata_type, first_column.type) eq_(inspected_column.name, first_column.name) eq_(type(inspected_type), INTEGER) def test_column_type_not_modified_custom_compare_type_returns_False(self): my_compare_type = mock.Mock() my_compare_type.return_value = False self.context._user_compare_type = my_compare_type diffs = [] ctx = self.autogen_context diffs = [] autogenerate._produce_net_changes(ctx, diffs) eq_(diffs, []) def test_column_type_modified_custom_compare_type_returns_True(self): my_compare_type = mock.Mock() my_compare_type.return_value = True self.context._user_compare_type = my_compare_type ctx = self.autogen_context uo = ops.UpgradeOps(ops=[]) autogenerate._produce_net_changes(ctx, uo) diffs = uo.as_diffs() eq_(diffs[0][0][0], "modify_type") eq_(diffs[1][0][0], "modify_type") class IncludeFiltersAPITest(AutogenTest, TestBase): @classmethod 
def _get_db_schema(cls): return MetaData() @classmethod def _get_model_schema(cls): return MetaData() def test_run_name_filters_supports_extension_types(self): include_name = mock.Mock() self._update_context(name_filters=include_name, include_schemas=True) self.autogen_context.run_name_filters( name="some_function", type_="function", parent_names={"schema_name": "public"}, ) eq_( include_name.mock_calls, [ mock.call( "some_function", "function", {"schema_name": "public"} ) ], ) def test_run_object_filters_supports_extension_types(self): include_object = mock.Mock() self._update_context( object_filters=include_object, include_schemas=True ) class ExtFunction: pass extfunc = ExtFunction() self.autogen_context.run_object_filters( object_=extfunc, name="some_function", type_="function", reflected=False, compare_to=None, ) eq_( include_object.mock_calls, [mock.call(extfunc, "some_function", "function", False, None)], ) class PKConstraintUpgradesIgnoresNullableTest(AutogenTest, TestBase): __backend__ = True # test behavior for issue originally observed in SQLAlchemy issue #3023, # alembic issue #199 @classmethod def _get_db_schema(cls): m = MetaData() Table( "person_to_role", m, Column("person_id", Integer, autoincrement=False), Column("role_id", Integer, autoincrement=False), PrimaryKeyConstraint("person_id", "role_id"), ) return m @classmethod def _get_model_schema(cls): return cls._get_db_schema() def test_no_change(self): uo = ops.UpgradeOps(ops=[]) ctx = self.autogen_context autogenerate._produce_net_changes(ctx, uo) diffs = uo.as_diffs() eq_(diffs, []) class AutogenKeyTest(AutogenTest, TestBase): __only_on__ = "sqlite" @classmethod def _get_db_schema(cls): m = MetaData() Table( "someothertable", m, Column("id", Integer, primary_key=True), Column("value", Integer, key="somekey"), ) return m @classmethod def _get_model_schema(cls): m = MetaData() Table( "sometable", m, Column("id", Integer, primary_key=True), Column("value", Integer, key="someotherkey"), ) Table( "someothertable", m, Column("id", Integer, primary_key=True), Column("value", Integer, key="somekey"), Column("othervalue", Integer, key="otherkey"), ) return m symbols = ["someothertable", "sometable"] def test_autogen(self): uo = ops.UpgradeOps(ops=[]) ctx = self.autogen_context autogenerate._produce_net_changes(ctx, uo) diffs = uo.as_diffs() eq_(diffs[0][0], "add_table") eq_(diffs[0][1].name, "sometable") eq_(diffs[1][0], "add_column") eq_(diffs[1][3].key, "otherkey") class AutogenVersionTableTest(AutogenTest, TestBase): __only_on__ = "sqlite" version_table_name = "alembic_version" version_table_schema = None @classmethod def _get_db_schema(cls): m = MetaData() Table( cls.version_table_name, m, Column("x", Integer), schema=cls.version_table_schema, ) return m @classmethod def _get_model_schema(cls): m = MetaData() return m def test_no_version_table(self): ctx = self.autogen_context uo = ops.UpgradeOps(ops=[]) autogenerate._produce_net_changes(ctx, uo) eq_(uo.as_diffs(), []) def test_version_table_in_target(self): Table( self.version_table_name, self.m2, Column("x", Integer), schema=self.version_table_schema, ) ctx = self.autogen_context uo = ops.UpgradeOps(ops=[]) autogenerate._produce_net_changes(ctx, uo) eq_(uo.as_diffs(), []) class AutogenCustomVersionTableSchemaTest(AutogenVersionTableTest): __only_on__ = "postgresql" __backend__ = True version_table_schema = "test_schema" configure_opts = {"version_table_schema": "test_schema"} class AutogenCustomVersionTableTest(AutogenVersionTableTest): version_table_name = 
"my_version_table" configure_opts = {"version_table": "my_version_table"} class AutogenCustomVersionTableAndSchemaTest(AutogenVersionTableTest): __only_on__ = "postgresql" __backend__ = True version_table_name = "my_version_table" version_table_schema = "test_schema" configure_opts = { "version_table": "my_version_table", "version_table_schema": "test_schema", } class AutogenerateDiffOrderTest(AutogenTest, TestBase): __only_on__ = "sqlite" @classmethod def _get_db_schema(cls): return MetaData() @classmethod def _get_model_schema(cls): m = MetaData() Table("parent", m, Column("id", Integer, primary_key=True)) Table( "child", m, Column("parent_id", Integer, ForeignKey("parent.id")) ) return m def test_diffs_order(self): """ Added in order to test that child tables(tables with FKs) are generated before their parent tables """ ctx = self.autogen_context uo = ops.UpgradeOps(ops=[]) autogenerate._produce_net_changes(ctx, uo) diffs = uo.as_diffs() eq_(diffs[0][0], "add_table") eq_(diffs[0][1].name, "parent") eq_(diffs[1][0], "add_table") eq_(diffs[1][1].name, "child") class CompareMetadataTest(ModelOne, AutogenTest, TestBase): __only_on__ = "sqlite" def test_compare_metadata(self): metadata = self.m2 diffs = autogenerate.compare_metadata(self.context, metadata) eq_( diffs[0], ("add_table", schemacompare.CompareTable(metadata.tables["item"])), ) eq_(diffs[1][0], "remove_table") eq_(diffs[1][1].name, "extra") eq_(diffs[2][0], "add_column") eq_(diffs[2][1], None) eq_(diffs[2][2], "address") eq_(diffs[2][3], metadata.tables["address"].c.street) eq_(diffs[3][0], "add_constraint") eq_(diffs[3][1].name, "uq_email") eq_(diffs[4][0], "add_column") eq_(diffs[4][1], None) eq_(diffs[4][2], "order") eq_(diffs[4][3], metadata.tables["order"].c.user_id) eq_(diffs[5][0][0], "modify_type") eq_(diffs[5][0][1], None) eq_(diffs[5][0][2], "order") eq_(diffs[5][0][3], "amount") eq_(repr(diffs[5][0][5]), "NUMERIC(precision=8, scale=2)") eq_(repr(diffs[5][0][6]), "Numeric(precision=10, scale=2)") self._assert_fk_diff( diffs[6], "add_fk", "order", ["user_id"], "user", ["id"] ) eq_(diffs[7][0][0], "modify_nullable") eq_(diffs[7][0][5], True) eq_(diffs[7][0][6], False) eq_(diffs[8][0][0], "modify_default") eq_(diffs[8][0][1], None) eq_(diffs[8][0][2], "user") eq_(diffs[8][0][3], "a1") eq_(diffs[8][0][6].arg, "x") eq_(diffs[9][0], "remove_index") eq_(diffs[9][1].name, "pw_idx") eq_(diffs[10][0], "remove_column") eq_(diffs[10][3].name, "pw") def test_compare_metadata_include_object(self): metadata = self.m2 def include_object(obj, name, type_, reflected, compare_to): if type_ == "table": return name in ("extra", "order") elif type_ == "column": return name != "amount" else: return True context = MigrationContext.configure( connection=self.bind.connect(), opts={ "compare_type": True, "compare_server_default": True, "include_object": include_object, }, ) diffs = autogenerate.compare_metadata(context, metadata) eq_(diffs[0][0], "remove_table") eq_(diffs[0][1].name, "extra") eq_(diffs[1][0], "add_column") eq_(diffs[1][1], None) eq_(diffs[1][2], "order") eq_(diffs[1][3], metadata.tables["order"].c.user_id) def test_compare_metadata_include_name(self): metadata = self.m2 all_names = set() def include_name(name, type_, parent_names): all_names.add((name, type_, parent_names.get("table_name", None))) if type_ == "table": return name in ("extra", "order") elif type_ == "column": return name != "amount" else: return True context = MigrationContext.configure( connection=self.bind.connect(), opts={ "compare_type": True, 
"compare_server_default": True, "include_name": include_name, }, ) diffs = autogenerate.compare_metadata(context, metadata) eq_( all_names, { ("user", "table", None), ("order", "table", None), ("address", "table", None), (None, "schema", None), ("amount", "column", "order"), ("extra", "table", None), ("order_id", "column", "order"), }, ) eq_( { ( d[0], d[3].name if d[0] == "add_column" else d[1].name, d[2] if d[0] == "add_column" else None, ) for d in diffs }, { ("remove_table", "extra", None), ("add_fk", None, None), ("add_column", "amount", "order"), ("add_table", "user", None), ("add_table", "item", None), ("add_column", "user_id", "order"), ("add_table", "address", None), }, ) def test_compare_metadata_as_sql(self): context = MigrationContext.configure( connection=self.bind.connect(), opts={"as_sql": True} ) metadata = self.m2 assert_raises_message( CommandError, "autogenerate can't use as_sql=True as it prevents " "querying the database for schema information", autogenerate.compare_metadata, context, metadata, ) class PGCompareMetaData(ModelOne, AutogenTest, TestBase): __only_on__ = "postgresql" __backend__ = True schema = "test_schema" def test_compare_metadata_schema(self): metadata = self.m2 context = MigrationContext.configure( connection=self.bind.connect(), opts={"include_schemas": True} ) diffs = autogenerate.compare_metadata(context, metadata) eq_( diffs[0], ( "add_table", schemacompare.CompareTable( metadata.tables["test_schema.item"] ), ), ) eq_(diffs[1][0], "remove_table") eq_(diffs[1][1].name, "extra") eq_(diffs[2][0], "add_column") eq_(diffs[2][1], "test_schema") eq_(diffs[2][2], "address") eq_( schemacompare.CompareColumn( metadata.tables["test_schema.address"].c.street ), diffs[2][3], ) eq_(diffs[3][0], "add_constraint") eq_(diffs[3][1].name, "uq_email") eq_(diffs[4][0], "add_column") eq_(diffs[4][1], "test_schema") eq_(diffs[4][2], "order") eq_( schemacompare.CompareColumn( metadata.tables["test_schema.order"].c.user_id ), diffs[4][3], ) eq_(diffs[5][0][0], "modify_nullable") eq_(diffs[5][0][5], False) eq_(diffs[5][0][6], True) class OrigObjectTest(TestBase): def setUp(self): self.metadata = m = MetaData() t = Table( "t", m, Column("id", Integer(), primary_key=True), Column("x", Integer()), ) self.ix = Index("ix1", t.c.id) fk = ForeignKeyConstraint(["t_id"], ["t.id"]) q = Table("q", m, Column("t_id", Integer()), fk) self.table = t self.fk = fk self.ck = CheckConstraint(t.c.x > 5) t.append_constraint(self.ck) self.uq = UniqueConstraint(q.c.t_id) self.pk = t.primary_key def test_drop_fk(self): fk = self.fk op = ops.DropConstraintOp.from_constraint(fk) eq_(op.to_constraint(), schemacompare.CompareForeignKey(fk)) eq_(op.reverse().to_constraint(), schemacompare.CompareForeignKey(fk)) def test_add_fk(self): fk = self.fk op = ops.AddConstraintOp.from_constraint(fk) eq_(op.to_constraint(), schemacompare.CompareForeignKey(fk)) eq_(op.reverse().to_constraint(), schemacompare.CompareForeignKey(fk)) is_not_(None, op.to_constraint().table) def test_add_check(self): ck = self.ck op = ops.AddConstraintOp.from_constraint(ck) eq_(op.to_constraint(), schemacompare.CompareCheckConstraint(ck)) eq_( op.reverse().to_constraint(), schemacompare.CompareCheckConstraint(ck), ) is_not_(None, op.to_constraint().table) def test_drop_check(self): ck = self.ck op = ops.DropConstraintOp.from_constraint(ck) eq_(op.to_constraint(), schemacompare.CompareCheckConstraint(ck)) eq_( op.reverse().to_constraint(), schemacompare.CompareCheckConstraint(ck), ) is_not_(None, op.to_constraint().table) def 
test_add_unique(self): uq = self.uq op = ops.AddConstraintOp.from_constraint(uq) eq_(op.to_constraint(), schemacompare.CompareUniqueConstraint(uq)) eq_( op.reverse().to_constraint(), schemacompare.CompareUniqueConstraint(uq), ) is_not_(None, op.to_constraint().table) def test_drop_unique(self): uq = self.uq op = ops.DropConstraintOp.from_constraint(uq) eq_(op.to_constraint(), schemacompare.CompareUniqueConstraint(uq)) eq_( op.reverse().to_constraint(), schemacompare.CompareUniqueConstraint(uq), ) is_not_(None, op.to_constraint().table) def test_add_pk_no_orig(self): op = ops.CreatePrimaryKeyOp("pk1", "t", ["x", "y"]) pk = op.to_constraint() eq_(pk.name, "pk1") eq_(pk.table.name, "t") def test_add_pk(self): pk = self.pk op = ops.AddConstraintOp.from_constraint(pk) eq_(op.to_constraint(), schemacompare.ComparePrimaryKey(pk)) eq_(op.reverse().to_constraint(), schemacompare.ComparePrimaryKey(pk)) is_not_(None, op.to_constraint().table) def test_drop_pk(self): pk = self.pk op = ops.DropConstraintOp.from_constraint(pk) eq_(op.to_constraint(), schemacompare.ComparePrimaryKey(pk)) eq_(op.reverse().to_constraint(), schemacompare.ComparePrimaryKey(pk)) is_not_(None, op.to_constraint().table) def test_drop_column(self): t = self.table op = ops.DropColumnOp.from_column_and_tablename(None, "t", t.c.x) is_(op.to_column(), t.c.x) is_(op.reverse().to_column(), t.c.x) is_not_(None, op.to_column().table) def test_add_column(self): t = self.table op = ops.AddColumnOp.from_column_and_tablename(None, "t", t.c.x) is_(op.to_column(), t.c.x) is_(op.reverse().to_column(), t.c.x) is_not_(None, op.to_column().table) def test_drop_table(self): t = self.table op = ops.DropTableOp.from_table(t) eq_(op.to_table(), schemacompare.CompareTable(t)) eq_(op.reverse().to_table(), schemacompare.CompareTable(t)) def test_add_table(self): t = self.table op = ops.CreateTableOp.from_table(t) eq_(op.to_table(), schemacompare.CompareTable(t)) eq_(op.reverse().to_table(), schemacompare.CompareTable(t)) def test_drop_index(self): op = ops.DropIndexOp.from_index(self.ix) eq_(op.to_index(), schemacompare.CompareIndex(self.ix)) eq_(op.reverse().to_index(), schemacompare.CompareIndex(self.ix)) def test_create_index(self): op = ops.CreateIndexOp.from_index(self.ix) eq_(op.to_index(), schemacompare.CompareIndex(self.ix)) eq_(op.reverse().to_index(), schemacompare.CompareIndex(self.ix)) class MultipleMetaDataTest(AutogenFixtureTest, TestBase): def test_multiple(self): m1a = MetaData() m1b = MetaData() m1c = MetaData() m2a = MetaData() m2b = MetaData() m2c = MetaData() Table("a", m1a, Column("id", Integer, primary_key=True)) Table("b1", m1b, Column("id", Integer, primary_key=True)) Table("b2", m1b, Column("id", Integer, primary_key=True)) Table( "c1", m1c, Column("id", Integer, primary_key=True), Column("x", Integer), ) a = Table( "a", m2a, Column("id", Integer, primary_key=True), Column("q", Integer), ) Table("b1", m2b, Column("id", Integer, primary_key=True)) Table("c1", m2c, Column("id", Integer, primary_key=True)) c2 = Table("c2", m2c, Column("id", Integer, primary_key=True)) diffs = self._fixture([m1a, m1b, m1c], [m2a, m2b, m2c]) eq_(diffs[0], ("add_table", schemacompare.CompareTable(c2))) eq_(diffs[1][0], "remove_table") eq_(diffs[1][1].name, "b2") eq_(diffs[2], ("add_column", None, "a", a.c.q)) eq_(diffs[3][0:3], ("remove_column", None, "c1")) eq_(diffs[3][3].name, "x") def test_empty_list(self): # because they're going to do it.... 
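# comparing two empty metadata sequences should simply produce no diffs rather than raising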
        diffs = self._fixture([], [])
        eq_(diffs, [])

    def test_non_list_sequence(self):
        # we call it "sequence", let's check that
        m1a = MetaData()
        m1b = MetaData()

        m2a = MetaData()
        m2b = MetaData()

        Table("a", m1a, Column("id", Integer, primary_key=True))
        Table("b", m1b, Column("id", Integer, primary_key=True))

        Table("a", m2a, Column("id", Integer, primary_key=True))
        b = Table(
            "b",
            m2b,
            Column("id", Integer, primary_key=True),
            Column("q", Integer),
        )

        diffs = self._fixture((m1a, m1b), (m2a, m2b))

        eq_(diffs, [("add_column", None, "b", b.c.q)])

    def test_raise_on_dupe(self):
        m1a = MetaData()
        m1b = MetaData()

        m2a = MetaData()
        m2b = MetaData()

        Table("a", m1a, Column("id", Integer, primary_key=True))
        Table("b1", m1b, Column("id", Integer, primary_key=True))
        Table("b2", m1b, Column("id", Integer, primary_key=True))
        Table("b3", m1b, Column("id", Integer, primary_key=True))

        Table("a", m2a, Column("id", Integer, primary_key=True))
        Table("a", m2b, Column("id", Integer, primary_key=True))
        Table("b1", m2b, Column("id", Integer, primary_key=True))
        Table("b2", m2a, Column("id", Integer, primary_key=True))
        Table("b2", m2b, Column("id", Integer, primary_key=True))

        assert_raises_message(
            ValueError,
            'Duplicate table keys across multiple MetaData objects: "a", "b2"',
            self._fixture,
            [m1a, m1b],
            [m2a, m2b],
        )
alembic-rel_1_7_6/tests/test_autogen_indexes.py000066400000000000000000001433231417624537100221010ustar00rootroot00000000000000
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import ForeignKeyConstraint
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Numeric
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import UniqueConstraint
from sqlalchemy.sql.expression import column
from sqlalchemy.sql.expression import desc

from alembic.testing import assertions
from alembic.testing import combinations
from alembic.testing import config
from alembic.testing import eq_
from alembic.testing import schemacompare
from alembic.testing import TestBase
from alembic.testing import util
from alembic.testing.env import staging_env
from alembic.testing.suite._autogen_fixtures import AutogenFixtureTest
from alembic.util import sqla_compat

# TODO: create new suites that are taking tests from this suite, with a
# separate class for AutogenIndexes, AutogenUniqueConstraint, and a
# subset of the tests here. @zzzeek can work on this at a later point.
# (2021-06-10) class NoUqReflection: __requires__ = () def setUp(self): staging_env() self.bind = eng = util.testing_engine() def unimpl(*arg, **kw): raise NotImplementedError() eng.dialect.get_unique_constraints = unimpl def test_add_ix_on_table_create(self): return super(NoUqReflection, self).test_add_ix_on_table_create() def test_add_idx_non_col(self): return super(NoUqReflection, self).test_add_idx_non_col() class AutogenerateUniqueIndexTest(AutogenFixtureTest, TestBase): reports_unique_constraints = True reports_unique_constraints_as_indexes = False __requires__ = ("unique_constraint_reflection",) __only_on__ = "sqlite" def test_index_flag_becomes_named_unique_constraint(self): m1 = MetaData() m2 = MetaData() Table( "user", m1, Column("id", Integer, primary_key=True), Column("name", String(50), nullable=False, index=True), Column("a1", String(10), server_default="x"), ) Table( "user", m2, Column("id", Integer, primary_key=True), Column("name", String(50), nullable=False), Column("a1", String(10), server_default="x"), UniqueConstraint("name", name="uq_user_name"), ) diffs = self._fixture(m1, m2) if self.reports_unique_constraints: eq_(diffs[0][0], "remove_index") eq_(diffs[0][1].name, "ix_user_name") eq_(diffs[1][0], "add_constraint") eq_(diffs[1][1].name, "uq_user_name") else: eq_(diffs[0][0], "remove_index") eq_(diffs[0][1].name, "ix_user_name") def test_add_unique_constraint(self): m1 = MetaData() m2 = MetaData() Table( "address", m1, Column("id", Integer, primary_key=True), Column("email_address", String(100), nullable=False), Column("qpr", String(10), index=True), ) Table( "address", m2, Column("id", Integer, primary_key=True), Column("email_address", String(100), nullable=False), Column("qpr", String(10), index=True), UniqueConstraint("email_address", name="uq_email_address"), ) diffs = self._fixture(m1, m2) if self.reports_unique_constraints: eq_(diffs[0][0], "add_constraint") eq_(diffs[0][1].name, "uq_email_address") else: eq_(diffs, []) def test_unique_flag_nothing_changed(self): m1 = MetaData() m2 = MetaData() Table( "unq_idx", m1, Column("id", Integer, primary_key=True), Column("x", String(20)), Index("x", "x", unique=True), ) Table( "unq_idx", m2, Column("id", Integer, primary_key=True), Column("x", String(20)), Index("x", "x", unique=True), ) diffs = self._fixture(m1, m2) eq_(diffs, []) def test_index_becomes_unique(self): m1 = MetaData() m2 = MetaData() Table( "order", m1, Column("order_id", Integer, primary_key=True), Column("amount", Numeric(10, 2), nullable=True), Column("user_id", Integer), UniqueConstraint( "order_id", "user_id", name="order_order_id_user_id_unique" ), Index("order_user_id_amount_idx", "user_id", "amount"), ) Table( "order", m2, Column("order_id", Integer, primary_key=True), Column("amount", Numeric(10, 2), nullable=True), Column("user_id", Integer), UniqueConstraint( "order_id", "user_id", name="order_order_id_user_id_unique" ), Index( "order_user_id_amount_idx", "user_id", "amount", unique=True ), ) diffs = self._fixture(m1, m2) eq_(diffs[0][0], "remove_index") eq_(diffs[0][1].name, "order_user_id_amount_idx") eq_(diffs[0][1].unique, False) eq_(diffs[1][0], "add_index") eq_(diffs[1][1].name, "order_user_id_amount_idx") eq_(diffs[1][1].unique, True) def test_mismatch_db_named_col_flag(self): m1 = MetaData() m2 = MetaData() Table( "item", m1, Column("x", Integer), UniqueConstraint("x", name="db_generated_name"), ) # test mismatch between unique=True and # named uq constraint Table("item", m2, Column("x", Integer, unique=True)) diffs = 
self._fixture(m1, m2) eq_(diffs, []) def test_new_table_added(self): m1 = MetaData() m2 = MetaData() Table( "extra", m2, Column("foo", Integer, index=True), Column("bar", Integer), Index("newtable_idx", "bar"), ) diffs = self._fixture(m1, m2) eq_(diffs[0][0], "add_table") eq_(diffs[1][0], "add_index") eq_( sqla_compat._get_constraint_final_name( diffs[1][1], config.db.dialect ), "ix_extra_foo", ) eq_(diffs[2][0], "add_index") eq_(diffs[2][1].name, "newtable_idx") def test_named_cols_changed(self): m1 = MetaData() m2 = MetaData() Table( "col_change", m1, Column("x", Integer), Column("y", Integer), UniqueConstraint("x", name="nochange"), ) Table( "col_change", m2, Column("x", Integer), Column("y", Integer), UniqueConstraint("x", "y", name="nochange"), ) diffs = self._fixture(m1, m2) if self.reports_unique_constraints: eq_(diffs[0][0], "remove_constraint") eq_(diffs[0][1].name, "nochange") eq_(diffs[1][0], "add_constraint") eq_(diffs[1][1].name, "nochange") else: eq_(diffs, []) def test_nothing_changed_one(self): m1 = MetaData() m2 = MetaData() Table( "nothing_changed", m1, Column("x", String(20), unique=True, index=True), ) Table( "nothing_changed", m2, Column("x", String(20), unique=True, index=True), ) diffs = self._fixture(m1, m2) eq_(diffs, []) def test_nothing_changed_implicit_uq_w_naming_conv(self): m1 = MetaData( naming_convention={ "ix": "ix_%(column_0_label)s", "uq": "uq_%(column_0_label)s", } ) m2 = MetaData( naming_convention={ "ix": "ix_%(column_0_label)s", "uq": "uq_%(column_0_label)s", } ) Table( "nothing_changed", m1, Column("id1", Integer, primary_key=True), Column("id2", Integer, primary_key=True), Column("x", String(20), unique=True), mysql_engine="InnoDB", ) Table( "nothing_changed", m2, Column("id1", Integer, primary_key=True), Column("id2", Integer, primary_key=True), Column("x", String(20), unique=True), mysql_engine="InnoDB", ) diffs = self._fixture(m1, m2) eq_(diffs, []) def test_nothing_uq_changed_labels_were_truncated(self): m1 = MetaData( naming_convention={ "ix": "index_%(table_name)s_%(column_0_label)s", "uq": "unique_%(table_name)s_%(column_0_label)s", } ) m2 = MetaData( naming_convention={ "ix": "index_%(table_name)s_%(column_0_label)s", "uq": "unique_%(table_name)s_%(column_0_label)s", } ) Table( "nothing_changed", m1, Column("id1", Integer, primary_key=True), Column("id2", Integer, primary_key=True), Column("a_long_name", String(20), unique=True), mysql_engine="InnoDB", ) Table( "nothing_changed", m2, Column("id1", Integer, primary_key=True), Column("id2", Integer, primary_key=True), Column("a_long_name", String(20), unique=True), mysql_engine="InnoDB", ) diffs = self._fixture(m1, m2, max_identifier_length=30) eq_(diffs, []) @config.requirements.long_names def test_nothing_ix_changed_labels_were_truncated(self): m1 = MetaData( naming_convention={ "ix": "index_%(table_name)s_%(column_0_label)s", "uq": "unique_%(table_name)s_%(column_0_label)s", } ) m2 = MetaData( naming_convention={ "ix": "index_%(table_name)s_%(column_0_label)s", "uq": "unique_%(table_name)s_%(column_0_label)s", } ) Table( "nothing_changed", m1, Column("id1", Integer, primary_key=True), Column("id2", Integer, primary_key=True), Column("a_particularly_long_column_name", String(20), index=True), mysql_engine="InnoDB", ) Table( "nothing_changed", m2, Column("id1", Integer, primary_key=True), Column("id2", Integer, primary_key=True), Column("a_particularly_long_column_name", String(20), index=True), mysql_engine="InnoDB", ) diffs = self._fixture(m1, m2, max_identifier_length=30) eq_(diffs, []) 
@config.requirements.long_names def test_nothing_changed_uq_w_mixed_case_nconv_name(self): m1 = MetaData( naming_convention={ "ix": "index_%(table_name)s_%(column_0_label)s", "uq": "unique_%(table_name)s_%(column_0_label)s", } ) m2 = MetaData( naming_convention={ "ix": "index_%(table_name)s_%(column_0_label)s", "uq": "unique_%(table_name)s_%(column_0_label)s", } ) Table( "NothingChanged", m1, Column("id", Integer, primary_key=True), Column("XCol", Integer), UniqueConstraint("XCol"), mysql_engine="InnoDB", ) Table( "NothingChanged", m2, Column("id", Integer, primary_key=True), Column("XCol", Integer), UniqueConstraint("XCol"), mysql_engine="InnoDB", ) diffs = self._fixture(m1, m2) eq_(diffs, []) def test_nothing_changed_uq_w_mixed_case_plain_name(self): m1 = MetaData() m2 = MetaData() Table( "nothing_changed", m1, Column("id", Integer, primary_key=True), Column("x", Integer), UniqueConstraint("x", name="SomeConstraint"), mysql_engine="InnoDB", ) Table( "nothing_changed", m2, Column("id", Integer, primary_key=True), Column("x", Integer), UniqueConstraint("x", name="SomeConstraint"), mysql_engine="InnoDB", ) diffs = self._fixture(m1, m2) eq_(diffs, []) def test_nothing_changed_ix_w_mixed_case_plain_name(self): m1 = MetaData() m2 = MetaData() Table( "nothing_changed", m1, Column("id", Integer, primary_key=True), Column("x", Integer), Index("SomeIndex", "x"), mysql_engine="InnoDB", ) Table( "nothing_changed", m2, Column("id", Integer, primary_key=True), Column("x", Integer), Index("SomeIndex", "x"), mysql_engine="InnoDB", ) diffs = self._fixture(m1, m2) eq_(diffs, []) @config.requirements.long_names def test_nothing_changed_ix_w_mixed_case_nconv_name(self): m1 = MetaData( naming_convention={ "ix": "index_%(table_name)s_%(column_0_label)s", "uq": "unique_%(table_name)s_%(column_0_label)s", } ) m2 = MetaData( naming_convention={ "ix": "index_%(table_name)s_%(column_0_label)s", "uq": "unique_%(table_name)s_%(column_0_label)s", } ) Table( "NothingChanged", m1, Column("id", Integer, primary_key=True), Column("XCol", Integer, index=True), mysql_engine="InnoDB", ) Table( "NothingChanged", m2, Column("id", Integer, primary_key=True), Column("XCol", Integer, index=True), mysql_engine="InnoDB", ) diffs = self._fixture(m1, m2) eq_(diffs, []) def test_nothing_changed_two(self): m1 = MetaData() m2 = MetaData() Table( "nothing_changed", m1, Column("id1", Integer, primary_key=True), Column("id2", Integer, primary_key=True), Column("x", String(20), unique=True), mysql_engine="InnoDB", ) Table( "nothing_changed_related", m1, Column("id1", Integer), Column("id2", Integer), ForeignKeyConstraint( ["id1", "id2"], ["nothing_changed.id1", "nothing_changed.id2"] ), mysql_engine="InnoDB", ) Table( "nothing_changed", m2, Column("id1", Integer, primary_key=True), Column("id2", Integer, primary_key=True), Column("x", String(20), unique=True), mysql_engine="InnoDB", ) Table( "nothing_changed_related", m2, Column("id1", Integer), Column("id2", Integer), ForeignKeyConstraint( ["id1", "id2"], ["nothing_changed.id1", "nothing_changed.id2"] ), mysql_engine="InnoDB", ) diffs = self._fixture(m1, m2) eq_(diffs, []) def test_nothing_changed_unique_w_colkeys(self): m1 = MetaData() m2 = MetaData() Table( "nothing_changed", m1, Column("x", String(20), key="nx"), UniqueConstraint("nx"), ) Table( "nothing_changed", m2, Column("x", String(20), key="nx"), UniqueConstraint("nx"), ) diffs = self._fixture(m1, m2) eq_(diffs, []) def test_nothing_changed_index_w_colkeys(self): m1 = MetaData() m2 = MetaData() Table( "nothing_changed", m1, 
Column("x", String(20), key="nx"), Index("foobar", "nx"), ) Table( "nothing_changed", m2, Column("x", String(20), key="nx"), Index("foobar", "nx"), ) diffs = self._fixture(m1, m2) eq_(diffs, []) def test_nothing_changed_index_named_as_column(self): m1 = MetaData() m2 = MetaData() Table( "nothing_changed", m1, Column("id1", Integer, primary_key=True), Column("id2", Integer, primary_key=True), Column("x", String(20)), Index("x", "x"), ) Table( "nothing_changed", m2, Column("id1", Integer, primary_key=True), Column("id2", Integer, primary_key=True), Column("x", String(20)), Index("x", "x"), ) diffs = self._fixture(m1, m2) eq_(diffs, []) def test_nothing_changed_implicit_fk_index_named(self): m1 = MetaData() m2 = MetaData() Table( "nothing_changed", m1, Column("id", Integer, primary_key=True), Column( "other_id", ForeignKey("nc2.id", name="fk_my_table_other_table"), nullable=False, ), Column("foo", Integer), mysql_engine="InnoDB", ) Table( "nc2", m1, Column("id", Integer, primary_key=True), mysql_engine="InnoDB", ) Table( "nothing_changed", m2, Column("id", Integer, primary_key=True), Column( "other_id", ForeignKey("nc2.id", name="fk_my_table_other_table"), nullable=False, ), Column("foo", Integer), mysql_engine="InnoDB", ) Table( "nc2", m2, Column("id", Integer, primary_key=True), mysql_engine="InnoDB", ) diffs = self._fixture(m1, m2) eq_(diffs, []) def test_nothing_changed_implicit_composite_fk_index_named(self): m1 = MetaData() m2 = MetaData() Table( "nothing_changed", m1, Column("id", Integer, primary_key=True), Column("other_id_1", Integer), Column("other_id_2", Integer), Column("foo", Integer), ForeignKeyConstraint( ["other_id_1", "other_id_2"], ["nc2.id1", "nc2.id2"], name="fk_my_table_other_table", ), mysql_engine="InnoDB", ) Table( "nc2", m1, Column("id1", Integer, primary_key=True), Column("id2", Integer, primary_key=True), mysql_engine="InnoDB", ) Table( "nothing_changed", m2, Column("id", Integer, primary_key=True), Column("other_id_1", Integer), Column("other_id_2", Integer), Column("foo", Integer), ForeignKeyConstraint( ["other_id_1", "other_id_2"], ["nc2.id1", "nc2.id2"], name="fk_my_table_other_table", ), mysql_engine="InnoDB", ) Table( "nc2", m2, Column("id1", Integer, primary_key=True), Column("id2", Integer, primary_key=True), mysql_engine="InnoDB", ) diffs = self._fixture(m1, m2) eq_(diffs, []) def test_ix_casing_convention_changed_so_put_drops_first(self): m1 = MetaData() m2 = MetaData() ix1 = Index("SomeCasingConvention", "x") Table( "new_idx", m1, Column("id1", Integer, primary_key=True), Column("x", String(20)), ix1, ) ix2 = Index("somecasingconvention", "x") Table( "new_idx", m2, Column("id1", Integer, primary_key=True), Column("x", String(20)), ix2, ) diffs = self._fixture(m1, m2) eq_( [(d[0], d[1].name) for d in diffs], [ ("remove_index", "SomeCasingConvention"), ("add_index", "somecasingconvention"), ], ) def test_uq_casing_convention_changed_so_put_drops_first(self): m1 = MetaData() m2 = MetaData() uq1 = UniqueConstraint("x", name="SomeCasingConvention") Table( "new_idx", m1, Column("id1", Integer, primary_key=True), Column("x", String(20)), uq1, ) uq2 = UniqueConstraint("x", name="somecasingconvention") Table( "new_idx", m2, Column("id1", Integer, primary_key=True), Column("x", String(20)), uq2, ) diffs = self._fixture(m1, m2) if self.reports_unique_constraints_as_indexes: eq_( [(d[0], d[1].name) for d in diffs], [ ("remove_index", "SomeCasingConvention"), ("add_constraint", "somecasingconvention"), ], ) else: eq_( [(d[0], d[1].name) for d in diffs], [ 
("remove_constraint", "SomeCasingConvention"), ("add_constraint", "somecasingconvention"), ], ) def test_new_idx_index_named_as_column(self): m1 = MetaData() m2 = MetaData() Table( "new_idx", m1, Column("id1", Integer, primary_key=True), Column("id2", Integer, primary_key=True), Column("x", String(20)), ) idx = Index("x", "x") Table( "new_idx", m2, Column("id1", Integer, primary_key=True), Column("id2", Integer, primary_key=True), Column("x", String(20)), idx, ) diffs = self._fixture(m1, m2) eq_(diffs, [("add_index", schemacompare.CompareIndex(idx))]) def test_removed_idx_index_named_as_column(self): m1 = MetaData() m2 = MetaData() idx = Index("x", "x") Table( "new_idx", m1, Column("id1", Integer, primary_key=True), Column("id2", Integer, primary_key=True), Column("x", String(20)), idx, ) Table( "new_idx", m2, Column("id1", Integer, primary_key=True), Column("id2", Integer, primary_key=True), Column("x", String(20)), ) diffs = self._fixture(m1, m2) eq_(diffs[0][0], "remove_index") def test_drop_table_w_indexes(self): m1 = MetaData() m2 = MetaData() t = Table( "some_table", m1, Column("id", Integer, primary_key=True), Column("x", String(20)), Column("y", String(20)), ) Index("xy_idx", t.c.x, t.c.y) Index("y_idx", t.c.y) diffs = self._fixture(m1, m2) eq_(diffs[0][0], "remove_index") eq_(diffs[1][0], "remove_index") eq_(diffs[2][0], "remove_table") eq_( set([diffs[0][1].name, diffs[1][1].name]), set(["xy_idx", "y_idx"]) ) def test_drop_table_w_uq_constraint(self): m1 = MetaData() m2 = MetaData() Table( "some_table", m1, Column("id", Integer, primary_key=True), Column("x", String(20)), Column("y", String(20)), UniqueConstraint("y", name="uq_y"), ) diffs = self._fixture(m1, m2) if self.reports_unique_constraints_as_indexes: # for MySQL this UQ will look like an index, so # make sure it at least sets it up correctly eq_(diffs[0][0], "remove_index") eq_(diffs[1][0], "remove_table") eq_(len(diffs), 2) constraints = [ c for c in diffs[1][1].constraints if isinstance(c, UniqueConstraint) ] eq_(len(constraints), 0) else: eq_(diffs[0][0], "remove_table") eq_(len(diffs), 1) constraints = [ c for c in diffs[0][1].constraints if isinstance(c, UniqueConstraint) ] if self.reports_unique_constraints: eq_(len(constraints), 1) def test_unnamed_cols_changed(self): m1 = MetaData() m2 = MetaData() Table( "col_change", m1, Column("x", Integer), Column("y", Integer), UniqueConstraint("x"), ) Table( "col_change", m2, Column("x", Integer), Column("y", Integer), UniqueConstraint("x", "y"), ) diffs = self._fixture(m1, m2) diffs = set( ( cmd, isinstance(obj, (UniqueConstraint, Index)) if obj.name is not None else False, ) for cmd, obj in diffs ) if self.reports_unnamed_constraints: if self.reports_unique_constraints_as_indexes: eq_( diffs, set([("remove_index", True), ("add_constraint", False)]), ) else: eq_( diffs, set( [ ("remove_constraint", True), ("add_constraint", False), ] ), ) def test_remove_named_unique_index(self): m1 = MetaData() m2 = MetaData() Table( "remove_idx", m1, Column("x", Integer), Index("xidx", "x", unique=True), ) Table("remove_idx", m2, Column("x", Integer)) diffs = self._fixture(m1, m2) if self.reports_unique_constraints: diffs = set((cmd, obj.name) for cmd, obj in diffs) eq_(diffs, set([("remove_index", "xidx")])) else: eq_(diffs, []) def test_remove_named_unique_constraint(self): m1 = MetaData() m2 = MetaData() Table( "remove_idx", m1, Column("x", Integer), UniqueConstraint("x", name="xidx"), ) Table("remove_idx", m2, Column("x", Integer)) diffs = self._fixture(m1, m2) if 
self.reports_unique_constraints: diffs = set((cmd, obj.name) for cmd, obj in diffs) if self.reports_unique_constraints_as_indexes: eq_(diffs, set([("remove_index", "xidx")])) else: eq_(diffs, set([("remove_constraint", "xidx")])) else: eq_(diffs, []) def test_dont_add_uq_on_table_create(self): m1 = MetaData() m2 = MetaData() Table("no_uq", m2, Column("x", String(50), unique=True)) diffs = self._fixture(m1, m2) eq_(diffs[0][0], "add_table") eq_(len(diffs), 1) # checking for dupes also eq_( sorted( [type(cons) for cons in diffs[0][1].constraints], key=lambda c: c.__name__, ), [PrimaryKeyConstraint, UniqueConstraint], ) @config.requirements.reflects_unique_constraints_unambiguously def test_dont_add_uq_on_reverse_table_drop(self): m1 = MetaData() m2 = MetaData() Table("no_uq", m1, Column("x", String(50), unique=True)) diffs = self._fixture(m1, m2) eq_(diffs[0][0], "remove_table") eq_(len(diffs), 1) # because the drop comes from reflection, the "unique=True" flag # is lost in any case. eq_( sorted( [type(cons) for cons in diffs[0][1].constraints], key=lambda c: c.__name__, ), [PrimaryKeyConstraint, UniqueConstraint], ) def test_add_uq_ix_on_table_create(self): m1 = MetaData() m2 = MetaData() Table("add_ix", m2, Column("x", String(50), unique=True, index=True)) diffs = self._fixture(m1, m2) eq_(diffs[0][0], "add_table") eq_(len(diffs), 2) assert UniqueConstraint not in set( type(c) for c in diffs[0][1].constraints ) eq_(diffs[1][0], "add_index") d_table = diffs[0][1] d_idx = diffs[1][1] eq_(d_idx.unique, True) # check for dupes eq_(len(diffs), 2) assert not d_table.indexes def test_add_ix_on_table_create(self): m1 = MetaData() m2 = MetaData() Table("add_ix", m2, Column("x", String(50), index=True)) diffs = self._fixture(m1, m2) eq_(diffs[0][0], "add_table") eq_(len(diffs), 2) assert UniqueConstraint not in set( type(c) for c in diffs[0][1].constraints ) eq_(diffs[1][0], "add_index") eq_(diffs[1][1].unique, False) def test_add_idx_non_col(self): m1 = MetaData() m2 = MetaData() Table("add_ix", m1, Column("x", String(50))) t2 = Table("add_ix", m2, Column("x", String(50))) Index("foo_idx", t2.c.x.desc()) diffs = self._fixture(m1, m2) eq_(diffs[0][0], "add_index") @config.requirements.reflects_indexes_w_sorting def test_idx_string_col_in_fn_no_change(self): """test #880""" m1 = MetaData() m2 = MetaData() t1 = Table("add_ix", m1, Column("x", String(50))) t1.append_constraint(Index("foo_idx", desc(column("x")))) t2 = Table("add_ix", m2, Column("x", String(50))) t2.append_constraint(Index("foo_idx", desc(column("x")))) diffs = self._fixture(m1, m2) eq_(diffs, []) @config.requirements.reflects_indexes_w_sorting def test_unchanged_idx_non_col(self): m1 = MetaData() m2 = MetaData() t1 = Table("add_ix", m1, Column("x", String(50))) Index("foo_idx", t1.c.x.desc()) t2 = Table("add_ix", m2, Column("x", String(50))) Index("foo_idx", t2.c.x.desc()) diffs = self._fixture(m1, m2) eq_(diffs, []) # fails in the 0.8 series where we have truncation rules, # but no control over quoting. passes in 0.7.9 where we don't have # truncation rules either. dropping these ancient versions # is long overdue. 
def test_unchanged_case_sensitive_implicit_idx(self): m1 = MetaData() m2 = MetaData() Table("add_ix", m1, Column("regNumber", String(50), index=True)) Table("add_ix", m2, Column("regNumber", String(50), index=True)) diffs = self._fixture(m1, m2) eq_(diffs, []) def test_unchanged_case_sensitive_explicit_idx(self): m1 = MetaData() m2 = MetaData() t1 = Table("add_ix", m1, Column("reg_number", String(50))) Index("regNumber_idx", t1.c.reg_number) t2 = Table("add_ix", m2, Column("reg_number", String(50))) Index("regNumber_idx", t2.c.reg_number) diffs = self._fixture(m1, m2) eq_(diffs, []) class PGUniqueIndexTest(AutogenerateUniqueIndexTest): reports_unnamed_constraints = True __only_on__ = "postgresql" __backend__ = True def test_idx_added_schema(self): m1 = MetaData() m2 = MetaData() Table("add_ix", m1, Column("x", String(50)), schema="test_schema") Table( "add_ix", m2, Column("x", String(50)), Index("ix_1", "x"), schema="test_schema", ) diffs = self._fixture(m1, m2, include_schemas=True) eq_(diffs[0][0], "add_index") eq_(diffs[0][1].name, "ix_1") def test_idx_unchanged_schema(self): m1 = MetaData() m2 = MetaData() Table( "add_ix", m1, Column("x", String(50)), Index("ix_1", "x"), schema="test_schema", ) Table( "add_ix", m2, Column("x", String(50)), Index("ix_1", "x"), schema="test_schema", ) diffs = self._fixture(m1, m2, include_schemas=True) eq_(diffs, []) def test_uq_added_schema(self): m1 = MetaData() m2 = MetaData() Table("add_uq", m1, Column("x", String(50)), schema="test_schema") Table( "add_uq", m2, Column("x", String(50)), UniqueConstraint("x", name="ix_1"), schema="test_schema", ) diffs = self._fixture(m1, m2, include_schemas=True) eq_(diffs[0][0], "add_constraint") eq_(diffs[0][1].name, "ix_1") def test_uq_unchanged_schema(self): m1 = MetaData() m2 = MetaData() Table( "add_uq", m1, Column("x", String(50)), UniqueConstraint("x", name="ix_1"), schema="test_schema", ) Table( "add_uq", m2, Column("x", String(50)), UniqueConstraint("x", name="ix_1"), schema="test_schema", ) diffs = self._fixture(m1, m2, include_schemas=True) eq_(diffs, []) @config.requirements.btree_gist def test_exclude_const_unchanged(self): from sqlalchemy.dialects.postgresql import TSRANGE, ExcludeConstraint m1 = MetaData() m2 = MetaData() Table( "add_excl", m1, Column("id", Integer, primary_key=True), Column("period", TSRANGE), ExcludeConstraint(("period", "&&"), name="quarters_period_excl"), ) Table( "add_excl", m2, Column("id", Integer, primary_key=True), Column("period", TSRANGE), ExcludeConstraint(("period", "&&"), name="quarters_period_excl"), ) diffs = self._fixture(m1, m2) eq_(diffs, []) def test_same_tname_two_schemas(self): m1 = MetaData() m2 = MetaData() Table("add_ix", m1, Column("x", String(50)), Index("ix_1", "x")) Table("add_ix", m2, Column("x", String(50)), Index("ix_1", "x")) Table("add_ix", m2, Column("x", String(50)), schema="test_schema") diffs = self._fixture(m1, m2, include_schemas=True) eq_(diffs[0][0], "add_table") eq_(len(diffs), 1) def test_uq_dropped(self): m1 = MetaData() m2 = MetaData() Table( "add_uq", m1, Column("id", Integer, primary_key=True), Column("name", String), UniqueConstraint("name", name="uq_name"), ) Table( "add_uq", m2, Column("id", Integer, primary_key=True), Column("name", String), ) diffs = self._fixture(m1, m2, include_schemas=True) eq_(diffs[0][0], "remove_constraint") eq_(diffs[0][1].name, "uq_name") eq_(len(diffs), 1) def test_functional_ix_one(self): m1 = MetaData() m2 = MetaData() t1 = Table( "foo", m1, Column("id", Integer, primary_key=True), Column("email", 
String(50)), ) Index("email_idx", func.lower(t1.c.email), unique=True) t2 = Table( "foo", m2, Column("id", Integer, primary_key=True), Column("email", String(50)), ) Index("email_idx", func.lower(t2.c.email), unique=True) with assertions.expect_warnings( "Skipped unsupported reflection", "autogenerate skipping functional index", ): diffs = self._fixture(m1, m2) eq_(diffs, []) def test_functional_ix_two(self): m1 = MetaData() m2 = MetaData() t1 = Table( "foo", m1, Column("id", Integer, primary_key=True), Column("email", String(50)), Column("name", String(50)), ) Index( "email_idx", func.coalesce(t1.c.email, t1.c.name).desc(), unique=True, ) t2 = Table( "foo", m2, Column("id", Integer, primary_key=True), Column("email", String(50)), Column("name", String(50)), ) Index( "email_idx", func.coalesce(t2.c.email, t2.c.name).desc(), unique=True, ) with assertions.expect_warnings( "Skipped unsupported reflection", "autogenerate skipping functional index", ): diffs = self._fixture(m1, m2) eq_(diffs, []) class MySQLUniqueIndexTest(AutogenerateUniqueIndexTest): reports_unnamed_constraints = True reports_unique_constraints_as_indexes = True __only_on__ = "mysql", "mariadb" __backend__ = True def test_removed_idx_index_named_as_column(self): try: super( MySQLUniqueIndexTest, self ).test_removed_idx_index_named_as_column() except IndexError: assert True else: assert False, "unexpected success" class OracleUniqueIndexTest(AutogenerateUniqueIndexTest): reports_unnamed_constraints = True reports_unique_constraints_as_indexes = True __only_on__ = "oracle" __backend__ = True class NoUqReflectionIndexTest(NoUqReflection, AutogenerateUniqueIndexTest): reports_unique_constraints = False __only_on__ = "sqlite" def test_uq_casing_convention_changed_so_put_drops_first(self): config.skip_test( "unique constraint reflection disabled for this suite" ) def test_dont_add_uq_on_reverse_table_drop(self): config.skip_test( "unique constraint reflection disabled for this suite" ) def test_unique_not_reported(self): m1 = MetaData() Table( "order", m1, Column("order_id", Integer, primary_key=True), Column("amount", Numeric(10, 2), nullable=True), Column("user_id", Integer), UniqueConstraint( "order_id", "user_id", name="order_order_id_user_id_unique" ), ) diffs = self._fixture(m1, m1) eq_(diffs, []) def test_remove_unique_index_not_reported(self): m1 = MetaData() Table( "order", m1, Column("order_id", Integer, primary_key=True), Column("amount", Numeric(10, 2), nullable=True), Column("user_id", Integer), Index("oid_ix", "order_id", "user_id", unique=True), ) m2 = MetaData() Table( "order", m2, Column("order_id", Integer, primary_key=True), Column("amount", Numeric(10, 2), nullable=True), Column("user_id", Integer), ) diffs = self._fixture(m1, m2) eq_(diffs, []) def test_remove_plain_index_is_reported(self): m1 = MetaData() Table( "order", m1, Column("order_id", Integer, primary_key=True), Column("amount", Numeric(10, 2), nullable=True), Column("user_id", Integer), Index("oid_ix", "order_id", "user_id"), ) m2 = MetaData() Table( "order", m2, Column("order_id", Integer, primary_key=True), Column("amount", Numeric(10, 2), nullable=True), Column("user_id", Integer), ) diffs = self._fixture(m1, m2) eq_(diffs[0][0], "remove_index") class NoUqReportsIndAsUqTest(NoUqReflectionIndexTest): """this test suite simulates the condition where: a. the dialect doesn't report unique constraints b. the dialect returns unique constraints within the indexes list. 
Currently the mssql dialect does this, but here we force this condition so that we can test the behavior regardless of if/when mssql supports unique constraint reflection. """ __only_on__ = "sqlite" @classmethod def _get_bind(cls): eng = config.db _get_unique_constraints = eng.dialect.get_unique_constraints _get_indexes = eng.dialect.get_indexes def unimpl(*arg, **kw): raise NotImplementedError() def get_indexes(self, connection, tablename, **kw): indexes = _get_indexes(self, connection, tablename, **kw) for uq in _get_unique_constraints( self, connection, tablename, **kw ): uq["unique"] = True indexes.append(uq) return indexes eng.dialect.get_unique_constraints = unimpl eng.dialect.get_indexes = get_indexes return eng class IncludeHooksTest(AutogenFixtureTest, TestBase): __backend__ = True @combinations(("name",), ("object",)) def test_remove_connection_index(self, hook_type): m1 = MetaData() m2 = MetaData() t1 = Table("t", m1, Column("x", Integer), Column("y", Integer)) Index("ix1", t1.c.x) Index("ix2", t1.c.y) Table("t", m2, Column("x", Integer), Column("y", Integer)) if hook_type == "object": def include_object(object_, name, type_, reflected, compare_to): if type_ == "unique_constraint": return False return not ( isinstance(object_, Index) and type_ == "index" and reflected and name == "ix1" ) diffs = self._fixture(m1, m2, object_filters=include_object) elif hook_type == "name": all_names = set() def include_name(name, type_, parent_names): all_names.add((name, type_)) if name == "ix1": eq_(type_, "index") eq_( parent_names, { "table_name": "t", "schema_name": None, "schema_qualified_table_name": "t", }, ) return False else: return True diffs = self._fixture(m1, m2, name_filters=include_name) eq_( all_names, { ("ix1", "index"), ("ix2", "index"), ("y", "column"), ("t", "table"), (None, "schema"), ("x", "column"), }, ) eq_(diffs[0][0], "remove_index") eq_(diffs[0][1].name, "ix2") eq_(len(diffs), 1) @combinations(("name",), ("object",)) @config.requirements.unique_constraint_reflection @config.requirements.reflects_unique_constraints_unambiguously def test_remove_connection_uq(self, hook_type): m1 = MetaData() m2 = MetaData() Table( "t", m1, Column("x", Integer), Column("y", Integer), UniqueConstraint("x", name="uq1"), UniqueConstraint("y", name="uq2"), ) Table("t", m2, Column("x", Integer), Column("y", Integer)) if hook_type == "object": def include_object(object_, name, type_, reflected, compare_to): if type_ == "index": return False return not ( isinstance(object_, UniqueConstraint) and type_ == "unique_constraint" and reflected and name == "uq1" ) diffs = self._fixture(m1, m2, object_filters=include_object) elif hook_type == "name": all_names = set() def include_name(name, type_, parent_names): if type_ == "index": return False # PostgreSQL thing all_names.add((name, type_)) if name == "uq1": eq_(type_, "unique_constraint") eq_( parent_names, { "table_name": "t", "schema_name": None, "schema_qualified_table_name": "t", }, ) return False return True diffs = self._fixture(m1, m2, name_filters=include_name) eq_( all_names, { ("t", "table"), (None, "schema"), ("uq2", "unique_constraint"), ("x", "column"), ("y", "column"), ("uq1", "unique_constraint"), }, ) eq_(diffs[0][0], "remove_constraint") eq_(diffs[0][1].name, "uq2") eq_(len(diffs), 1) def test_add_metadata_index(self): m1 = MetaData() m2 = MetaData() Table("t", m1, Column("x", Integer)) t2 = Table("t", m2, Column("x", Integer)) Index("ix1", t2.c.x) Index("ix2", t2.c.x) def include_object(object_, name, type_, reflected, 
compare_to): return not ( isinstance(object_, Index) and type_ == "index" and not reflected and name == "ix1" ) diffs = self._fixture(m1, m2, object_filters=include_object) eq_(diffs[0][0], "add_index") eq_(diffs[0][1].name, "ix2") eq_(len(diffs), 1) @config.requirements.unique_constraint_reflection def test_add_metadata_unique(self): m1 = MetaData() m2 = MetaData() Table("t", m1, Column("x", Integer)) Table( "t", m2, Column("x", Integer), UniqueConstraint("x", name="uq1"), UniqueConstraint("x", name="uq2"), ) def include_object(object_, name, type_, reflected, compare_to): return not ( isinstance(object_, UniqueConstraint) and type_ == "unique_constraint" and not reflected and name == "uq1" ) diffs = self._fixture(m1, m2, object_filters=include_object) eq_(diffs[0][0], "add_constraint") eq_(diffs[0][1].name, "uq2") eq_(len(diffs), 1) def test_change_index(self): m1 = MetaData() m2 = MetaData() t1 = Table( "t", m1, Column("x", Integer), Column("y", Integer), Column("z", Integer), ) Index("ix1", t1.c.x) Index("ix2", t1.c.y) t2 = Table( "t", m2, Column("x", Integer), Column("y", Integer), Column("z", Integer), ) Index("ix1", t2.c.x, t2.c.y) Index("ix2", t2.c.x, t2.c.z) def include_object(object_, name, type_, reflected, compare_to): return not ( isinstance(object_, Index) and type_ == "index" and not reflected and name == "ix1" and isinstance(compare_to, Index) ) diffs = self._fixture(m1, m2, object_filters=include_object) eq_(diffs[0][0], "remove_index") eq_(diffs[0][1].name, "ix2") eq_(diffs[1][0], "add_index") eq_(diffs[1][1].name, "ix2") eq_(len(diffs), 2) @config.requirements.unique_constraint_reflection def test_change_unique(self): m1 = MetaData() m2 = MetaData() Table( "t", m1, Column("x", Integer), Column("y", Integer), Column("z", Integer), UniqueConstraint("x", name="uq1"), UniqueConstraint("y", name="uq2"), ) Table( "t", m2, Column("x", Integer), Column("y", Integer), Column("z", Integer), UniqueConstraint("x", "z", name="uq1"), UniqueConstraint("y", "z", name="uq2"), ) def include_object(object_, name, type_, reflected, compare_to): if type_ == "index": return False return not ( isinstance(object_, UniqueConstraint) and type_ == "unique_constraint" and not reflected and name == "uq1" and isinstance(compare_to, UniqueConstraint) ) diffs = self._fixture(m1, m2, object_filters=include_object) eq_(diffs[0][0], "remove_constraint") eq_(diffs[0][1].name, "uq2") eq_(diffs[1][0], "add_constraint") eq_(diffs[1][1].name, "uq2") eq_(len(diffs), 2) class TruncatedIdxTest(AutogenFixtureTest, TestBase): def setUp(self): self.bind = util.testing_engine() self.bind.dialect.max_identifier_length = 30 def test_idx_matches_long(self): from alembic.operations.base import conv m1 = MetaData() Table( "q", m1, Column("id", Integer, primary_key=True), Column("data", Integer), Index( conv("idx_q_table_this_is_more_than_thirty_characters"), "data" ), ) diffs = self._fixture(m1, m1) eq_(diffs, []) alembic-rel_1_7_6/tests/test_autogen_render.py000066400000000000000000002467731417624537100217360ustar00rootroot00000000000000import re import sqlalchemy as sa # noqa from sqlalchemy import BigInteger from sqlalchemy import Boolean from sqlalchemy import cast from sqlalchemy import CHAR from sqlalchemy import CheckConstraint from sqlalchemy import Column from sqlalchemy import DATETIME from sqlalchemy import DateTime from sqlalchemy import DefaultClause from sqlalchemy import Enum from sqlalchemy import ForeignKey from sqlalchemy import ForeignKeyConstraint from sqlalchemy import func from sqlalchemy import 
Index from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import Numeric from sqlalchemy import PrimaryKeyConstraint from sqlalchemy import String from sqlalchemy import Table from sqlalchemy import text from sqlalchemy import types from sqlalchemy import Unicode from sqlalchemy import UniqueConstraint from sqlalchemy.engine.default import DefaultDialect from sqlalchemy.sql import and_ from sqlalchemy.sql import column from sqlalchemy.sql import false from sqlalchemy.sql import literal_column from sqlalchemy.sql import table from sqlalchemy.types import TIMESTAMP from sqlalchemy.types import UserDefinedType from alembic import autogenerate from alembic import op # noqa from alembic import testing from alembic.autogenerate import api from alembic.migration import MigrationContext from alembic.operations import ops from alembic.testing import assert_raises from alembic.testing import assertions from alembic.testing import config from alembic.testing import eq_ from alembic.testing import eq_ignore_whitespace from alembic.testing import mock from alembic.testing import TestBase from alembic.testing.fixtures import op_fixture class AutogenRenderTest(TestBase): """test individual directives""" def setUp(self): ctx_opts = { "sqlalchemy_module_prefix": "sa.", "alembic_module_prefix": "op.", "target_metadata": MetaData(), } context = MigrationContext.configure( dialect=DefaultDialect(), opts=ctx_opts ) self.autogen_context = api.AutogenContext(context) def test_render_add_index(self): """ autogenerate.render._add_index """ m = MetaData() t = Table( "test", m, Column("id", Integer, primary_key=True), Column("active", Boolean()), Column("code", String(255)), ) idx = Index("test_active_code_idx", t.c.active, t.c.code) op_obj = ops.CreateIndexOp.from_index(idx) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_index('test_active_code_idx', 'test', " "['active', 'code'], unique=False)", ) @testing.emits_warning("Can't validate argument ") def test_render_add_index_custom_kwarg(self): t = Table( "test", MetaData(), Column("id", Integer, primary_key=True), Column("active", Boolean()), Column("code", String(255)), ) idx = Index(None, t.c.active, t.c.code, somedialect_foobar="option") op_obj = ops.CreateIndexOp.from_index(idx) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_index(op.f('ix_test_active'), 'test', " "['active', 'code'], unique=False, somedialect_foobar='option')", ) def test_render_add_index_batch(self): """ autogenerate.render._add_index """ m = MetaData() t = Table( "test", m, Column("id", Integer, primary_key=True), Column("active", Boolean()), Column("code", String(255)), ) idx = Index("test_active_code_idx", t.c.active, t.c.code) op_obj = ops.CreateIndexOp.from_index(idx) with self.autogen_context._within_batch(): eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "batch_op.create_index('test_active_code_idx', " "['active', 'code'], unique=False)", ) def test_render_add_index_schema(self): """ autogenerate.render._add_index using schema """ m = MetaData() t = Table( "test", m, Column("id", Integer, primary_key=True), Column("active", Boolean()), Column("code", String(255)), schema="CamelSchema", ) idx = Index("test_active_code_idx", t.c.active, t.c.code) op_obj = ops.CreateIndexOp.from_index(idx) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_index('test_active_code_idx', 'test', " "['active', 'code'], 
unique=False, schema='CamelSchema')", ) def test_render_add_index_schema_batch(self): """ autogenerate.render._add_index using schema """ m = MetaData() t = Table( "test", m, Column("id", Integer, primary_key=True), Column("active", Boolean()), Column("code", String(255)), schema="CamelSchema", ) idx = Index("test_active_code_idx", t.c.active, t.c.code) op_obj = ops.CreateIndexOp.from_index(idx) with self.autogen_context._within_batch(): eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "batch_op.create_index('test_active_code_idx', " "['active', 'code'], unique=False)", ) def test_render_add_index_func(self): m = MetaData() t = Table( "test", m, Column("id", Integer, primary_key=True), Column("code", String(255)), ) idx = Index("test_lower_code_idx", func.lower(t.c.code)) op_obj = ops.CreateIndexOp.from_index(idx) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_index('test_lower_code_idx', 'test', " "[sa.text('lower(code)')], unique=False)", ) def test_render_add_index_cast(self): m = MetaData() t = Table( "test", m, Column("id", Integer, primary_key=True), Column("code", String(255)), ) idx = Index("test_lower_code_idx", cast(t.c.code, String)) op_obj = ops.CreateIndexOp.from_index(idx) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_index('test_lower_code_idx', 'test', " "[sa.text('CAST(code AS VARCHAR)')], unique=False)", ) def test_render_add_index_desc(self): m = MetaData() t = Table( "test", m, Column("id", Integer, primary_key=True), Column("code", String(255)), ) idx = Index("test_desc_code_idx", t.c.code.desc()) op_obj = ops.CreateIndexOp.from_index(idx) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_index('test_desc_code_idx', 'test', " "[sa.text('code DESC')], unique=False)", ) def test_drop_index(self): """ autogenerate.render._drop_index """ m = MetaData() t = Table( "test", m, Column("id", Integer, primary_key=True), Column("active", Boolean()), Column("code", String(255)), ) idx = Index("test_active_code_idx", t.c.active, t.c.code) op_obj = ops.DropIndexOp.from_index(idx) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.drop_index('test_active_code_idx', table_name='test')", ) @testing.emits_warning("Can't validate argument ") def test_render_drop_index_custom_kwarg(self): t = Table( "test", MetaData(), Column("id", Integer, primary_key=True), Column("active", Boolean()), Column("code", String(255)), ) idx = Index(None, t.c.active, t.c.code, somedialect_foobar="option") op_obj = ops.DropIndexOp.from_index(idx) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.drop_index(op.f('ix_test_active'), table_name='test', " "somedialect_foobar='option')", ) def test_drop_index_batch(self): """ autogenerate.render._drop_index """ m = MetaData() t = Table( "test", m, Column("id", Integer, primary_key=True), Column("active", Boolean()), Column("code", String(255)), ) idx = Index("test_active_code_idx", t.c.active, t.c.code) op_obj = ops.DropIndexOp.from_index(idx) with self.autogen_context._within_batch(): eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "batch_op.drop_index('test_active_code_idx')", ) def test_drop_index_schema(self): """ autogenerate.render._drop_index using schema """ m = MetaData() t = Table( "test", m, Column("id", Integer, primary_key=True), Column("active", Boolean()), Column("code", String(255)), 
schema="CamelSchema", ) idx = Index("test_active_code_idx", t.c.active, t.c.code) op_obj = ops.DropIndexOp.from_index(idx) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.drop_index('test_active_code_idx', " + "table_name='test', schema='CamelSchema')", ) def test_drop_index_schema_batch(self): """ autogenerate.render._drop_index using schema """ m = MetaData() t = Table( "test", m, Column("id", Integer, primary_key=True), Column("active", Boolean()), Column("code", String(255)), schema="CamelSchema", ) idx = Index("test_active_code_idx", t.c.active, t.c.code) op_obj = ops.DropIndexOp.from_index(idx) with self.autogen_context._within_batch(): eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "batch_op.drop_index('test_active_code_idx')", ) def test_add_unique_constraint(self): """ autogenerate.render._add_unique_constraint """ m = MetaData() t = Table( "test", m, Column("id", Integer, primary_key=True), Column("active", Boolean()), Column("code", String(255)), ) uq = UniqueConstraint(t.c.code, name="uq_test_code") op_obj = ops.AddConstraintOp.from_constraint(uq) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_unique_constraint('uq_test_code', 'test', ['code'])", ) def test_add_unique_constraint_batch(self): """ autogenerate.render._add_unique_constraint """ m = MetaData() t = Table( "test", m, Column("id", Integer, primary_key=True), Column("active", Boolean()), Column("code", String(255)), ) uq = UniqueConstraint(t.c.code, name="uq_test_code") op_obj = ops.AddConstraintOp.from_constraint(uq) with self.autogen_context._within_batch(): eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "batch_op.create_unique_constraint('uq_test_code', ['code'])", ) def test_add_unique_constraint_schema(self): """ autogenerate.render._add_unique_constraint using schema """ m = MetaData() t = Table( "test", m, Column("id", Integer, primary_key=True), Column("active", Boolean()), Column("code", String(255)), schema="CamelSchema", ) uq = UniqueConstraint(t.c.code, name="uq_test_code") op_obj = ops.AddConstraintOp.from_constraint(uq) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_unique_constraint('uq_test_code', 'test', " "['code'], schema='CamelSchema')", ) def test_add_unique_constraint_schema_batch(self): """ autogenerate.render._add_unique_constraint using schema """ m = MetaData() t = Table( "test", m, Column("id", Integer, primary_key=True), Column("active", Boolean()), Column("code", String(255)), schema="CamelSchema", ) uq = UniqueConstraint(t.c.code, name="uq_test_code") op_obj = ops.AddConstraintOp.from_constraint(uq) with self.autogen_context._within_batch(): eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "batch_op.create_unique_constraint('uq_test_code', " "['code'])", ) def test_drop_unique_constraint(self): """ autogenerate.render._drop_constraint """ m = MetaData() t = Table( "test", m, Column("id", Integer, primary_key=True), Column("active", Boolean()), Column("code", String(255)), ) uq = UniqueConstraint(t.c.code, name="uq_test_code") op_obj = ops.DropConstraintOp.from_constraint(uq) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.drop_constraint('uq_test_code', 'test', type_='unique')", ) def test_drop_unique_constraint_schema(self): """ autogenerate.render._drop_constraint using schema """ m = MetaData() t = Table( "test", m, 
Column("id", Integer, primary_key=True), Column("active", Boolean()), Column("code", String(255)), schema="CamelSchema", ) uq = UniqueConstraint(t.c.code, name="uq_test_code") op_obj = ops.DropConstraintOp.from_constraint(uq) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.drop_constraint('uq_test_code', 'test', " "schema='CamelSchema', type_='unique')", ) def test_drop_unique_constraint_schema_reprobj(self): """ autogenerate.render._drop_constraint using schema """ class SomeObj(str): def __repr__(self): return "foo.camel_schema" op_obj = ops.DropConstraintOp( "uq_test_code", "test", type_="unique", schema=SomeObj("CamelSchema"), ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.drop_constraint('uq_test_code', 'test', " "schema=foo.camel_schema, type_='unique')", ) def test_add_fk_constraint(self): m = MetaData() Table("a", m, Column("id", Integer, primary_key=True)) b = Table("b", m, Column("a_id", Integer, ForeignKey("a.id"))) fk = ForeignKeyConstraint(["a_id"], ["a.id"], name="fk_a_id") b.append_constraint(fk) op_obj = ops.AddConstraintOp.from_constraint(fk) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_foreign_key('fk_a_id', 'b', 'a', ['a_id'], ['id'])", ) def test_add_fk_constraint_batch(self): m = MetaData() Table("a", m, Column("id", Integer, primary_key=True)) b = Table("b", m, Column("a_id", Integer, ForeignKey("a.id"))) fk = ForeignKeyConstraint(["a_id"], ["a.id"], name="fk_a_id") b.append_constraint(fk) op_obj = ops.AddConstraintOp.from_constraint(fk) with self.autogen_context._within_batch(): eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "batch_op.create_foreign_key" "('fk_a_id', 'a', ['a_id'], ['id'])", ) def test_add_fk_constraint_kwarg(self): m = MetaData() t1 = Table("t", m, Column("c", Integer)) t2 = Table("t2", m, Column("c_rem", Integer)) fk = ForeignKeyConstraint([t1.c.c], [t2.c.c_rem], onupdate="CASCADE") # SQLA 0.9 generates a u'' here for remote cols while 0.8 does not, # so just whack out "'u" here from the generated op_obj = ops.AddConstraintOp.from_constraint(fk) eq_ignore_whitespace( re.sub( r"u'", "'", autogenerate.render_op_text(self.autogen_context, op_obj), ), "op.create_foreign_key(None, 't', 't2', ['c'], ['c_rem'], " "onupdate='CASCADE')", ) fk = ForeignKeyConstraint([t1.c.c], [t2.c.c_rem], ondelete="CASCADE") op_obj = ops.AddConstraintOp.from_constraint(fk) eq_ignore_whitespace( re.sub( r"u'", "'", autogenerate.render_op_text(self.autogen_context, op_obj), ), "op.create_foreign_key(None, 't', 't2', ['c'], ['c_rem'], " "ondelete='CASCADE')", ) fk = ForeignKeyConstraint([t1.c.c], [t2.c.c_rem], deferrable=True) op_obj = ops.AddConstraintOp.from_constraint(fk) eq_ignore_whitespace( re.sub( r"u'", "'", autogenerate.render_op_text(self.autogen_context, op_obj), ), "op.create_foreign_key(None, 't', 't2', ['c'], ['c_rem'], " "deferrable=True)", ) fk = ForeignKeyConstraint([t1.c.c], [t2.c.c_rem], initially="XYZ") op_obj = ops.AddConstraintOp.from_constraint(fk) eq_ignore_whitespace( re.sub( r"u'", "'", autogenerate.render_op_text(self.autogen_context, op_obj), ), "op.create_foreign_key(None, 't', 't2', ['c'], ['c_rem'], " "initially='XYZ')", ) fk = ForeignKeyConstraint( [t1.c.c], [t2.c.c_rem], initially="XYZ", ondelete="CASCADE", deferrable=True, ) op_obj = ops.AddConstraintOp.from_constraint(fk) eq_ignore_whitespace( re.sub( r"u'", "'", autogenerate.render_op_text(self.autogen_context, op_obj), ), 
"op.create_foreign_key(None, 't', 't2', ['c'], ['c_rem'], " "ondelete='CASCADE', initially='XYZ', deferrable=True)", ) def test_add_fk_constraint_inline_colkeys(self): m = MetaData() Table("a", m, Column("id", Integer, key="aid", primary_key=True)) b = Table( "b", m, Column("a_id", Integer, ForeignKey("a.aid"), key="baid") ) op_obj = ops.CreateTableOp.from_table(b) py_code = autogenerate.render_op_text(self.autogen_context, op_obj) eq_ignore_whitespace( py_code, "op.create_table('b'," "sa.Column('a_id', sa.Integer(), nullable=True)," "sa.ForeignKeyConstraint(['a_id'], ['a.id'], ))", ) context = op_fixture() eval(py_code) context.assert_( "CREATE TABLE b (a_id INTEGER, " "FOREIGN KEY(a_id) REFERENCES a (id))" ) def test_add_fk_constraint_separate_colkeys(self): m = MetaData() Table("a", m, Column("id", Integer, key="aid", primary_key=True)) b = Table("b", m, Column("a_id", Integer, key="baid")) fk = ForeignKeyConstraint(["baid"], ["a.aid"], name="fk_a_id") b.append_constraint(fk) op_obj = ops.CreateTableOp.from_table(b) py_code = autogenerate.render_op_text(self.autogen_context, op_obj) eq_ignore_whitespace( py_code, "op.create_table('b'," "sa.Column('a_id', sa.Integer(), nullable=True)," "sa.ForeignKeyConstraint(['a_id'], ['a.id'], name='fk_a_id'))", ) context = op_fixture() eval(py_code) context.assert_( "CREATE TABLE b (a_id INTEGER, CONSTRAINT " "fk_a_id FOREIGN KEY(a_id) REFERENCES a (id))" ) context = op_fixture() op_obj = ops.AddConstraintOp.from_constraint(fk) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_foreign_key('fk_a_id', 'b', 'a', ['a_id'], ['id'])", ) py_code = autogenerate.render_op_text(self.autogen_context, op_obj) eval(py_code) context.assert_( "ALTER TABLE b ADD CONSTRAINT fk_a_id " "FOREIGN KEY(a_id) REFERENCES a (id)" ) def test_add_fk_constraint_schema(self): m = MetaData() Table( "a", m, Column("id", Integer, primary_key=True), schema="CamelSchemaTwo", ) b = Table( "b", m, Column("a_id", Integer, ForeignKey("a.id")), schema="CamelSchemaOne", ) fk = ForeignKeyConstraint( ["a_id"], ["CamelSchemaTwo.a.id"], name="fk_a_id" ) b.append_constraint(fk) op_obj = ops.AddConstraintOp.from_constraint(fk) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_foreign_key('fk_a_id', 'b', 'a', ['a_id'], ['id']," " source_schema='CamelSchemaOne', " "referent_schema='CamelSchemaTwo')", ) def test_add_fk_constraint_schema_batch(self): m = MetaData() Table( "a", m, Column("id", Integer, primary_key=True), schema="CamelSchemaTwo", ) b = Table( "b", m, Column("a_id", Integer, ForeignKey("a.id")), schema="CamelSchemaOne", ) fk = ForeignKeyConstraint( ["a_id"], ["CamelSchemaTwo.a.id"], name="fk_a_id" ) b.append_constraint(fk) op_obj = ops.AddConstraintOp.from_constraint(fk) with self.autogen_context._within_batch(): eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "batch_op.create_foreign_key('fk_a_id', 'a', ['a_id'], ['id']," " referent_schema='CamelSchemaTwo')", ) def test_drop_fk_constraint(self): m = MetaData() Table("a", m, Column("id", Integer, primary_key=True)) b = Table("b", m, Column("a_id", Integer, ForeignKey("a.id"))) fk = ForeignKeyConstraint(["a_id"], ["a.id"], name="fk_a_id") b.append_constraint(fk) op_obj = ops.DropConstraintOp.from_constraint(fk) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.drop_constraint('fk_a_id', 'b', type_='foreignkey')", ) def test_drop_fk_constraint_batch(self): m = MetaData() Table("a", m, 
Column("id", Integer, primary_key=True)) b = Table("b", m, Column("a_id", Integer, ForeignKey("a.id"))) fk = ForeignKeyConstraint(["a_id"], ["a.id"], name="fk_a_id") b.append_constraint(fk) op_obj = ops.DropConstraintOp.from_constraint(fk) with self.autogen_context._within_batch(): eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "batch_op.drop_constraint('fk_a_id', type_='foreignkey')", ) def test_drop_fk_constraint_schema(self): m = MetaData() Table( "a", m, Column("id", Integer, primary_key=True), schema="CamelSchemaTwo", ) b = Table( "b", m, Column("a_id", Integer, ForeignKey("a.id")), schema="CamelSchemaOne", ) fk = ForeignKeyConstraint( ["a_id"], ["CamelSchemaTwo.a.id"], name="fk_a_id" ) b.append_constraint(fk) op_obj = ops.DropConstraintOp.from_constraint(fk) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.drop_constraint('fk_a_id', 'b', schema='CamelSchemaOne', " "type_='foreignkey')", ) def test_drop_fk_constraint_batch_schema(self): m = MetaData() Table( "a", m, Column("id", Integer, primary_key=True), schema="CamelSchemaTwo", ) b = Table( "b", m, Column("a_id", Integer, ForeignKey("a.id")), schema="CamelSchemaOne", ) fk = ForeignKeyConstraint( ["a_id"], ["CamelSchemaTwo.a.id"], name="fk_a_id" ) b.append_constraint(fk) op_obj = ops.DropConstraintOp.from_constraint(fk) with self.autogen_context._within_batch(): eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "batch_op.drop_constraint('fk_a_id', type_='foreignkey')", ) def test_render_table_upgrade(self): m = MetaData() t = Table( "test", m, Column("id", Integer, primary_key=True), Column("name", Unicode(255)), Column("address_id", Integer, ForeignKey("address.id")), Column("timestamp", DATETIME, server_default="NOW()"), Column("amount", Numeric(5, 2)), UniqueConstraint("name", name="uq_name"), UniqueConstraint("timestamp"), ) op_obj = ops.CreateTableOp.from_table(t) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table('test'," "sa.Column('id', sa.Integer(), nullable=False)," "sa.Column('name', sa.Unicode(length=255), nullable=True)," "sa.Column('address_id', sa.Integer(), nullable=True)," "sa.Column('timestamp', sa.DATETIME(), " "server_default='NOW()', " "nullable=True)," "sa.Column('amount', sa.Numeric(precision=5, scale=2), " "nullable=True)," "sa.ForeignKeyConstraint(['address_id'], ['address.id'], )," "sa.PrimaryKeyConstraint('id')," "sa.UniqueConstraint('name', name='uq_name')," "sa.UniqueConstraint('timestamp')" ")", ) def test_render_table_w_schema(self): m = MetaData() t = Table( "test", m, Column("id", Integer, primary_key=True), Column("q", Integer, ForeignKey("address.id")), schema="foo", ) op_obj = ops.CreateTableOp.from_table(t) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table('test'," "sa.Column('id', sa.Integer(), nullable=False)," "sa.Column('q', sa.Integer(), nullable=True)," "sa.ForeignKeyConstraint(['q'], ['address.id'], )," "sa.PrimaryKeyConstraint('id')," "schema='foo'" ")", ) def test_render_table_w_system(self): m = MetaData() t = Table( "sometable", m, Column("id", Integer, primary_key=True), Column("xmin", Integer, system=True, nullable=False), ) op_obj = ops.CreateTableOp.from_table(t) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table('sometable'," "sa.Column('id', sa.Integer(), nullable=False)," "sa.Column('xmin', sa.Integer(), nullable=False, system=True)," 
"sa.PrimaryKeyConstraint('id'))", ) def test_render_table_w_unicode_name(self): m = MetaData() t = Table( "\u0411\u0435\u0437", m, Column("id", Integer, primary_key=True), ) op_obj = ops.CreateTableOp.from_table(t) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table(%r," "sa.Column('id', sa.Integer(), nullable=False)," "sa.PrimaryKeyConstraint('id'))" % "\u0411\u0435\u0437", ) def test_render_table_w_unicode_schema(self): m = MetaData() t = Table( "test", m, Column("id", Integer, primary_key=True), schema="\u0411\u0435\u0437", ) op_obj = ops.CreateTableOp.from_table(t) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table('test'," "sa.Column('id', sa.Integer(), nullable=False)," "sa.PrimaryKeyConstraint('id')," "schema=%r)" % "\u0411\u0435\u0437", ) def test_render_table_w_unsupported_constraint(self): from sqlalchemy.sql.schema import ColumnCollectionConstraint class SomeCustomConstraint(ColumnCollectionConstraint): __visit_name__ = "some_custom" m = MetaData() t = Table("t", m, Column("id", Integer), SomeCustomConstraint("id")) op_obj = ops.CreateTableOp.from_table(t) with assertions.expect_warnings( "No renderer is established for object SomeCustomConstraint" ): eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table('t'," "sa.Column('id', sa.Integer(), nullable=True)," "[Unknown Python object " "SomeCustomConstraint(Column('id', Integer(), table=))])", ) @mock.patch("alembic.autogenerate.render.MAX_PYTHON_ARGS", 3) def test_render_table_max_cols(self): m = MetaData() t = Table( "test", m, Column("a", Integer), Column("b", Integer), Column("c", Integer), Column("d", Integer), ) op_obj = ops.CreateTableOp.from_table(t) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table('test'," "*[sa.Column('a', sa.Integer(), nullable=True)," "sa.Column('b', sa.Integer(), nullable=True)," "sa.Column('c', sa.Integer(), nullable=True)," "sa.Column('d', sa.Integer(), nullable=True)])", ) t2 = Table( "test2", m, Column("a", Integer), Column("b", Integer), Column("c", Integer), ) op_obj = ops.CreateTableOp.from_table(t2) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table('test2'," "sa.Column('a', sa.Integer(), nullable=True)," "sa.Column('b', sa.Integer(), nullable=True)," "sa.Column('c', sa.Integer(), nullable=True))", ) def test_render_table_w_fk_schema(self): m = MetaData() t = Table( "test", m, Column("id", Integer, primary_key=True), Column("q", Integer, ForeignKey("foo.address.id")), ) op_obj = ops.CreateTableOp.from_table(t) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table('test'," "sa.Column('id', sa.Integer(), nullable=False)," "sa.Column('q', sa.Integer(), nullable=True)," "sa.ForeignKeyConstraint(['q'], ['foo.address.id'], )," "sa.PrimaryKeyConstraint('id')" ")", ) def test_render_table_w_metadata_schema(self): m = MetaData(schema="foo") t = Table( "test", m, Column("id", Integer, primary_key=True), Column("q", Integer, ForeignKey("address.id")), ) op_obj = ops.CreateTableOp.from_table(t) eq_ignore_whitespace( re.sub( r"u'", "'", autogenerate.render_op_text(self.autogen_context, op_obj), ), "op.create_table('test'," "sa.Column('id', sa.Integer(), nullable=False)," "sa.Column('q', sa.Integer(), nullable=True)," "sa.ForeignKeyConstraint(['q'], ['foo.address.id'], )," "sa.PrimaryKeyConstraint('id')," "schema='foo'" ")", ) def 
test_render_table_w_metadata_schema_override(self): m = MetaData(schema="foo") t = Table( "test", m, Column("id", Integer, primary_key=True), Column("q", Integer, ForeignKey("bar.address.id")), ) op_obj = ops.CreateTableOp.from_table(t) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table('test'," "sa.Column('id', sa.Integer(), nullable=False)," "sa.Column('q', sa.Integer(), nullable=True)," "sa.ForeignKeyConstraint(['q'], ['bar.address.id'], )," "sa.PrimaryKeyConstraint('id')," "schema='foo'" ")", ) def test_render_table_w_prefixes(self): m = MetaData() t = Table( "test", m, Column("id", Integer, primary_key=True), prefixes=["TEST", "PREFIXES"], ) op_obj = ops.CreateTableOp.from_table(t) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table('test'," "sa.Column('id', sa.Integer(), nullable=False)," "sa.PrimaryKeyConstraint('id')," "prefixes=['TEST', 'PREFIXES']" ")", ) def test_render_table_w_prefixes_schema(self): m = MetaData(schema="foo") t = Table( "test", m, Column("id", Integer, primary_key=True), prefixes=["TEST", "PREFIXES"], ) op_obj = ops.CreateTableOp.from_table(t) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table('test'," "sa.Column('id', sa.Integer(), nullable=False)," "sa.PrimaryKeyConstraint('id')," "schema='foo'," "prefixes=['TEST', 'PREFIXES']" ")", ) def test_render_addtl_args(self): m = MetaData() t = Table( "test", m, Column("id", Integer, primary_key=True), Column("q", Integer, ForeignKey("bar.address.id")), sqlite_autoincrement=True, mysql_engine="InnoDB", ) op_obj = ops.CreateTableOp.from_table(t) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table('test'," "sa.Column('id', sa.Integer(), nullable=False)," "sa.Column('q', sa.Integer(), nullable=True)," "sa.ForeignKeyConstraint(['q'], ['bar.address.id'], )," "sa.PrimaryKeyConstraint('id')," "mysql_engine='InnoDB',sqlite_autoincrement=True)", ) def test_render_drop_table(self): op_obj = ops.DropTableOp.from_table(Table("sometable", MetaData())) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.drop_table('sometable')", ) def test_render_drop_table_w_schema(self): op_obj = ops.DropTableOp.from_table( Table("sometable", MetaData(), schema="foo") ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.drop_table('sometable', schema='foo')", ) def test_render_table_no_implicit_check(self): m = MetaData() t = Table("test", m, Column("x", Boolean())) op_obj = ops.CreateTableOp.from_table(t) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table('test'," "sa.Column('x', sa.Boolean(), nullable=True))", ) def test_render_pk_with_col_name_vs_col_key(self): m = MetaData() t1 = Table("t1", m, Column("x", Integer, key="y", primary_key=True)) op_obj = ops.CreateTableOp.from_table(t1) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table('t1'," "sa.Column('x', sa.Integer(), nullable=False)," "sa.PrimaryKeyConstraint('x'))", ) def test_render_empty_pk_vs_nonempty_pk(self): m = MetaData() t1 = Table("t1", m, Column("x", Integer)) t2 = Table("t2", m, Column("x", Integer, primary_key=True)) op_obj = ops.CreateTableOp.from_table(t1) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table('t1'," "sa.Column('x', sa.Integer(), nullable=True))", ) op_obj = 
ops.CreateTableOp.from_table(t2) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table('t2'," "sa.Column('x', sa.Integer(), nullable=False)," "sa.PrimaryKeyConstraint('x'))", ) def test_render_table_w_autoincrement(self): m = MetaData() t = Table( "test", m, Column("id1", Integer, primary_key=True), Column("id2", Integer, primary_key=True, autoincrement=True), ) op_obj = ops.CreateTableOp.from_table(t) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table('test'," "sa.Column('id1', sa.Integer(), nullable=False)," "sa.Column('id2', sa.Integer(), autoincrement=True, " "nullable=False)," "sa.PrimaryKeyConstraint('id1', 'id2')" ")", ) def test_render_add_column(self): op_obj = ops.AddColumnOp( "foo", Column("x", Integer, server_default="5") ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.add_column('foo', sa.Column('x', sa.Integer(), " "server_default='5', nullable=True))", ) @config.requirements.sqlalchemy_13 @testing.emits_warning("Can't validate argument ") def test_render_add_column_custom_kwarg(self): col = Column( "x", Integer, server_default="5", somedialect_foobar="option" ) Table("foo", MetaData(), col) op_obj = ops.AddColumnOp.from_column(col) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.add_column('foo', sa.Column('x', sa.Integer(), " "server_default='5', nullable=True, somedialect_foobar='option'))", ) def test_render_add_column_system(self): # this would never actually happen since "system" columns # can't be added in any case. However it will render as # part of op.CreateTableOp. op_obj = ops.AddColumnOp("foo", Column("xmin", Integer, system=True)) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.add_column('foo', sa.Column('xmin', sa.Integer(), " "nullable=True, system=True))", ) def test_render_add_column_w_schema(self): op_obj = ops.AddColumnOp( "bar", Column("x", Integer, server_default="5"), schema="foo" ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.add_column('bar', sa.Column('x', sa.Integer(), " "server_default='5', nullable=True), schema='foo')", ) def test_render_drop_column(self): op_obj = ops.DropColumnOp.from_column_and_tablename( None, "foo", Column("x", Integer, server_default="5") ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.drop_column('foo', 'x')", ) def test_render_drop_column_w_schema(self): op_obj = ops.DropColumnOp.from_column_and_tablename( "foo", "bar", Column("x", Integer, server_default="5") ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.drop_column('bar', 'x', schema='foo')", ) def test_render_quoted_server_default(self): eq_( autogenerate.render._render_server_default( "nextval('group_to_perm_group_to_perm_id_seq'::regclass)", self.autogen_context, ), "\"nextval('group_to_perm_group_to_perm_id_seq'::regclass)\"", ) def test_render_unicode_server_default(self): default = ( "\u0411\u0435\u0437 " "\u043d\u0430\u0437\u0432\u0430\u043d\u0438\u044f" ) c = Column("x", Unicode, server_default=text(default)) eq_ignore_whitespace( autogenerate.render._render_server_default( c.server_default, self.autogen_context ), "sa.text(%r)" % default, ) def test_render_col_with_server_default(self): c = Column( "updated_at", TIMESTAMP(), server_default='TIMEZONE("utc", CURRENT_TIMESTAMP)', nullable=False, ) result = autogenerate.render._render_column(c,
self.autogen_context) eq_ignore_whitespace( result, "sa.Column('updated_at', sa.TIMESTAMP(), " "server_default='TIMEZONE(\"utc\", CURRENT_TIMESTAMP)', " "nullable=False)", ) def test_render_col_with_comment(self): c = Column("some_key", Integer, comment="This is a comment") Table("some_table", MetaData(), c) result = autogenerate.render._render_column(c, self.autogen_context) eq_ignore_whitespace( result, "sa.Column('some_key', sa.Integer(), " "nullable=True, " "comment='This is a comment')", ) def test_render_col_comment_with_quote(self): c = Column("some_key", Integer, comment="This is a john's comment") Table("some_table", MetaData(), c) result = autogenerate.render._render_column(c, self.autogen_context) eq_ignore_whitespace( result, "sa.Column('some_key', sa.Integer(), " "nullable=True, " 'comment="This is a john\'s comment")', ) def test_render_col_autoinc_false_mysql(self): c = Column("some_key", Integer, primary_key=True, autoincrement=False) Table("some_table", MetaData(), c) result = autogenerate.render._render_column(c, self.autogen_context) eq_ignore_whitespace( result, "sa.Column('some_key', sa.Integer(), " "autoincrement=False, " "nullable=False)", ) def test_render_custom(self): class MySpecialType(Integer): pass def render(type_, obj, context): if type_ == "foreign_key": # causes it not to render return None if type_ == "column": if obj.name == "y": return None elif obj.name == "q": return False else: return "col(%s)" % obj.name if type_ == "type" and isinstance(obj, MySpecialType): context.imports.add("from mypackage import MySpecialType") return "MySpecialType()" return "render:%s" % type_ self.autogen_context.opts.update( render_item=render, alembic_module_prefix="sa." ) t = Table( "t", MetaData(), Column("x", Integer), Column("y", Integer), Column("q", MySpecialType()), PrimaryKeyConstraint("x"), ForeignKeyConstraint(["x"], ["remote.y"]), ) op_obj = ops.CreateTableOp.from_table(t) result = autogenerate.render_op_text(self.autogen_context, op_obj) eq_ignore_whitespace( result, "sa.create_table('t'," "col(x)," "sa.Column('q', MySpecialType(), nullable=True)," "render:primary_key)", ) eq_( self.autogen_context.imports, set(["from mypackage import MySpecialType"]), ) def test_render_modify_type(self): op_obj = ops.AlterColumnOp( "sometable", "somecolumn", modify_type=CHAR(10), existing_type=CHAR(20), ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.alter_column('sometable', 'somecolumn', " "existing_type=sa.CHAR(length=20), type_=sa.CHAR(length=10))", ) def test_render_modify_type_w_schema(self): op_obj = ops.AlterColumnOp( "sometable", "somecolumn", modify_type=CHAR(10), existing_type=CHAR(20), schema="foo", ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.alter_column('sometable', 'somecolumn', " "existing_type=sa.CHAR(length=20), type_=sa.CHAR(length=10), " "schema='foo')", ) def test_render_modify_nullable(self): op_obj = ops.AlterColumnOp( "sometable", "somecolumn", existing_type=Integer(), modify_nullable=True, ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.alter_column('sometable', 'somecolumn', " "existing_type=sa.Integer(), nullable=True)", ) def test_render_modify_nullable_no_existing_type(self): op_obj = ops.AlterColumnOp( "sometable", "somecolumn", modify_nullable=True ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.alter_column('sometable', 'somecolumn', nullable=True)", ) def 
test_render_modify_nullable_w_schema(self): op_obj = ops.AlterColumnOp( "sometable", "somecolumn", existing_type=Integer(), modify_nullable=True, schema="foo", ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.alter_column('sometable', 'somecolumn', " "existing_type=sa.Integer(), nullable=True, schema='foo')", ) def test_render_modify_type_w_autoincrement(self): op_obj = ops.AlterColumnOp( "sometable", "somecolumn", modify_type=Integer(), existing_type=BigInteger(), autoincrement=True, ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.alter_column('sometable', 'somecolumn', " "existing_type=sa.BigInteger(), type_=sa.Integer(), " "autoincrement=True)", ) def test_render_fk_constraint_kwarg(self): m = MetaData() t1 = Table("t", m, Column("c", Integer)) t2 = Table("t2", m, Column("c_rem", Integer)) fk = ForeignKeyConstraint([t1.c.c], [t2.c.c_rem], onupdate="CASCADE") # SQLA 0.9 generates a u'' here for remote cols while 0.8 does not, # so just whack out "'u" here from the generated eq_ignore_whitespace( re.sub( r"u'", "'", autogenerate.render._render_constraint( fk, self.autogen_context, m ), ), "sa.ForeignKeyConstraint(['c'], ['t2.c_rem'], onupdate='CASCADE')", ) fk = ForeignKeyConstraint([t1.c.c], [t2.c.c_rem], ondelete="CASCADE") eq_ignore_whitespace( re.sub( r"u'", "'", autogenerate.render._render_constraint( fk, self.autogen_context, m ), ), "sa.ForeignKeyConstraint(['c'], ['t2.c_rem'], ondelete='CASCADE')", ) fk = ForeignKeyConstraint([t1.c.c], [t2.c.c_rem], deferrable=True) eq_ignore_whitespace( re.sub( r"u'", "'", autogenerate.render._render_constraint( fk, self.autogen_context, m ), ), "sa.ForeignKeyConstraint(['c'], ['t2.c_rem'], deferrable=True)", ) fk = ForeignKeyConstraint([t1.c.c], [t2.c.c_rem], initially="XYZ") eq_ignore_whitespace( re.sub( r"u'", "'", autogenerate.render._render_constraint( fk, self.autogen_context, m ), ), "sa.ForeignKeyConstraint(['c'], ['t2.c_rem'], initially='XYZ')", ) fk = ForeignKeyConstraint( [t1.c.c], [t2.c.c_rem], initially="XYZ", ondelete="CASCADE", deferrable=True, ) eq_ignore_whitespace( re.sub( r"u'", "'", autogenerate.render._render_constraint( fk, self.autogen_context, m ), ), "sa.ForeignKeyConstraint(['c'], ['t2.c_rem'], " "ondelete='CASCADE', initially='XYZ', deferrable=True)", ) def test_render_fk_constraint_resolve_key(self): m = MetaData() t1 = Table("t", m, Column("c", Integer)) Table("t2", m, Column("c_rem", Integer, key="c_remkey")) fk = ForeignKeyConstraint(["c"], ["t2.c_remkey"]) t1.append_constraint(fk) eq_ignore_whitespace( re.sub( r"u'", "'", autogenerate.render._render_constraint( fk, self.autogen_context, m ), ), "sa.ForeignKeyConstraint(['c'], ['t2.c_rem'], )", ) def test_render_fk_constraint_bad_table_resolve(self): m = MetaData() t1 = Table("t", m, Column("c", Integer)) Table("t2", m, Column("c_rem", Integer)) fk = ForeignKeyConstraint(["c"], ["t2.nonexistent"]) t1.append_constraint(fk) eq_ignore_whitespace( re.sub( r"u'", "'", autogenerate.render._render_constraint( fk, self.autogen_context, m ), ), "sa.ForeignKeyConstraint(['c'], ['t2.nonexistent'], )", ) def test_render_fk_constraint_bad_table_resolve_dont_get_confused(self): m = MetaData() t1 = Table("t", m, Column("c", Integer)) Table( "t2", m, Column("c_rem", Integer, key="cr_key"), Column("c_rem_2", Integer, key="c_rem"), ) fk = ForeignKeyConstraint(["c"], ["t2.c_rem"], link_to_name=True) t1.append_constraint(fk) eq_ignore_whitespace( re.sub( r"u'", "'", autogenerate.render._render_constraint( fk, 
self.autogen_context, m ), ), "sa.ForeignKeyConstraint(['c'], ['t2.c_rem'], )", ) def test_render_fk_constraint_link_to_name(self): m = MetaData() t1 = Table("t", m, Column("c", Integer)) Table("t2", m, Column("c_rem", Integer, key="c_remkey")) fk = ForeignKeyConstraint(["c"], ["t2.c_rem"], link_to_name=True) t1.append_constraint(fk) eq_ignore_whitespace( re.sub( r"u'", "'", autogenerate.render._render_constraint( fk, self.autogen_context, m ), ), "sa.ForeignKeyConstraint(['c'], ['t2.c_rem'], )", ) def test_render_fk_constraint_use_alter(self): m = MetaData() Table("t", m, Column("c", Integer)) t2 = Table( "t2", m, Column( "c_rem", Integer, ForeignKey("t.c", name="fk1", use_alter=True) ), ) const = list(t2.foreign_keys)[0].constraint eq_ignore_whitespace( autogenerate.render._render_constraint( const, self.autogen_context, m ), "sa.ForeignKeyConstraint(['c_rem'], ['t.c'], " "name='fk1', use_alter=True)", ) def test_render_fk_constraint_w_metadata_schema(self): m = MetaData(schema="foo") t1 = Table("t", m, Column("c", Integer)) t2 = Table("t2", m, Column("c_rem", Integer)) fk = ForeignKeyConstraint([t1.c.c], [t2.c.c_rem], onupdate="CASCADE") eq_ignore_whitespace( re.sub( r"u'", "'", autogenerate.render._render_constraint( fk, self.autogen_context, m ), ), "sa.ForeignKeyConstraint(['c'], ['foo.t2.c_rem'], " "onupdate='CASCADE')", ) def test_render_check_constraint_literal(self): eq_ignore_whitespace( autogenerate.render._render_check_constraint( CheckConstraint("im a constraint", name="cc1"), self.autogen_context, None, ), "sa.CheckConstraint('im a constraint', name='cc1')", ) def test_render_check_constraint_sqlexpr(self): c = column("c") five = literal_column("5") ten = literal_column("10") eq_ignore_whitespace( autogenerate.render._render_check_constraint( CheckConstraint(and_(c > five, c < ten)), self.autogen_context, None, ), "sa.CheckConstraint('c > 5 AND c < 10')", ) def test_render_check_constraint_literal_binds(self): c = column("c") eq_ignore_whitespace( autogenerate.render._render_check_constraint( CheckConstraint(and_(c > 5, c < 10)), self.autogen_context, None, ), "sa.CheckConstraint('c > 5 AND c < 10')", ) def test_render_unique_constraint_opts(self): m = MetaData() t = Table("t", m, Column("c", Integer)) eq_ignore_whitespace( autogenerate.render._render_unique_constraint( UniqueConstraint(t.c.c, name="uq_1", deferrable="XYZ"), self.autogen_context, None, ), "sa.UniqueConstraint('c', deferrable='XYZ', name='uq_1')", ) def test_add_unique_constraint_unicode_schema(self): m = MetaData() t = Table( "t", m, Column("c", Integer), schema="\u0411\u0435\u0437", ) op_obj = ops.AddConstraintOp.from_constraint(UniqueConstraint(t.c.c)) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_unique_constraint(None, 't', ['c'], " "schema=%r)" % "\u0411\u0435\u0437", ) def test_render_modify_nullable_w_default(self): op_obj = ops.AlterColumnOp( "sometable", "somecolumn", existing_type=Integer(), existing_server_default="5", modify_nullable=True, ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.alter_column('sometable', 'somecolumn', " "existing_type=sa.Integer(), nullable=True, " "existing_server_default='5')", ) def test_render_enum(self): eq_ignore_whitespace( autogenerate.render._repr_type( Enum("one", "two", "three", name="myenum"), self.autogen_context, ), "sa.Enum('one', 'two', 'three', name='myenum')", ) eq_ignore_whitespace( autogenerate.render._repr_type( Enum("one", "two", "three"), self.autogen_context ), 
"sa.Enum('one', 'two', 'three')", ) def test_render_non_native_enum(self): eq_ignore_whitespace( autogenerate.render._repr_type( Enum("one", "two", "three", native_enum=False), self.autogen_context, ), "sa.Enum('one', 'two', 'three', native_enum=False)", ) def test_repr_plain_sqla_type(self): type_ = Integer() eq_ignore_whitespace( autogenerate.render._repr_type(type_, self.autogen_context), "sa.Integer()", ) def test_generic_array_type(self): eq_ignore_whitespace( autogenerate.render._repr_type( types.ARRAY(Integer), self.autogen_context ), "sa.ARRAY(sa.Integer())", ) eq_ignore_whitespace( autogenerate.render._repr_type( types.ARRAY(DateTime(timezone=True)), self.autogen_context ), "sa.ARRAY(sa.DateTime(timezone=True))", ) def test_render_array_no_context(self): uo = ops.UpgradeOps( ops=[ ops.CreateTableOp( "sometable", [Column("x", types.ARRAY(Integer))] ) ] ) eq_( autogenerate.render_python_code(uo), "# ### commands auto generated by Alembic - please adjust! ###\n" " op.create_table('sometable',\n" " sa.Column('x', sa.ARRAY(sa.Integer()), nullable=True)\n" " )\n" " # ### end Alembic commands ###", ) def test_render_server_default_no_context(self): uo = ops.UpgradeOps( ops=[ ops.CreateTableOp( "sometable", [Column("x", types.DateTime(), server_default=func.now())], ) ] ) eq_ignore_whitespace( autogenerate.render_python_code(uo), "# ### commands auto generated by Alembic - please adjust! ###\n" " op.create_table('sometable',\n" " sa.Column('x', sa.DateTime(), " "server_default=sa.text('now()'), nullable=True)\n" " )\n" " # ### end Alembic commands ###", ) def test_render_server_default_context_passed(self): uo = ops.UpgradeOps( ops=[ ops.CreateTableOp( "sometable", [Column("x", types.DateTime(), server_default=func.now())], ) ] ) context = MigrationContext.configure(dialect_name="sqlite") eq_ignore_whitespace( autogenerate.render_python_code(uo, migration_context=context), "# ### commands auto generated by Alembic - please adjust! ###\n" " op.create_table('sometable',\n" " sa.Column('x', sa.DateTime(), " "server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=True)\n" " )\n" " # ### end Alembic commands ###", ) def test_repr_custom_type_w_sqla_prefix(self): self.autogen_context.opts["user_module_prefix"] = None class MyType(UserDefinedType): pass MyType.__module__ = "sqlalchemy_util.types" type_ = MyType() eq_ignore_whitespace( autogenerate.render._repr_type(type_, self.autogen_context), "sqlalchemy_util.types.MyType()", ) def test_render_variant(self): from sqlalchemy import VARCHAR, CHAR self.autogen_context.opts["user_module_prefix"] = None type_ = ( String(5) .with_variant(VARCHAR(10), "mysql") .with_variant(CHAR(15), "oracle") ) # the new Black formatting will help a lot with this eq_ignore_whitespace( autogenerate.render._repr_type(type_, self.autogen_context), "sa.String(length=5)." "with_variant(sa.VARCHAR(length=10), 'mysql')." "with_variant(sa.CHAR(length=15), 'oracle')", ) def test_repr_user_type_user_prefix_None(self): class MyType(UserDefinedType): def get_col_spec(self): return "MYTYPE" type_ = MyType() self.autogen_context.opts["user_module_prefix"] = None eq_ignore_whitespace( autogenerate.render._repr_type(type_, self.autogen_context), "tests.test_autogen_render.MyType()", ) def test_repr_user_type_user_prefix_present(self): from sqlalchemy.types import UserDefinedType class MyType(UserDefinedType): def get_col_spec(self): return "MYTYPE" type_ = MyType() self.autogen_context.opts["user_module_prefix"] = "user." 
eq_ignore_whitespace( autogenerate.render._repr_type(type_, self.autogen_context), "user.MyType()", ) def test_repr_dialect_type(self): from sqlalchemy.dialects.mysql import VARCHAR type_ = VARCHAR(20, charset="utf8", national=True) self.autogen_context.opts["user_module_prefix"] = None eq_ignore_whitespace( autogenerate.render._repr_type(type_, self.autogen_context), "mysql.VARCHAR(charset='utf8', national=True, length=20)", ) eq_( self.autogen_context.imports, set(["from sqlalchemy.dialects import mysql"]), ) def test_render_server_default_text(self): c = Column( "updated_at", TIMESTAMP(), server_default=text("now()"), nullable=False, ) result = autogenerate.render._render_column(c, self.autogen_context) eq_ignore_whitespace( result, "sa.Column('updated_at', sa.TIMESTAMP(), " "server_default=sa.text('now()'), " "nullable=False)", ) def test_render_server_default_non_native_boolean(self): c = Column( "updated_at", Boolean(), server_default=false(), nullable=False ) result = autogenerate.render._render_column(c, self.autogen_context) eq_ignore_whitespace( result, "sa.Column('updated_at', sa.Boolean(), " "server_default=sa.text('0'), " "nullable=False)", ) def test_render_server_default_func(self): c = Column( "updated_at", TIMESTAMP(), server_default=func.now(), nullable=False, ) result = autogenerate.render._render_column(c, self.autogen_context) eq_ignore_whitespace( result, "sa.Column('updated_at', sa.TIMESTAMP(), " "server_default=sa.text('now()'), " "nullable=False)", ) def test_render_server_default_int(self): c = Column("value", Integer, server_default="0") result = autogenerate.render._render_column(c, self.autogen_context) eq_( result, "sa.Column('value', sa.Integer(), " "server_default='0', nullable=True)", ) def test_render_modify_reflected_int_server_default(self): op_obj = ops.AlterColumnOp( "sometable", "somecolumn", existing_type=Integer(), existing_server_default=DefaultClause(text("5")), modify_nullable=True, ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.alter_column('sometable', 'somecolumn', " "existing_type=sa.Integer(), nullable=True, " "existing_server_default=sa.text('5'))", ) def test_render_executesql_plaintext(self): op_obj = ops.ExecuteSQLOp("drop table foo") eq_( autogenerate.render_op_text(self.autogen_context, op_obj), "op.execute('drop table foo')", ) def test_render_executesql_sqlexpr_notimplemented(self): sql = table("x", column("q")).insert() op_obj = ops.ExecuteSQLOp(sql) assert_raises( NotImplementedError, autogenerate.render_op_text, self.autogen_context, op_obj, ) def test_render_alter_column_modify_comment(self): op_obj = ops.AlterColumnOp( "sometable", "somecolumn", modify_comment="This is a comment" ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.alter_column('sometable', 'somecolumn', " "comment='This is a comment')", ) def test_render_alter_column_existing_comment(self): op_obj = ops.AlterColumnOp( "sometable", "somecolumn", existing_comment="This is a comment" ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.alter_column('sometable', 'somecolumn', " "existing_comment='This is a comment')", ) def test_render_col_drop_comment(self): op_obj = ops.AlterColumnOp( "sometable", "somecolumn", existing_comment="This is a comment", modify_comment=None, ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.alter_column('sometable', 'somecolumn', " "comment=None, " "existing_comment='This is a comment')", ) 
def test_render_table_with_comment(self): m = MetaData() t = Table( "test", m, Column("id", Integer, primary_key=True), Column("q", Integer, ForeignKey("address.id")), comment="test comment", ) op_obj = ops.CreateTableOp.from_table(t) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table('test'," "sa.Column('id', sa.Integer(), nullable=False)," "sa.Column('q', sa.Integer(), nullable=True)," "sa.ForeignKeyConstraint(['q'], ['address.id'], )," "sa.PrimaryKeyConstraint('id')," "comment='test comment'" ")", ) def test_render_add_column_with_comment(self): op_obj = ops.AddColumnOp( "foo", Column("x", Integer, comment="This is a Column") ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.add_column('foo', sa.Column('x', sa.Integer(), " "nullable=True, comment='This is a Column'))", ) def test_render_create_table_comment_op(self): op_obj = ops.CreateTableCommentOp("table_name", "comment") eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table_comment(" " 'table_name'," " 'comment'," " existing_comment=None," " schema=None" ")", ) def test_render_create_table_comment_with_quote_op(self): op_obj = ops.CreateTableCommentOp( "table_name", "This is john's comment", existing_comment='This was john\'s "comment"', ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table_comment(" " 'table_name'," ' "This is john\'s comment",' " existing_comment='This was john\\'s \"comment\"'," " schema=None" ")", ) def test_render_create_table_comment_op_with_existing_comment(self): op_obj = ops.CreateTableCommentOp( "table_name", "comment", existing_comment="old comment" ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table_comment(" " 'table_name'," " 'comment'," " existing_comment='old comment'," " schema=None" ")", ) def test_render_create_table_comment_op_with_schema(self): op_obj = ops.CreateTableCommentOp( "table_name", "comment", schema="SomeSchema" ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table_comment(" " 'table_name'," " 'comment'," " existing_comment=None," " schema='SomeSchema'" ")", ) def test_render_drop_table_comment_op(self): op_obj = ops.DropTableCommentOp("table_name") eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.drop_table_comment(" " 'table_name'," " existing_comment=None," " schema=None" ")", ) def test_render_drop_table_comment_op_existing_with_quote(self): op_obj = ops.DropTableCommentOp( "table_name", existing_comment="This was john's comment" ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.drop_table_comment(" " 'table_name'," ' existing_comment="This was john\'s comment",' " schema=None" ")", ) @config.requirements.computed_columns_api def test_render_add_column_computed(self): c = sa.Computed("5") op_obj = ops.AddColumnOp("foo", Column("x", Integer, c)) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.add_column('foo', sa.Column('x', sa.Integer(), " "sa.Computed('5', ), nullable=True))", ) @config.requirements.computed_columns_api @testing.combinations((True,), (False,)) def test_render_add_column_computed_persisted(self, persisted): op_obj = ops.AddColumnOp( "foo", Column("x", Integer, sa.Computed("5", persisted=persisted)) ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), 
"op.add_column('foo', sa.Column('x', sa.Integer(), " "sa.Computed('5', persisted=%s), nullable=True))" % persisted, ) @config.requirements.computed_columns_api def test_render_alter_column_computed_modify_default(self): op_obj = ops.AlterColumnOp( "sometable", "somecolumn", modify_server_default=sa.Computed("7") ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.alter_column('sometable', 'somecolumn', " "server_default=sa.Computed('7', ))", ) @config.requirements.computed_columns_api def test_render_alter_column_computed_existing_default(self): op_obj = ops.AlterColumnOp( "sometable", "somecolumn", existing_server_default=sa.Computed("42"), ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.alter_column('sometable', 'somecolumn', " "existing_server_default=sa.Computed('42', ))", ) @config.requirements.computed_columns_api @testing.combinations((True,), (False,)) def test_render_alter_column_computed_modify_default_perisisted( self, persisted ): op_obj = ops.AlterColumnOp( "sometable", "somecolumn", modify_server_default=sa.Computed("7", persisted=persisted), ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.alter_column('sometable', 'somecolumn', server_default" "=sa.Computed('7', persisted=%s))" % persisted, ) @config.requirements.computed_columns_api @testing.combinations((True,), (False,)) def test_render_alter_column_computed_existing_default_perisisted( self, persisted ): c = sa.Computed("42", persisted=persisted) op_obj = ops.AlterColumnOp( "sometable", "somecolumn", existing_server_default=c ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.alter_column('sometable', 'somecolumn', " "existing_server_default=sa.Computed('42', persisted=%s))" % persisted, ) @config.requirements.identity_columns_api @testing.combinations( ({}, "sa.Identity(always=False)"), (dict(always=None), "sa.Identity(always=None)"), (dict(always=True), "sa.Identity(always=True)"), ( dict( always=False, on_null=True, start=2, increment=4, minvalue=-3, maxvalue=99, nominvalue=True, nomaxvalue=True, cycle=True, cache=42, order=True, ), "sa.Identity(always=False, on_null=True, start=2, increment=4, " "minvalue=-3, maxvalue=99, nominvalue=True, nomaxvalue=True, " "cycle=True, cache=42, order=True)", ), ) def test_render_add_column_identity(self, kw, text): col = Column("x", Integer, sa.Identity(**kw)) op_obj = ops.AddColumnOp("foo", col) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.add_column('foo', sa.Column('x', sa.Integer(), " "%s, nullable=%r))" % (text, col.nullable), ) @config.requirements.identity_columns_api @testing.combinations( ({}, "sa.Identity(always=False)"), (dict(always=None), "sa.Identity(always=None)"), (dict(always=True), "sa.Identity(always=True)"), ( dict( always=False, on_null=True, start=2, increment=4, minvalue=-3, maxvalue=99, nominvalue=True, nomaxvalue=True, cycle=True, cache=42, order=True, ), "sa.Identity(always=False, on_null=True, start=2, increment=4, " "minvalue=-3, maxvalue=99, nominvalue=True, nomaxvalue=True, " "cycle=True, cache=42, order=True)", ), ) def test_render_alter_column_add_identity(self, kw, text): op_obj = ops.AlterColumnOp( "foo", "x", existing_type=Integer(), existing_server_default=None, modify_server_default=sa.Identity(**kw), ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.alter_column('foo', 'x', existing_type=sa.Integer(), " 
"server_default=%s)" % text, ) @config.requirements.identity_columns_api def test_render_alter_column_drop_identity(self): op_obj = ops.AlterColumnOp( "foo", "x", existing_type=Integer(), existing_server_default=sa.Identity(), modify_server_default=None, ) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.alter_column('foo', 'x', existing_type=sa.Integer(), " "server_default=None)", ) class RenderNamingConventionTest(TestBase): def setUp(self): convention = { "ix": "ix_%(custom)s_%(column_0_label)s", "uq": "uq_%(custom)s_%(table_name)s_%(column_0_name)s", "ck": "ck_%(custom)s_%(table_name)s", "fk": "fk_%(custom)s_%(table_name)s_" "%(column_0_name)s_%(referred_table_name)s", "pk": "pk_%(custom)s_%(table_name)s", "custom": lambda const, table: "ct", } self.metadata = MetaData(naming_convention=convention) ctx_opts = { "sqlalchemy_module_prefix": "sa.", "alembic_module_prefix": "op.", "target_metadata": MetaData(), } context = MigrationContext.configure( dialect_name="postgresql", opts=ctx_opts ) self.autogen_context = api.AutogenContext(context) def test_schema_type_boolean(self): t = Table("t", self.metadata, Column("c", Boolean(name="xyz"))) op_obj = ops.AddColumnOp.from_column(t.c.c) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.add_column('t', " "sa.Column('c', sa.Boolean(name='xyz'), nullable=True))", ) def test_explicit_unique_constraint(self): t = Table("t", self.metadata, Column("c", Integer)) eq_ignore_whitespace( autogenerate.render._render_unique_constraint( UniqueConstraint(t.c.c, deferrable="XYZ"), self.autogen_context, None, ), "sa.UniqueConstraint('c', deferrable='XYZ', " "name=op.f('uq_ct_t_c'))", ) def test_explicit_named_unique_constraint(self): t = Table("t", self.metadata, Column("c", Integer)) eq_ignore_whitespace( autogenerate.render._render_unique_constraint( UniqueConstraint(t.c.c, name="q"), self.autogen_context, None ), "sa.UniqueConstraint('c', name='q')", ) def test_render_add_index(self): t = Table( "test", self.metadata, Column("id", Integer, primary_key=True), Column("active", Boolean()), Column("code", String(255)), ) idx = Index(None, t.c.active, t.c.code) op_obj = ops.CreateIndexOp.from_index(idx) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_index(op.f('ix_ct_test_active'), 'test', " "['active', 'code'], unique=False)", ) def test_render_drop_index(self): t = Table( "test", self.metadata, Column("id", Integer, primary_key=True), Column("active", Boolean()), Column("code", String(255)), ) idx = Index(None, t.c.active, t.c.code) op_obj = ops.DropIndexOp.from_index(idx) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.drop_index(op.f('ix_ct_test_active'), table_name='test')", ) def test_render_add_index_schema(self): t = Table( "test", self.metadata, Column("id", Integer, primary_key=True), Column("active", Boolean()), Column("code", String(255)), schema="CamelSchema", ) idx = Index(None, t.c.active, t.c.code) op_obj = ops.CreateIndexOp.from_index(idx) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_index(op.f('ix_ct_CamelSchema_test_active'), 'test', " "['active', 'code'], unique=False, schema='CamelSchema')", ) def test_implicit_unique_constraint(self): t = Table("t", self.metadata, Column("c", Integer, unique=True)) uq = [c for c in t.constraints if isinstance(c, UniqueConstraint)][0] eq_ignore_whitespace( autogenerate.render._render_unique_constraint( uq, 
self.autogen_context, None ), "sa.UniqueConstraint('c', name=op.f('uq_ct_t_c'))", ) def test_inline_pk_constraint(self): t = Table("t", self.metadata, Column("c", Integer, primary_key=True)) op_obj = ops.CreateTableOp.from_table(t) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table('t',sa.Column('c', sa.Integer(), nullable=False)," "sa.PrimaryKeyConstraint('c', name=op.f('pk_ct_t')))", ) def test_inline_ck_constraint(self): t = Table( "t", self.metadata, Column("c", Integer), CheckConstraint("c > 5") ) op_obj = ops.CreateTableOp.from_table(t) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table('t',sa.Column('c', sa.Integer(), nullable=True)," "sa.CheckConstraint('c > 5', name=op.f('ck_ct_t')))", ) def test_inline_fk(self): t = Table("t", self.metadata, Column("c", Integer, ForeignKey("q.id"))) op_obj = ops.CreateTableOp.from_table(t) eq_ignore_whitespace( autogenerate.render_op_text(self.autogen_context, op_obj), "op.create_table('t',sa.Column('c', sa.Integer(), nullable=True)," "sa.ForeignKeyConstraint(['c'], ['q.id'], " "name=op.f('fk_ct_t_c_q')))", ) def test_render_check_constraint_renamed(self): """test that constraints from autogenerate render with the naming convention name explicitly. These names should be frozen into the migration scripts so that they remain the same if the application's naming convention changes. However, op.create_table() and others need to be careful that these don't double up when the "%(constraint_name)s" token is used. """ m1 = MetaData( naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"} ) ck = CheckConstraint("im a constraint", name="cc1") Table("t", m1, Column("x"), ck) eq_ignore_whitespace( autogenerate.render._render_check_constraint( ck, self.autogen_context, None ), "sa.CheckConstraint('im a constraint', name=op.f('ck_t_cc1'))", ) def test_create_table_plus_add_index_in_modify(self): uo = ops.UpgradeOps( ops=[ ops.CreateTableOp( "sometable", [Column("x", Integer), Column("y", Integer)] ), ops.ModifyTableOps( "sometable", ops=[ops.CreateIndexOp("ix1", "sometable", ["x", "y"])], ), ] ) eq_( autogenerate.render_python_code(uo, render_as_batch=True), "# ### commands auto generated by Alembic - please adjust! 
###\n" " op.create_table('sometable',\n" " sa.Column('x', sa.Integer(), nullable=True),\n" " sa.Column('y', sa.Integer(), nullable=True)\n" " )\n" " with op.batch_alter_table('sometable', schema=None) " "as batch_op:\n" " batch_op.create_index(" "'ix1', ['x', 'y'], unique=False)\n\n" " # ### end Alembic commands ###", ) alembic-rel_1_7_6/tests/test_batch.py000066400000000000000000002361461417624537100200070ustar00rootroot00000000000000from contextlib import contextmanager import re from sqlalchemy import Boolean from sqlalchemy import CheckConstraint from sqlalchemy import Column from sqlalchemy import DateTime from sqlalchemy import Enum from sqlalchemy import ForeignKey from sqlalchemy import ForeignKeyConstraint from sqlalchemy import func from sqlalchemy import Index from sqlalchemy import inspect from sqlalchemy import Integer from sqlalchemy import JSON from sqlalchemy import MetaData from sqlalchemy import PrimaryKeyConstraint from sqlalchemy import String from sqlalchemy import Table from sqlalchemy import Text from sqlalchemy import UniqueConstraint from sqlalchemy.dialects import sqlite as sqlite_dialect from sqlalchemy.schema import CreateIndex from sqlalchemy.schema import CreateTable from sqlalchemy.sql import column from sqlalchemy.sql import text from alembic import testing from alembic.ddl import sqlite from alembic.operations import Operations from alembic.operations.batch import ApplyBatchImpl from alembic.runtime.migration import MigrationContext from alembic.testing import assert_raises_message from alembic.testing import config from alembic.testing import eq_ from alembic.testing import exclusions from alembic.testing import is_ from alembic.testing import mock from alembic.testing import TestBase from alembic.testing.fixtures import op_fixture from alembic.util import exc as alembic_exc from alembic.util.sqla_compat import _safe_commit_connection_transaction from alembic.util.sqla_compat import _select from alembic.util.sqla_compat import has_computed from alembic.util.sqla_compat import has_identity from alembic.util.sqla_compat import sqla_14 if has_computed: from alembic.util.sqla_compat import Computed if has_identity: from alembic.util.sqla_compat import Identity class BatchApplyTest(TestBase): def setUp(self): self.op = Operations(mock.Mock(opts={})) self.impl = sqlite.SQLiteImpl( sqlite_dialect.dialect(), None, False, False, None, {} ) def _simple_fixture(self, table_args=(), table_kwargs={}, **kw): m = MetaData() t = Table( "tname", m, Column("id", Integer, primary_key=True), Column("x", String(10)), Column("y", Integer), ) return ApplyBatchImpl( self.impl, t, table_args, table_kwargs, False, **kw ) def _uq_fixture(self, table_args=(), table_kwargs={}): m = MetaData() t = Table( "tname", m, Column("id", Integer, primary_key=True), Column("x", String()), Column("y", Integer), UniqueConstraint("y", name="uq1"), ) return ApplyBatchImpl(self.impl, t, table_args, table_kwargs, False) def _named_ck_table_fixture(self, table_args=(), table_kwargs={}): m = MetaData() t = Table( "tname", m, Column("id", Integer, primary_key=True), Column("x", String()), Column("y", Integer), CheckConstraint("y > 5", name="ck1"), ) return ApplyBatchImpl(self.impl, t, table_args, table_kwargs, False) def _named_ck_col_fixture(self, table_args=(), table_kwargs={}): m = MetaData() t = Table( "tname", m, Column("id", Integer, primary_key=True), Column("x", String()), Column("y", Integer, CheckConstraint("y > 5", name="ck1")), ) return ApplyBatchImpl(self.impl, t, table_args, table_kwargs, 
False) def _ix_fixture(self, table_args=(), table_kwargs={}): m = MetaData() t = Table( "tname", m, Column("id", Integer, primary_key=True), Column("x", String()), Column("y", Integer), Index("ix1", "y"), ) return ApplyBatchImpl(self.impl, t, table_args, table_kwargs, False) def _pk_fixture(self): m = MetaData() t = Table( "tname", m, Column("id", Integer), Column("x", String()), Column("y", Integer), PrimaryKeyConstraint("id", name="mypk"), ) return ApplyBatchImpl(self.impl, t, (), {}, False) def _literal_ck_fixture( self, copy_from=None, table_args=(), table_kwargs={} ): m = MetaData() if copy_from is not None: t = copy_from else: t = Table( "tname", m, Column("id", Integer, primary_key=True), Column("email", String()), CheckConstraint("email LIKE '%@%'"), ) return ApplyBatchImpl(self.impl, t, table_args, table_kwargs, False) def _sql_ck_fixture(self, table_args=(), table_kwargs={}): m = MetaData() t = Table( "tname", m, Column("id", Integer, primary_key=True), Column("email", String()), ) t.append_constraint(CheckConstraint(t.c.email.like("%@%"))) return ApplyBatchImpl(self.impl, t, table_args, table_kwargs, False) def _fk_fixture(self, table_args=(), table_kwargs={}): m = MetaData() t = Table( "tname", m, Column("id", Integer, primary_key=True), Column("email", String()), Column("user_id", Integer, ForeignKey("user.id")), ) return ApplyBatchImpl(self.impl, t, table_args, table_kwargs, False) def _multi_fk_fixture(self, table_args=(), table_kwargs={}, schema=None): m = MetaData() if schema: schemaarg = "%s." % schema else: schemaarg = "" t = Table( "tname", m, Column("id", Integer, primary_key=True), Column("email", String()), Column("user_id_1", Integer, ForeignKey("%suser.id" % schemaarg)), Column("user_id_2", Integer, ForeignKey("%suser.id" % schemaarg)), Column("user_id_3", Integer), Column("user_id_version", Integer), ForeignKeyConstraint( ["user_id_3", "user_id_version"], ["%suser.id" % schemaarg, "%suser.id_version" % schemaarg], ), schema=schema, ) return ApplyBatchImpl(self.impl, t, table_args, table_kwargs, False) def _named_fk_fixture(self, table_args=(), table_kwargs={}): m = MetaData() t = Table( "tname", m, Column("id", Integer, primary_key=True), Column("email", String()), Column("user_id", Integer, ForeignKey("user.id", name="ufk")), ) return ApplyBatchImpl(self.impl, t, table_args, table_kwargs, False) def _selfref_fk_fixture(self, table_args=(), table_kwargs={}): m = MetaData() t = Table( "tname", m, Column("id", Integer, primary_key=True), Column("parent_id", Integer, ForeignKey("tname.id")), Column("data", String), ) return ApplyBatchImpl(self.impl, t, table_args, table_kwargs, False) def _boolean_fixture(self, table_args=(), table_kwargs={}): m = MetaData() t = Table( "tname", m, Column("id", Integer, primary_key=True), Column("flag", Boolean(create_constraint=True)), ) return ApplyBatchImpl(self.impl, t, table_args, table_kwargs, False) def _boolean_no_ck_fixture(self, table_args=(), table_kwargs={}): m = MetaData() t = Table( "tname", m, Column("id", Integer, primary_key=True), Column("flag", Boolean(create_constraint=False)), ) return ApplyBatchImpl(self.impl, t, table_args, table_kwargs, False) def _enum_fixture(self, table_args=(), table_kwargs={}): m = MetaData() t = Table( "tname", m, Column("id", Integer, primary_key=True), Column("thing", Enum("a", "b", "c", create_constraint=True)), ) return ApplyBatchImpl(self.impl, t, table_args, table_kwargs, False) def _server_default_fixture(self, table_args=(), table_kwargs={}): m = MetaData() t = Table( "tname", m, 
Column("id", Integer, primary_key=True), Column("thing", String(), server_default=""), ) return ApplyBatchImpl(self.impl, t, table_args, table_kwargs, False) def _assert_impl( self, impl, colnames=None, ddl_contains=None, ddl_not_contains=None, dialect="default", schema=None, ): context = op_fixture(dialect=dialect) impl._create(context.impl) if colnames is None: colnames = ["id", "x", "y"] eq_(impl.new_table.c.keys(), colnames) pk_cols = [col for col in impl.new_table.c if col.primary_key] eq_(list(impl.new_table.primary_key), pk_cols) create_stmt = str( CreateTable(impl.new_table).compile(dialect=context.dialect) ) create_stmt = re.sub(r"[\n\t]", "", create_stmt) idx_stmt = "" for idx in impl.indexes.values(): idx_stmt += str(CreateIndex(idx).compile(dialect=context.dialect)) for idx in impl.new_indexes.values(): impl.new_table.name = impl.table.name idx_stmt += str(CreateIndex(idx).compile(dialect=context.dialect)) impl.new_table.name = ApplyBatchImpl._calc_temp_name( impl.table.name ) idx_stmt = re.sub(r"[\n\t]", "", idx_stmt) if ddl_contains: assert ddl_contains in create_stmt + idx_stmt if ddl_not_contains: assert ddl_not_contains not in create_stmt + idx_stmt expected = [create_stmt] if schema: args = {"schema": "%s." % schema} else: args = {"schema": ""} args["temp_name"] = impl.new_table.name args["colnames"] = ", ".join( [ impl.new_table.c[name].name for name in colnames if name in impl.table.c ] ) args["tname_colnames"] = ", ".join( "CAST(%(schema)stname.%(name)s AS %(type)s) AS %(cast_label)s" % { "schema": args["schema"], "name": name, "type": impl.new_table.c[name].type, "cast_label": name if sqla_14 else "anon_1", } if ( impl.new_table.c[name].type._type_affinity is not impl.table.c[name].type._type_affinity ) else "%(schema)stname.%(name)s" % {"schema": args["schema"], "name": name} for name in colnames if name in impl.table.c ) expected.extend( [ "INSERT INTO %(schema)s%(temp_name)s (%(colnames)s) " "SELECT %(tname_colnames)s FROM %(schema)stname" % args, "DROP TABLE %(schema)stname" % args, "ALTER TABLE %(schema)s%(temp_name)s " "RENAME TO %(schema)stname" % args, ] ) if idx_stmt: expected.append(idx_stmt) context.assert_(*expected) return impl.new_table def test_change_type(self): impl = self._simple_fixture() impl.alter_column("tname", "x", type_=String) new_table = self._assert_impl(impl) assert new_table.c.x.type._type_affinity is String def test_rename_col(self): impl = self._simple_fixture() impl.alter_column("tname", "x", name="q") new_table = self._assert_impl(impl) eq_(new_table.c.x.name, "q") def test_alter_column_comment(self): impl = self._simple_fixture() impl.alter_column("tname", "x", comment="some comment") new_table = self._assert_impl(impl) eq_(new_table.c.x.comment, "some comment") def test_add_column_comment(self): impl = self._simple_fixture() impl.add_column("tname", Column("q", Integer, comment="some comment")) new_table = self._assert_impl(impl, colnames=["id", "x", "y", "q"]) eq_(new_table.c.q.comment, "some comment") def test_rename_col_boolean(self): impl = self._boolean_fixture() impl.alter_column("tname", "flag", name="bflag") new_table = self._assert_impl( impl, ddl_contains="CHECK (bflag IN (0, 1)", colnames=["id", "flag"], ) eq_(new_table.c.flag.name, "bflag") eq_( len( [ const for const in new_table.constraints if isinstance(const, CheckConstraint) ] ), 1, ) def test_change_type_schematype_to_non(self): impl = self._boolean_fixture() impl.alter_column("tname", "flag", type_=Integer) new_table = self._assert_impl( impl, colnames=["id", 
"flag"], ddl_not_contains="CHECK" ) assert new_table.c.flag.type._type_affinity is Integer # NOTE: we can't do test_change_type_non_to_schematype # at this level because the "add_constraint" part of this # comes from toimpl.py, which we aren't testing here def test_rename_col_boolean_no_ck(self): impl = self._boolean_no_ck_fixture() impl.alter_column("tname", "flag", name="bflag") new_table = self._assert_impl( impl, ddl_not_contains="CHECK", colnames=["id", "flag"] ) eq_(new_table.c.flag.name, "bflag") eq_( len( [ const for const in new_table.constraints if isinstance(const, CheckConstraint) ] ), 0, ) def test_rename_col_enum(self): impl = self._enum_fixture() impl.alter_column("tname", "thing", name="thang") new_table = self._assert_impl( impl, ddl_contains="CHECK (thang IN ('a', 'b', 'c')", colnames=["id", "thing"], ) eq_(new_table.c.thing.name, "thang") eq_( len( [ const for const in new_table.constraints if isinstance(const, CheckConstraint) ] ), 1, ) def test_rename_col_literal_ck(self): impl = self._literal_ck_fixture() impl.alter_column("tname", "email", name="emol") new_table = self._assert_impl( # note this is wrong, we don't dig into the SQL impl, ddl_contains="CHECK (email LIKE '%@%')", colnames=["id", "email"], ) eq_( len( [ c for c in new_table.constraints if isinstance(c, CheckConstraint) ] ), 1, ) eq_(new_table.c.email.name, "emol") def test_rename_col_literal_ck_workaround(self): impl = self._literal_ck_fixture( copy_from=Table( "tname", MetaData(), Column("id", Integer, primary_key=True), Column("email", String), ), table_args=[CheckConstraint("emol LIKE '%@%'")], ) impl.alter_column("tname", "email", name="emol") new_table = self._assert_impl( impl, ddl_contains="CHECK (emol LIKE '%@%')", colnames=["id", "email"], ) eq_( len( [ c for c in new_table.constraints if isinstance(c, CheckConstraint) ] ), 1, ) eq_(new_table.c.email.name, "emol") def test_rename_col_sql_ck(self): impl = self._sql_ck_fixture() impl.alter_column("tname", "email", name="emol") new_table = self._assert_impl( impl, ddl_contains="CHECK (emol LIKE '%@%')", colnames=["id", "email"], ) eq_( len( [ c for c in new_table.constraints if isinstance(c, CheckConstraint) ] ), 1, ) eq_(new_table.c.email.name, "emol") def test_add_col(self): impl = self._simple_fixture() col = Column("g", Integer) # operations.add_column produces a table t = self.op.schema_obj.table("tname", col) # noqa impl.add_column("tname", col) new_table = self._assert_impl(impl, colnames=["id", "x", "y", "g"]) eq_(new_table.c.g.name, "g") def test_partial_reordering(self): impl = self._simple_fixture(partial_reordering=[("x", "id", "y")]) new_table = self._assert_impl(impl, colnames=["x", "id", "y"]) eq_(new_table.c.x.name, "x") def test_add_col_partial_reordering(self): impl = self._simple_fixture(partial_reordering=[("id", "x", "g", "y")]) col = Column("g", Integer) # operations.add_column produces a table t = self.op.schema_obj.table("tname", col) # noqa impl.add_column("tname", col) new_table = self._assert_impl(impl, colnames=["id", "x", "g", "y"]) eq_(new_table.c.g.name, "g") def test_add_col_insert_before(self): impl = self._simple_fixture() col = Column("g", Integer) # operations.add_column produces a table t = self.op.schema_obj.table("tname", col) # noqa impl.add_column("tname", col, insert_before="x") new_table = self._assert_impl(impl, colnames=["id", "g", "x", "y"]) eq_(new_table.c.g.name, "g") def test_add_col_insert_before_beginning(self): impl = self._simple_fixture() impl.add_column("tname", Column("g", Integer), 
insert_before="id") new_table = self._assert_impl(impl, colnames=["g", "id", "x", "y"]) eq_(new_table.c.g.name, "g") def test_add_col_insert_before_middle(self): impl = self._simple_fixture() impl.add_column("tname", Column("g", Integer), insert_before="y") new_table = self._assert_impl(impl, colnames=["id", "x", "g", "y"]) eq_(new_table.c.g.name, "g") def test_add_col_insert_after_middle(self): impl = self._simple_fixture() impl.add_column("tname", Column("g", Integer), insert_after="id") new_table = self._assert_impl(impl, colnames=["id", "g", "x", "y"]) eq_(new_table.c.g.name, "g") def test_add_col_insert_after_penultimate(self): impl = self._simple_fixture() impl.add_column("tname", Column("g", Integer), insert_after="x") self._assert_impl(impl, colnames=["id", "x", "g", "y"]) def test_add_col_insert_after_end(self): impl = self._simple_fixture() impl.add_column("tname", Column("g", Integer), insert_after="y") new_table = self._assert_impl(impl, colnames=["id", "x", "y", "g"]) eq_(new_table.c.g.name, "g") def test_add_col_insert_after_plus_no_order(self): impl = self._simple_fixture() # operations.add_column produces a table impl.add_column("tname", Column("g", Integer), insert_after="id") impl.add_column("tname", Column("q", Integer)) new_table = self._assert_impl( impl, colnames=["id", "g", "x", "y", "q"] ) eq_(new_table.c.g.name, "g") def test_add_col_no_order_plus_insert_after(self): impl = self._simple_fixture() col = Column("g", Integer) # operations.add_column produces a table t = self.op.schema_obj.table("tname", col) # noqa impl.add_column("tname", Column("q", Integer)) impl.add_column("tname", Column("g", Integer), insert_after="id") new_table = self._assert_impl( impl, colnames=["id", "g", "x", "y", "q"] ) eq_(new_table.c.g.name, "g") def test_add_col_insert_after_another_insert(self): impl = self._simple_fixture() impl.add_column("tname", Column("g", Integer), insert_after="id") impl.add_column("tname", Column("q", Integer), insert_after="g") new_table = self._assert_impl( impl, colnames=["id", "g", "q", "x", "y"] ) eq_(new_table.c.g.name, "g") def test_add_col_insert_before_another_insert(self): impl = self._simple_fixture() impl.add_column("tname", Column("g", Integer), insert_after="id") impl.add_column("tname", Column("q", Integer), insert_before="g") new_table = self._assert_impl( impl, colnames=["id", "q", "g", "x", "y"] ) eq_(new_table.c.g.name, "g") def test_add_server_default(self): impl = self._simple_fixture() impl.alter_column("tname", "y", server_default="10") new_table = self._assert_impl(impl, ddl_contains="DEFAULT '10'") eq_(new_table.c.y.server_default.arg, "10") def test_drop_server_default(self): impl = self._server_default_fixture() impl.alter_column("tname", "thing", server_default=None) new_table = self._assert_impl( impl, colnames=["id", "thing"], ddl_not_contains="DEFAULT" ) eq_(new_table.c.thing.server_default, None) def test_rename_col_pk(self): impl = self._simple_fixture() impl.alter_column("tname", "id", name="foobar") new_table = self._assert_impl( impl, ddl_contains="PRIMARY KEY (foobar)" ) eq_(new_table.c.id.name, "foobar") eq_(list(new_table.primary_key), [new_table.c.id]) def test_rename_col_fk(self): impl = self._fk_fixture() impl.alter_column("tname", "user_id", name="foobar") new_table = self._assert_impl( impl, colnames=["id", "email", "user_id"], ddl_contains='FOREIGN KEY(foobar) REFERENCES "user" (id)', ) eq_(new_table.c.user_id.name, "foobar") eq_( list(new_table.c.user_id.foreign_keys)[0]._get_colspec(), "user.id" ) def 
test_regen_multi_fk(self): impl = self._multi_fk_fixture() self._assert_impl( impl, colnames=[ "id", "email", "user_id_1", "user_id_2", "user_id_3", "user_id_version", ], ddl_contains="FOREIGN KEY(user_id_3, user_id_version) " 'REFERENCES "user" (id, id_version)', ) def test_regen_multi_fk_schema(self): impl = self._multi_fk_fixture(schema="foo_schema") self._assert_impl( impl, colnames=[ "id", "email", "user_id_1", "user_id_2", "user_id_3", "user_id_version", ], ddl_contains="FOREIGN KEY(user_id_3, user_id_version) " 'REFERENCES foo_schema."user" (id, id_version)', schema="foo_schema", ) def test_do_not_add_existing_columns_columns(self): impl = self._multi_fk_fixture() meta = impl.table.metadata cid = Column("id", Integer()) user = Table("user", meta, cid) fk = [ c for c in impl.unnamed_constraints if isinstance(c, ForeignKeyConstraint) ] impl._setup_referent(meta, fk[0]) is_(user.c.id, cid) def test_drop_col(self): impl = self._simple_fixture() impl.drop_column("tname", column("x")) new_table = self._assert_impl(impl, colnames=["id", "y"]) assert "y" in new_table.c assert "x" not in new_table.c def test_drop_col_remove_pk(self): impl = self._simple_fixture() impl.drop_column("tname", column("id")) new_table = self._assert_impl( impl, colnames=["x", "y"], ddl_not_contains="PRIMARY KEY" ) assert "y" in new_table.c assert "id" not in new_table.c assert not new_table.primary_key def test_drop_col_remove_fk(self): impl = self._fk_fixture() impl.drop_column("tname", column("user_id")) new_table = self._assert_impl( impl, colnames=["id", "email"], ddl_not_contains="FOREIGN KEY" ) assert "user_id" not in new_table.c assert not new_table.foreign_keys def test_drop_col_retain_fk(self): impl = self._fk_fixture() impl.drop_column("tname", column("email")) new_table = self._assert_impl( impl, colnames=["id", "user_id"], ddl_contains='FOREIGN KEY(user_id) REFERENCES "user" (id)', ) assert "email" not in new_table.c assert new_table.c.user_id.foreign_keys def test_drop_col_retain_fk_selfref(self): impl = self._selfref_fk_fixture() impl.drop_column("tname", column("data")) new_table = self._assert_impl(impl, colnames=["id", "parent_id"]) assert "data" not in new_table.c assert new_table.c.parent_id.foreign_keys def test_add_fk(self): impl = self._simple_fixture() impl.add_column("tname", Column("user_id", Integer)) fk = self.op.schema_obj.foreign_key_constraint( "fk1", "tname", "user", ["user_id"], ["id"] ) impl.add_constraint(fk) new_table = self._assert_impl( impl, colnames=["id", "x", "y", "user_id"], ddl_contains="CONSTRAINT fk1 FOREIGN KEY(user_id) " 'REFERENCES "user" (id)', ) eq_( list(new_table.c.user_id.foreign_keys)[0]._get_colspec(), "user.id" ) def test_drop_fk(self): impl = self._named_fk_fixture() fk = ForeignKeyConstraint([], [], name="ufk") impl.drop_constraint(fk) new_table = self._assert_impl( impl, colnames=["id", "email", "user_id"], ddl_not_contains="CONSTRANT fk1", ) eq_(list(new_table.foreign_keys), []) def test_add_uq(self): impl = self._simple_fixture() uq = self.op.schema_obj.unique_constraint("uq1", "tname", ["y"]) impl.add_constraint(uq) self._assert_impl( impl, colnames=["id", "x", "y"], ddl_contains="CONSTRAINT uq1 UNIQUE", ) def test_drop_uq(self): impl = self._uq_fixture() uq = self.op.schema_obj.unique_constraint("uq1", "tname", ["y"]) impl.drop_constraint(uq) self._assert_impl( impl, colnames=["id", "x", "y"], ddl_not_contains="CONSTRAINT uq1 UNIQUE", ) def test_add_ck(self): impl = self._simple_fixture() ck = self.op.schema_obj.check_constraint("ck1", "tname", "y > 
5") impl.add_constraint(ck) self._assert_impl( impl, colnames=["id", "x", "y"], ddl_contains="CONSTRAINT ck1 CHECK (y > 5)", ) def test_drop_ck_table(self): impl = self._named_ck_table_fixture() ck = self.op.schema_obj.check_constraint("ck1", "tname", "y > 5") impl.drop_constraint(ck) self._assert_impl( impl, colnames=["id", "x", "y"], ddl_not_contains="CONSTRAINT ck1 CHECK (y > 5)", ) def test_drop_ck_col(self): impl = self._named_ck_col_fixture() ck = self.op.schema_obj.check_constraint("ck1", "tname", "y > 5") impl.drop_constraint(ck) self._assert_impl( impl, colnames=["id", "x", "y"], ddl_not_contains="CONSTRAINT ck1 CHECK (y > 5)", ) def test_create_index(self): impl = self._simple_fixture() ix = self.op.schema_obj.index("ix1", "tname", ["y"]) impl.create_index(ix) self._assert_impl( impl, colnames=["id", "x", "y"], ddl_contains="CREATE INDEX ix1" ) def test_drop_index(self): impl = self._ix_fixture() ix = self.op.schema_obj.index("ix1", "tname", ["y"]) impl.drop_index(ix) self._assert_impl( impl, colnames=["id", "x", "y"], ddl_not_contains="CONSTRAINT uq1 UNIQUE", ) def test_add_table_opts(self): impl = self._simple_fixture(table_kwargs={"mysql_engine": "InnoDB"}) self._assert_impl(impl, ddl_contains="ENGINE=InnoDB", dialect="mysql") def test_drop_pk(self): impl = self._pk_fixture() pk = self.op.schema_obj.primary_key_constraint("mypk", "tname", ["id"]) impl.drop_constraint(pk) new_table = self._assert_impl(impl) assert not new_table.c.id.primary_key assert not len(new_table.primary_key) class BatchAPITest(TestBase): @contextmanager def _fixture(self, schema=None): migration_context = mock.Mock( opts={}, impl=mock.MagicMock(__dialect__="sqlite", connection=object()), ) op = Operations(migration_context) batch = op.batch_alter_table( "tname", recreate="never", schema=schema ).__enter__() mock_schema = mock.MagicMock() with mock.patch("alembic.operations.schemaobj.sa_schema", mock_schema): yield batch batch.impl.flush() self.mock_schema = mock_schema def test_drop_col(self): with self._fixture() as batch: batch.drop_column("q") eq_( batch.impl.operations.impl.mock_calls, [ mock.call.drop_column( "tname", self.mock_schema.Column(), schema=None ) ], ) def test_add_col(self): column = Column("w", String(50)) with self._fixture() as batch: batch.add_column(column) assert ( mock.call.add_column("tname", column, schema=None) in batch.impl.operations.impl.mock_calls ) def test_create_fk(self): with self._fixture() as batch: batch.create_foreign_key("myfk", "user", ["x"], ["y"]) eq_( self.mock_schema.ForeignKeyConstraint.mock_calls, [ mock.call( ["x"], ["user.y"], onupdate=None, ondelete=None, name="myfk", initially=None, deferrable=None, match=None, ) ], ) eq_( self.mock_schema.Table.mock_calls, [ mock.call( "user", self.mock_schema.MetaData(), self.mock_schema.Column(), schema=None, ), mock.call( "tname", self.mock_schema.MetaData(), self.mock_schema.Column(), schema=None, ), mock.call().append_constraint( self.mock_schema.ForeignKeyConstraint() ), ], ) eq_( batch.impl.operations.impl.mock_calls, [ mock.call.add_constraint( self.mock_schema.ForeignKeyConstraint() ) ], ) def test_create_fk_schema(self): with self._fixture(schema="foo") as batch: batch.create_foreign_key("myfk", "user", ["x"], ["y"]) eq_( self.mock_schema.ForeignKeyConstraint.mock_calls, [ mock.call( ["x"], ["user.y"], onupdate=None, ondelete=None, name="myfk", initially=None, deferrable=None, match=None, ) ], ) eq_( self.mock_schema.Table.mock_calls, [ mock.call( "user", self.mock_schema.MetaData(), self.mock_schema.Column(), 
schema=None, ), mock.call( "tname", self.mock_schema.MetaData(), self.mock_schema.Column(), schema="foo", ), mock.call().append_constraint( self.mock_schema.ForeignKeyConstraint() ), ], ) eq_( batch.impl.operations.impl.mock_calls, [ mock.call.add_constraint( self.mock_schema.ForeignKeyConstraint() ) ], ) def test_create_uq(self): with self._fixture() as batch: batch.create_unique_constraint("uq1", ["a", "b"]) eq_( self.mock_schema.Table().c.__getitem__.mock_calls, [mock.call("a"), mock.call("b")], ) eq_( self.mock_schema.UniqueConstraint.mock_calls, [ mock.call( self.mock_schema.Table().c.__getitem__(), self.mock_schema.Table().c.__getitem__(), name="uq1", ) ], ) eq_( batch.impl.operations.impl.mock_calls, [mock.call.add_constraint(self.mock_schema.UniqueConstraint())], ) def test_create_pk(self): with self._fixture() as batch: batch.create_primary_key("pk1", ["a", "b"]) eq_( self.mock_schema.Table().c.__getitem__.mock_calls, [mock.call("a"), mock.call("b")], ) eq_( self.mock_schema.PrimaryKeyConstraint.mock_calls, [ mock.call( self.mock_schema.Table().c.__getitem__(), self.mock_schema.Table().c.__getitem__(), name="pk1", ) ], ) eq_( batch.impl.operations.impl.mock_calls, [ mock.call.add_constraint( self.mock_schema.PrimaryKeyConstraint() ) ], ) def test_create_check(self): expr = text("a > b") with self._fixture() as batch: batch.create_check_constraint("ck1", expr) eq_( self.mock_schema.CheckConstraint.mock_calls, [mock.call(expr, name="ck1")], ) eq_( batch.impl.operations.impl.mock_calls, [mock.call.add_constraint(self.mock_schema.CheckConstraint())], ) def test_drop_constraint(self): with self._fixture() as batch: batch.drop_constraint("uq1") eq_(self.mock_schema.Constraint.mock_calls, [mock.call(name="uq1")]) eq_( batch.impl.operations.impl.mock_calls, [mock.call.drop_constraint(self.mock_schema.Constraint())], ) class CopyFromTest(TestBase): def _fixture(self): self.metadata = MetaData() self.table = Table( "foo", self.metadata, Column("id", Integer, primary_key=True), Column("data", String(50)), Column("x", Integer), ) context = op_fixture(dialect="sqlite", as_sql=True) self.op = Operations(context) return context @config.requirements.sqlalchemy_13 def test_change_type(self): context = self._fixture() self.table.append_column(Column("toj", Text)) self.table.append_column(Column("fromj", JSON)) with self.op.batch_alter_table( "foo", copy_from=self.table ) as batch_op: batch_op.alter_column("data", type_=Integer) batch_op.alter_column("toj", type_=JSON) batch_op.alter_column("fromj", type_=Text) context.assert_( "CREATE TABLE _alembic_tmp_foo (id INTEGER NOT NULL, " "data INTEGER, x INTEGER, toj JSON, fromj TEXT, PRIMARY KEY (id))", "INSERT INTO _alembic_tmp_foo (id, data, x, toj, fromj) " "SELECT foo.id, " "CAST(foo.data AS INTEGER) AS %s, foo.x, foo.toj, " "CAST(foo.fromj AS TEXT) AS %s FROM foo" % ( ("data" if sqla_14 else "anon_1"), ("fromj" if sqla_14 else "anon_2"), ), "DROP TABLE foo", "ALTER TABLE _alembic_tmp_foo RENAME TO foo", ) def test_change_type_from_schematype(self): context = self._fixture() self.table.append_column( Column("y", Boolean(create_constraint=True, name="ck1")) ) with self.op.batch_alter_table( "foo", copy_from=self.table ) as batch_op: batch_op.alter_column( "y", type_=Integer, existing_type=Boolean(create_constraint=True, name="ck1"), ) context.assert_( "CREATE TABLE _alembic_tmp_foo (id INTEGER NOT NULL, " "data VARCHAR(50), x INTEGER, y INTEGER, PRIMARY KEY (id))", "INSERT INTO _alembic_tmp_foo (id, data, x, y) SELECT foo.id, " "foo.data, foo.x, 
CAST(foo.y AS INTEGER) AS %s FROM foo" % (("y" if sqla_14 else "anon_1"),), "DROP TABLE foo", "ALTER TABLE _alembic_tmp_foo RENAME TO foo", ) def test_change_name_from_existing_variant_type(self): """test #982""" context = self._fixture() self.table.append_column( Column("y", Text().with_variant(Text(10000), "mysql")) ) with self.op.batch_alter_table( "foo", copy_from=self.table ) as batch_op: batch_op.alter_column( column_name="y", new_column_name="q", existing_type=Text().with_variant(Text(10000), "mysql"), ) context.assert_( "CREATE TABLE _alembic_tmp_foo (id INTEGER NOT NULL, " "data VARCHAR(50), x INTEGER, q TEXT, PRIMARY KEY (id))", "INSERT INTO _alembic_tmp_foo (id, data, x, q) " "SELECT foo.id, foo.data, foo.x, foo.y FROM foo", "DROP TABLE foo", "ALTER TABLE _alembic_tmp_foo RENAME TO foo", ) def test_change_type_to_schematype(self): context = self._fixture() self.table.append_column(Column("y", Integer)) with self.op.batch_alter_table( "foo", copy_from=self.table ) as batch_op: batch_op.alter_column( "y", existing_type=Integer, type_=Boolean(create_constraint=True, name="ck1"), ) context.assert_( "CREATE TABLE _alembic_tmp_foo (id INTEGER NOT NULL, " "data VARCHAR(50), x INTEGER, y BOOLEAN, PRIMARY KEY (id), " "CONSTRAINT ck1 CHECK (y IN (0, 1)))", "INSERT INTO _alembic_tmp_foo (id, data, x, y) SELECT foo.id, " "foo.data, foo.x, CAST(foo.y AS BOOLEAN) AS %s FROM foo" % (("y" if sqla_14 else "anon_1"),), "DROP TABLE foo", "ALTER TABLE _alembic_tmp_foo RENAME TO foo", ) def test_create_drop_index_w_always(self): context = self._fixture() with self.op.batch_alter_table( "foo", copy_from=self.table, recreate="always" ) as batch_op: batch_op.create_index("ix_data", ["data"], unique=True) context.assert_( "CREATE TABLE _alembic_tmp_foo (id INTEGER NOT NULL, " "data VARCHAR(50), " "x INTEGER, PRIMARY KEY (id))", "INSERT INTO _alembic_tmp_foo (id, data, x) " "SELECT foo.id, foo.data, foo.x FROM foo", "DROP TABLE foo", "ALTER TABLE _alembic_tmp_foo RENAME TO foo", "CREATE UNIQUE INDEX ix_data ON foo (data)", ) context.clear_assertions() Index("ix_data", self.table.c.data, unique=True) with self.op.batch_alter_table( "foo", copy_from=self.table, recreate="always" ) as batch_op: batch_op.drop_index("ix_data") context.assert_( "CREATE TABLE _alembic_tmp_foo (id INTEGER NOT NULL, " "data VARCHAR(50), x INTEGER, PRIMARY KEY (id))", "INSERT INTO _alembic_tmp_foo (id, data, x) " "SELECT foo.id, foo.data, foo.x FROM foo", "DROP TABLE foo", "ALTER TABLE _alembic_tmp_foo RENAME TO foo", ) def test_create_drop_index_wo_always(self): context = self._fixture() with self.op.batch_alter_table( "foo", copy_from=self.table ) as batch_op: batch_op.create_index("ix_data", ["data"], unique=True) context.assert_("CREATE UNIQUE INDEX ix_data ON foo (data)") context.clear_assertions() Index("ix_data", self.table.c.data, unique=True) with self.op.batch_alter_table( "foo", copy_from=self.table ) as batch_op: batch_op.drop_index("ix_data") context.assert_("DROP INDEX ix_data") def test_create_drop_index_w_other_ops(self): context = self._fixture() with self.op.batch_alter_table( "foo", copy_from=self.table ) as batch_op: batch_op.alter_column("data", type_=Integer) batch_op.create_index("ix_data", ["data"], unique=True) context.assert_( "CREATE TABLE _alembic_tmp_foo (id INTEGER NOT NULL, " "data INTEGER, x INTEGER, PRIMARY KEY (id))", "INSERT INTO _alembic_tmp_foo (id, data, x) SELECT foo.id, " "CAST(foo.data AS INTEGER) AS %s, foo.x FROM foo" % (("data" if sqla_14 else "anon_1"),), "DROP TABLE foo", "ALTER TABLE 
_alembic_tmp_foo RENAME TO foo", "CREATE UNIQUE INDEX ix_data ON foo (data)", ) context.clear_assertions() Index("ix_data", self.table.c.data, unique=True) with self.op.batch_alter_table( "foo", copy_from=self.table ) as batch_op: batch_op.drop_index("ix_data") batch_op.alter_column("data", type_=String) context.assert_( "CREATE TABLE _alembic_tmp_foo (id INTEGER NOT NULL, " "data VARCHAR, x INTEGER, PRIMARY KEY (id))", "INSERT INTO _alembic_tmp_foo (id, data, x) SELECT foo.id, " "foo.data, foo.x FROM foo", "DROP TABLE foo", "ALTER TABLE _alembic_tmp_foo RENAME TO foo", ) class BatchRoundTripTest(TestBase): __only_on__ = "sqlite" def setUp(self): self.conn = config.db.connect() self.metadata = MetaData() t1 = Table( "foo", self.metadata, Column("id", Integer, primary_key=True), Column("data", String(50)), Column("x", Integer), mysql_engine="InnoDB", ) with self.conn.begin(): t1.create(self.conn) self.conn.execute( t1.insert(), [ {"id": 1, "data": "d1", "x": 5}, {"id": 2, "data": "22", "x": 6}, {"id": 3, "data": "8.5", "x": 7}, {"id": 4, "data": "9.46", "x": 8}, {"id": 5, "data": "d5", "x": 9}, ], ) context = MigrationContext.configure(self.conn) self.op = Operations(context) def tearDown(self): # why commit? because SQLite has inconsistent treatment # of transactional DDL. A test that runs CREATE TABLE and then # ALTER TABLE to change the name of that table, will end up # committing the CREATE TABLE but not the ALTER. As batch mode # does this with a temp table name that's not even in the # metadata collection, we don't have an explicit drop for it # (though we could do that too). calling commit means the # ALTER will go through and the drop_all() will then catch it. _safe_commit_connection_transaction(self.conn) with self.conn.begin(): self.metadata.drop_all(self.conn) self.conn.close() @contextmanager def _sqlite_referential_integrity(self): self.conn.exec_driver_sql("PRAGMA foreign_keys=ON") try: yield finally: self.conn.exec_driver_sql("PRAGMA foreign_keys=OFF") # as these tests are typically intentional fails, clean out # tables left over m = MetaData() m.reflect(self.conn) with self.conn.begin(): m.drop_all(self.conn) def _no_pk_fixture(self): with self.conn.begin(): nopk = Table( "nopk", self.metadata, Column("a", Integer), Column("b", Integer), Column("c", Integer), mysql_engine="InnoDB", ) nopk.create(self.conn) self.conn.execute( nopk.insert(), [{"a": 1, "b": 2, "c": 3}, {"a": 2, "b": 4, "c": 5}], ) return nopk def _table_w_index_fixture(self): with self.conn.begin(): t = Table( "t_w_ix", self.metadata, Column("id", Integer, primary_key=True), Column("thing", Integer), Column("data", String(20)), ) Index("ix_thing", t.c.thing) t.create(self.conn) return t def _boolean_fixture(self): with self.conn.begin(): t = Table( "hasbool", self.metadata, Column("x", Boolean(create_constraint=True, name="ck1")), Column("y", Integer), ) t.create(self.conn) def _timestamp_fixture(self): with self.conn.begin(): t = Table("hasts", self.metadata, Column("x", DateTime())) t.create(self.conn) return t def _ck_constraint_fixture(self): with self.conn.begin(): t = Table( "ck_table", self.metadata, Column("id", Integer, nullable=False), CheckConstraint("id is not NULL", name="ck"), ) t.create(self.conn) return t def _datetime_server_default_fixture(self): return func.datetime("now", "localtime") def _timestamp_w_expr_default_fixture(self): with self.conn.begin(): t = Table( "hasts", self.metadata, Column( "x", DateTime(), server_default=self._datetime_server_default_fixture(), nullable=False, ), ) 
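# note: the server default above is a SQL expression (see _datetime_server_default_fixture), not a literal
# string; the batch round-trip tests that alter this table rely on that expression default surviving recreate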
t.create(self.conn) return t def _int_to_boolean_fixture(self): with self.conn.begin(): t = Table("hasbool", self.metadata, Column("x", Integer)) t.create(self.conn) def test_change_type_boolean_to_int(self): self._boolean_fixture() with self.op.batch_alter_table("hasbool") as batch_op: batch_op.alter_column( "x", type_=Integer, existing_type=Boolean(create_constraint=True, name="ck1"), ) insp = inspect(self.conn) eq_( [ c["type"]._type_affinity for c in insp.get_columns("hasbool") if c["name"] == "x" ], [Integer], ) def test_no_net_change_timestamp(self): t = self._timestamp_fixture() import datetime with self.conn.begin(): self.conn.execute( t.insert(), {"x": datetime.datetime(2012, 5, 18, 15, 32, 5)} ) with self.op.batch_alter_table("hasts") as batch_op: batch_op.alter_column("x", type_=DateTime()) eq_( self.conn.execute(_select(t.c.x)).fetchall(), [(datetime.datetime(2012, 5, 18, 15, 32, 5),)], ) def test_no_net_change_timestamp_w_default(self): t = self._timestamp_w_expr_default_fixture() with self.op.batch_alter_table("hasts") as batch_op: batch_op.alter_column( "x", type_=DateTime(), nullable=False, server_default=self._datetime_server_default_fixture(), ) with self.conn.begin(): self.conn.execute(t.insert()) res = self.conn.execute(_select(t.c.x)) if sqla_14: assert res.scalar_one_or_none() is not None else: row = res.fetchone() assert row["x"] is not None def test_drop_col_schematype(self): self._boolean_fixture() with self.op.batch_alter_table("hasbool") as batch_op: batch_op.drop_column( "x", existing_type=Boolean(create_constraint=True, name="ck1") ) insp = inspect(self.conn) assert "x" not in (c["name"] for c in insp.get_columns("hasbool")) def test_change_type_int_to_boolean(self): self._int_to_boolean_fixture() with self.op.batch_alter_table("hasbool") as batch_op: batch_op.alter_column( "x", type_=Boolean(create_constraint=True, name="ck1") ) insp = inspect(self.conn) if exclusions.against(config, "sqlite"): eq_( [ c["type"]._type_affinity for c in insp.get_columns("hasbool") if c["name"] == "x" ], [Boolean], ) elif exclusions.against(config, "mysql"): eq_( [ c["type"]._type_affinity for c in insp.get_columns("hasbool") if c["name"] == "x" ], [Integer], ) def _assert_data(self, data, tablename="foo"): res = self.conn.execute(text("select * from %s" % tablename)) if sqla_14: res = res.mappings() eq_([dict(row) for row in res], data) def test_ix_existing(self): self._table_w_index_fixture() with self.op.batch_alter_table("t_w_ix") as batch_op: batch_op.alter_column("data", type_=String(30)) batch_op.create_index("ix_data", ["data"]) insp = inspect(self.conn) eq_( set( (ix["name"], tuple(ix["column_names"])) for ix in insp.get_indexes("t_w_ix") ), set([("ix_data", ("data",)), ("ix_thing", ("thing",))]), ) def test_fk_points_to_me_auto(self): self._test_fk_points_to_me("auto") # in particular, this tests that the failures # on PG and MySQL result in recovery of the batch system, # e.g. 
that the _alembic_tmp_temp table is dropped @config.requirements.no_referential_integrity def test_fk_points_to_me_recreate(self): self._test_fk_points_to_me("always") @exclusions.only_on("sqlite") @exclusions.fails( "intentionally asserting that this " "doesn't work w/ pragma foreign keys" ) def test_fk_points_to_me_sqlite_refinteg(self): with self._sqlite_referential_integrity(): self._test_fk_points_to_me("auto") def _test_fk_points_to_me(self, recreate): bar = Table( "bar", self.metadata, Column("id", Integer, primary_key=True), Column("foo_id", Integer, ForeignKey("foo.id")), mysql_engine="InnoDB", ) with self.conn.begin(): bar.create(self.conn) self.conn.execute(bar.insert(), {"id": 1, "foo_id": 3}) with self.op.batch_alter_table("foo", recreate=recreate) as batch_op: batch_op.alter_column( "data", new_column_name="newdata", existing_type=String(50) ) insp = inspect(self.conn) eq_( [ ( key["referred_table"], key["referred_columns"], key["constrained_columns"], ) for key in insp.get_foreign_keys("bar") ], [("foo", ["id"], ["foo_id"])], ) def test_selfref_fk_auto(self): self._test_selfref_fk("auto") @config.requirements.no_referential_integrity def test_selfref_fk_recreate(self): self._test_selfref_fk("always") @exclusions.only_on("sqlite") @exclusions.fails( "intentionally asserting that this " "doesn't work w/ pragma foreign keys" ) def test_selfref_fk_sqlite_refinteg(self): with self._sqlite_referential_integrity(): self._test_selfref_fk("auto") def _test_selfref_fk(self, recreate): bar = Table( "bar", self.metadata, Column("id", Integer, primary_key=True), Column("bar_id", Integer, ForeignKey("bar.id")), Column("data", String(50)), mysql_engine="InnoDB", ) with self.conn.begin(): bar.create(self.conn) self.conn.execute( bar.insert(), {"id": 1, "data": "x", "bar_id": None} ) self.conn.execute( bar.insert(), {"id": 2, "data": "y", "bar_id": 1} ) with self.op.batch_alter_table("bar", recreate=recreate) as batch_op: batch_op.alter_column( "data", new_column_name="newdata", existing_type=String(50) ) insp = inspect(self.conn) eq_( [ ( key["referred_table"], key["referred_columns"], key["constrained_columns"], ) for key in insp.get_foreign_keys("bar") ], [("bar", ["id"], ["bar_id"])], ) def test_change_type(self): with self.op.batch_alter_table("foo") as batch_op: batch_op.alter_column("data", type_=Integer) self._assert_data( [ {"id": 1, "data": 0, "x": 5}, {"id": 2, "data": 22, "x": 6}, {"id": 3, "data": 8, "x": 7}, {"id": 4, "data": 9, "x": 8}, {"id": 5, "data": 0, "x": 9}, ] ) def test_drop_column(self): with self.op.batch_alter_table("foo") as batch_op: batch_op.drop_column("data") self._assert_data( [ {"id": 1, "x": 5}, {"id": 2, "x": 6}, {"id": 3, "x": 7}, {"id": 4, "x": 8}, {"id": 5, "x": 9}, ] ) def test_drop_pk_col_readd_col(self): # drop a column, add it back without primary_key=True, should no # longer be in the constraint with self.op.batch_alter_table("foo") as batch_op: batch_op.drop_column("id") batch_op.add_column(Column("id", Integer)) pk_const = inspect(self.conn).get_pk_constraint("foo") eq_(pk_const["constrained_columns"], []) def test_drop_pk_col_readd_pk_col(self): # drop a column, add it back with primary_key=True, should remain with self.op.batch_alter_table("foo") as batch_op: batch_op.drop_column("id") batch_op.add_column(Column("id", Integer, primary_key=True)) pk_const = inspect(self.conn).get_pk_constraint("foo") eq_(pk_const["constrained_columns"], ["id"]) def test_drop_pk_col_readd_col_also_pk_const(self): # drop a column, add it back without 
primary_key=True, but then # also make anew PK constraint that includes it, should remain with self.op.batch_alter_table("foo") as batch_op: batch_op.drop_column("id") batch_op.add_column(Column("id", Integer)) batch_op.create_primary_key("newpk", ["id"]) pk_const = inspect(self.conn).get_pk_constraint("foo") eq_(pk_const["constrained_columns"], ["id"]) @testing.combinations(("always",), ("auto",), argnames="recreate") def test_add_pk_constraint(self, recreate): self._no_pk_fixture() with self.op.batch_alter_table("nopk", recreate=recreate) as batch_op: batch_op.create_primary_key("newpk", ["a", "b"]) pk_const = inspect(self.conn).get_pk_constraint("nopk") with config.requirements.reflects_pk_names.fail_if(): eq_(pk_const["name"], "newpk") eq_(pk_const["constrained_columns"], ["a", "b"]) @testing.combinations(("always",), ("auto",), argnames="recreate") @config.requirements.check_constraint_reflection def test_add_ck_constraint(self, recreate): with self.op.batch_alter_table("foo", recreate=recreate) as batch_op: batch_op.create_check_constraint("newck", text("x > 0")) ck_consts = inspect(self.conn).get_check_constraints("foo") ck_consts[0]["sqltext"] = re.sub( r"[\'\"`\(\)]", "", ck_consts[0]["sqltext"] ) eq_(ck_consts, [{"sqltext": "x > 0", "name": "newck"}]) @testing.combinations(("always",), ("auto",), argnames="recreate") @config.requirements.check_constraint_reflection def test_drop_ck_constraint(self, recreate): self._ck_constraint_fixture() with self.op.batch_alter_table( "ck_table", recreate=recreate ) as batch_op: batch_op.drop_constraint("ck", "check") ck_consts = inspect(self.conn).get_check_constraints("ck_table") eq_(ck_consts, []) @config.requirements.unnamed_constraints def test_drop_foreign_key(self): bar = Table( "bar", self.metadata, Column("id", Integer, primary_key=True), Column("foo_id", Integer, ForeignKey("foo.id")), mysql_engine="InnoDB", ) with self.conn.begin(): bar.create(self.conn) self.conn.execute(bar.insert(), {"id": 1, "foo_id": 3}) naming_convention = { "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s" } with self.op.batch_alter_table( "bar", naming_convention=naming_convention ) as batch_op: batch_op.drop_constraint("fk_bar_foo_id_foo", type_="foreignkey") eq_(inspect(self.conn).get_foreign_keys("bar"), []) def test_drop_column_fk_recreate(self): with self.op.batch_alter_table("foo", recreate="always") as batch_op: batch_op.drop_column("data") self._assert_data( [ {"id": 1, "x": 5}, {"id": 2, "x": 6}, {"id": 3, "x": 7}, {"id": 4, "x": 8}, {"id": 5, "x": 9}, ] ) def _assert_table_comment(self, tname, comment): insp = inspect(self.conn) tcomment = insp.get_table_comment(tname) eq_(tcomment, {"text": comment}) @testing.combinations(("always",), ("auto",), argnames="recreate") def test_add_uq(self, recreate): with self.op.batch_alter_table("foo", recreate=recreate) as batch_op: batch_op.create_unique_constraint("newuk", ["x"]) uq_consts = inspect(self.conn).get_unique_constraints("foo") eq_( [ {"name": uc["name"], "column_names": uc["column_names"]} for uc in uq_consts ], [{"name": "newuk", "column_names": ["x"]}], ) @testing.combinations(("always",), ("auto",), argnames="recreate") def test_add_uq_plus_col(self, recreate): with self.op.batch_alter_table("foo", recreate=recreate) as batch_op: batch_op.add_column(Column("y", Integer)) batch_op.create_unique_constraint("newuk", ["x", "y"]) uq_consts = inspect(self.conn).get_unique_constraints("foo") eq_( [ {"name": uc["name"], "column_names": uc["column_names"]} for uc in uq_consts ], 
[{"name": "newuk", "column_names": ["x", "y"]}], ) @config.requirements.comments def test_add_table_comment(self): with self.op.batch_alter_table("foo") as batch_op: batch_op.create_table_comment("some comment") self._assert_table_comment("foo", "some comment") with self.op.batch_alter_table("foo") as batch_op: batch_op.create_table_comment( "some new comment", existing_comment="some comment" ) self._assert_table_comment("foo", "some new comment") @config.requirements.comments def test_drop_table_comment(self): with self.op.batch_alter_table("foo") as batch_op: batch_op.create_table_comment("some comment") with self.op.batch_alter_table("foo") as batch_op: batch_op.drop_table_comment(existing_comment="some comment") self._assert_table_comment("foo", None) def _assert_column_comment(self, tname, cname, comment): insp = inspect(self.conn) cols = {col["name"]: col for col in insp.get_columns(tname)} eq_(cols[cname]["comment"], comment) @config.requirements.comments def test_add_column_comment(self): with self.op.batch_alter_table("foo") as batch_op: batch_op.add_column(Column("y", Integer, comment="some comment")) self._assert_column_comment("foo", "y", "some comment") self._assert_data( [ {"id": 1, "data": "d1", "x": 5, "y": None}, {"id": 2, "data": "22", "x": 6, "y": None}, {"id": 3, "data": "8.5", "x": 7, "y": None}, {"id": 4, "data": "9.46", "x": 8, "y": None}, {"id": 5, "data": "d5", "x": 9, "y": None}, ] ) @config.requirements.comments def test_add_column_comment_recreate(self): with self.op.batch_alter_table("foo", recreate="always") as batch_op: batch_op.add_column(Column("y", Integer, comment="some comment")) self._assert_column_comment("foo", "y", "some comment") self._assert_data( [ {"id": 1, "data": "d1", "x": 5, "y": None}, {"id": 2, "data": "22", "x": 6, "y": None}, {"id": 3, "data": "8.5", "x": 7, "y": None}, {"id": 4, "data": "9.46", "x": 8, "y": None}, {"id": 5, "data": "d5", "x": 9, "y": None}, ] ) @config.requirements.comments def test_alter_column_comment(self): with self.op.batch_alter_table("foo") as batch_op: batch_op.alter_column( "x", existing_type=Integer(), comment="some comment" ) self._assert_column_comment("foo", "x", "some comment") self._assert_data( [ {"id": 1, "data": "d1", "x": 5}, {"id": 2, "data": "22", "x": 6}, {"id": 3, "data": "8.5", "x": 7}, {"id": 4, "data": "9.46", "x": 8}, {"id": 5, "data": "d5", "x": 9}, ] ) @config.requirements.comments def test_alter_column_comment_recreate(self): with self.op.batch_alter_table("foo", recreate="always") as batch_op: batch_op.alter_column("x", comment="some comment") self._assert_column_comment("foo", "x", "some comment") self._assert_data( [ {"id": 1, "data": "d1", "x": 5}, {"id": 2, "data": "22", "x": 6}, {"id": 3, "data": "8.5", "x": 7}, {"id": 4, "data": "9.46", "x": 8}, {"id": 5, "data": "d5", "x": 9}, ] ) def test_rename_column(self): with self.op.batch_alter_table("foo") as batch_op: batch_op.alter_column("x", new_column_name="y") self._assert_data( [ {"id": 1, "data": "d1", "y": 5}, {"id": 2, "data": "22", "y": 6}, {"id": 3, "data": "8.5", "y": 7}, {"id": 4, "data": "9.46", "y": 8}, {"id": 5, "data": "d5", "y": 9}, ] ) def test_rename_column_boolean(self): bar = Table( "bar", self.metadata, Column("id", Integer, primary_key=True), Column("flag", Boolean(create_constraint=True)), mysql_engine="InnoDB", ) with self.conn.begin(): bar.create(self.conn) self.conn.execute(bar.insert(), {"id": 1, "flag": True}) self.conn.execute(bar.insert(), {"id": 2, "flag": False}) with self.op.batch_alter_table("bar") as 
batch_op: batch_op.alter_column( "flag", new_column_name="bflag", existing_type=Boolean ) self._assert_data( [{"id": 1, "bflag": True}, {"id": 2, "bflag": False}], "bar" ) # @config.requirements.check_constraint_reflection def test_rename_column_boolean_named_ck(self): bar = Table( "bar", self.metadata, Column("id", Integer, primary_key=True), Column("flag", Boolean(create_constraint=True, name="ck1")), mysql_engine="InnoDB", ) with self.conn.begin(): bar.create(self.conn) self.conn.execute(bar.insert(), {"id": 1, "flag": True}) self.conn.execute(bar.insert(), {"id": 2, "flag": False}) with self.op.batch_alter_table("bar", recreate="always") as batch_op: batch_op.alter_column( "flag", new_column_name="bflag", existing_type=Boolean(create_constraint=True, name="ck1"), ) self._assert_data( [{"id": 1, "bflag": True}, {"id": 2, "bflag": False}], "bar" ) @config.requirements.non_native_boolean def test_rename_column_non_native_boolean_no_ck(self): bar = Table( "bar", self.metadata, Column("id", Integer, primary_key=True), Column("flag", Boolean(create_constraint=False)), mysql_engine="InnoDB", ) with self.conn.begin(): bar.create(self.conn) self.conn.execute(bar.insert(), {"id": 1, "flag": True}) self.conn.execute(bar.insert(), {"id": 2, "flag": False}) self.conn.execute( # override Boolean type which as of 1.1 coerces numerics # to 1/0 text("insert into bar (id, flag) values (:id, :flag)"), {"id": 3, "flag": 5}, ) with self.op.batch_alter_table( "bar", reflect_args=[Column("flag", Boolean(create_constraint=False))], ) as batch_op: batch_op.alter_column( "flag", new_column_name="bflag", existing_type=Boolean ) self._assert_data( [ {"id": 1, "bflag": True}, {"id": 2, "bflag": False}, {"id": 3, "bflag": 5}, ], "bar", ) def test_drop_column_pk(self): with self.op.batch_alter_table("foo") as batch_op: batch_op.drop_column("id") self._assert_data( [ {"data": "d1", "x": 5}, {"data": "22", "x": 6}, {"data": "8.5", "x": 7}, {"data": "9.46", "x": 8}, {"data": "d5", "x": 9}, ] ) def test_rename_column_pk(self): with self.op.batch_alter_table("foo") as batch_op: batch_op.alter_column("id", new_column_name="ident") self._assert_data( [ {"ident": 1, "data": "d1", "x": 5}, {"ident": 2, "data": "22", "x": 6}, {"ident": 3, "data": "8.5", "x": 7}, {"ident": 4, "data": "9.46", "x": 8}, {"ident": 5, "data": "d5", "x": 9}, ] ) def test_add_column_auto(self): # note this uses ALTER with self.op.batch_alter_table("foo") as batch_op: batch_op.add_column( Column("data2", String(50), server_default="hi") ) self._assert_data( [ {"id": 1, "data": "d1", "x": 5, "data2": "hi"}, {"id": 2, "data": "22", "x": 6, "data2": "hi"}, {"id": 3, "data": "8.5", "x": 7, "data2": "hi"}, {"id": 4, "data": "9.46", "x": 8, "data2": "hi"}, {"id": 5, "data": "d5", "x": 9, "data2": "hi"}, ] ) eq_( [col["name"] for col in inspect(config.db).get_columns("foo")], ["id", "data", "x", "data2"], ) def test_add_column_auto_server_default_calculated(self): """test #883""" with self.op.batch_alter_table("foo") as batch_op: batch_op.add_column( Column( "data2", DateTime(), server_default=self._datetime_server_default_fixture(), ) ) self._assert_data( [ {"id": 1, "data": "d1", "x": 5, "data2": mock.ANY}, {"id": 2, "data": "22", "x": 6, "data2": mock.ANY}, {"id": 3, "data": "8.5", "x": 7, "data2": mock.ANY}, {"id": 4, "data": "9.46", "x": 8, "data2": mock.ANY}, {"id": 5, "data": "d5", "x": 9, "data2": mock.ANY}, ] ) eq_( [col["name"] for col in inspect(self.conn).get_columns("foo")], ["id", "data", "x", "data2"], ) @testing.combinations((True,), 
(False,)) @testing.exclusions.only_on("sqlite") @config.requirements.computed_columns def test_add_column_auto_generated(self, persisted): """test #883""" with self.op.batch_alter_table("foo") as batch_op: batch_op.add_column( Column( "data2", Integer, Computed("1 + 1", persisted=persisted) ) ) self._assert_data( [ {"id": 1, "data": "d1", "x": 5, "data2": 2}, {"id": 2, "data": "22", "x": 6, "data2": 2}, {"id": 3, "data": "8.5", "x": 7, "data2": 2}, {"id": 4, "data": "9.46", "x": 8, "data2": 2}, {"id": 5, "data": "d5", "x": 9, "data2": 2}, ] ) eq_( [col["name"] for col in inspect(self.conn).get_columns("foo")], ["id", "data", "x", "data2"], ) @config.requirements.identity_columns def test_add_column_auto_identity(self): """test #883""" self._no_pk_fixture() with self.op.batch_alter_table("nopk") as batch_op: batch_op.add_column(Column("id", Integer, Identity())) self._assert_data( [ {"a": 1, "b": 2, "c": 3, "id": 1}, {"a": 2, "b": 4, "c": 5, "id": 2}, ], tablename="nopk", ) eq_( [col["name"] for col in inspect(self.conn).get_columns("foo")], ["id", "data", "x"], ) def test_add_column_insert_before_recreate(self): with self.op.batch_alter_table("foo", recreate="always") as batch_op: batch_op.add_column( Column("data2", String(50), server_default="hi"), insert_before="data", ) self._assert_data( [ {"id": 1, "data": "d1", "x": 5, "data2": "hi"}, {"id": 2, "data": "22", "x": 6, "data2": "hi"}, {"id": 3, "data": "8.5", "x": 7, "data2": "hi"}, {"id": 4, "data": "9.46", "x": 8, "data2": "hi"}, {"id": 5, "data": "d5", "x": 9, "data2": "hi"}, ] ) eq_( [col["name"] for col in inspect(self.conn).get_columns("foo")], ["id", "data2", "data", "x"], ) def test_add_column_insert_after_recreate(self): with self.op.batch_alter_table("foo", recreate="always") as batch_op: batch_op.add_column( Column("data2", String(50), server_default="hi"), insert_after="data", ) self._assert_data( [ {"id": 1, "data": "d1", "x": 5, "data2": "hi"}, {"id": 2, "data": "22", "x": 6, "data2": "hi"}, {"id": 3, "data": "8.5", "x": 7, "data2": "hi"}, {"id": 4, "data": "9.46", "x": 8, "data2": "hi"}, {"id": 5, "data": "d5", "x": 9, "data2": "hi"}, ] ) eq_( [col["name"] for col in inspect(self.conn).get_columns("foo")], ["id", "data", "data2", "x"], ) def test_add_column_insert_before_raise_on_alter(self): def go(): with self.op.batch_alter_table("foo") as batch_op: batch_op.add_column( Column("data2", String(50), server_default="hi"), insert_before="data", ) assert_raises_message( alembic_exc.CommandError, "Can't specify insert_before or insert_after when using ALTER", go, ) def test_add_column_recreate(self): with self.op.batch_alter_table("foo", recreate="always") as batch_op: batch_op.add_column( Column("data2", String(50), server_default="hi") ) self._assert_data( [ {"id": 1, "data": "d1", "x": 5, "data2": "hi"}, {"id": 2, "data": "22", "x": 6, "data2": "hi"}, {"id": 3, "data": "8.5", "x": 7, "data2": "hi"}, {"id": 4, "data": "9.46", "x": 8, "data2": "hi"}, {"id": 5, "data": "d5", "x": 9, "data2": "hi"}, ] ) eq_( [col["name"] for col in inspect(self.conn).get_columns("foo")], ["id", "data", "x", "data2"], ) def test_create_drop_index(self): insp = inspect(self.conn) eq_(insp.get_indexes("foo"), []) with self.op.batch_alter_table("foo", recreate="always") as batch_op: batch_op.create_index("ix_data", ["data"], unique=True) self._assert_data( [ {"id": 1, "data": "d1", "x": 5}, {"id": 2, "data": "22", "x": 6}, {"id": 3, "data": "8.5", "x": 7}, {"id": 4, "data": "9.46", "x": 8}, {"id": 5, "data": "d5", "x": 9}, ] ) insp = 
inspect(self.conn) eq_( [ dict( unique=ix["unique"], name=ix["name"], column_names=ix["column_names"], ) for ix in insp.get_indexes("foo") ], [{"unique": True, "name": "ix_data", "column_names": ["data"]}], ) with self.op.batch_alter_table("foo", recreate="always") as batch_op: batch_op.drop_index("ix_data") insp = inspect(self.conn) eq_(insp.get_indexes("foo"), []) class BatchRoundTripMySQLTest(BatchRoundTripTest): __only_on__ = "mysql", "mariadb" __backend__ = True def _datetime_server_default_fixture(self): return func.current_timestamp() @exclusions.fails() def test_drop_pk_col_readd_pk_col(self): super(BatchRoundTripMySQLTest, self).test_drop_pk_col_readd_pk_col() @exclusions.fails() def test_drop_pk_col_readd_col_also_pk_const(self): super( BatchRoundTripMySQLTest, self ).test_drop_pk_col_readd_col_also_pk_const() @exclusions.fails() def test_rename_column_pk(self): super(BatchRoundTripMySQLTest, self).test_rename_column_pk() @exclusions.fails() def test_rename_column(self): super(BatchRoundTripMySQLTest, self).test_rename_column() @exclusions.fails() def test_change_type(self): super(BatchRoundTripMySQLTest, self).test_change_type() def test_create_drop_index(self): super(BatchRoundTripMySQLTest, self).test_create_drop_index() # fails on mariadb 10.2, succeeds on 10.3 @exclusions.fails_if(config.requirements.mysql_check_col_name_change) def test_rename_column_boolean(self): super(BatchRoundTripMySQLTest, self).test_rename_column_boolean() def test_change_type_boolean_to_int(self): super(BatchRoundTripMySQLTest, self).test_change_type_boolean_to_int() def test_change_type_int_to_boolean(self): super(BatchRoundTripMySQLTest, self).test_change_type_int_to_boolean() class BatchRoundTripPostgresqlTest(BatchRoundTripTest): __only_on__ = "postgresql" __backend__ = True def _native_boolean_fixture(self): t = Table( "has_native_bool", self.metadata, Column( "x", Boolean(create_constraint=True), server_default="false", nullable=False, ), Column("y", Integer), ) with self.conn.begin(): t.create(self.conn) def _datetime_server_default_fixture(self): return func.current_timestamp() @exclusions.fails() def test_drop_pk_col_readd_pk_col(self): super( BatchRoundTripPostgresqlTest, self ).test_drop_pk_col_readd_pk_col() @exclusions.fails() def test_drop_pk_col_readd_col_also_pk_const(self): super( BatchRoundTripPostgresqlTest, self ).test_drop_pk_col_readd_col_also_pk_const() @exclusions.fails() def test_change_type(self): super(BatchRoundTripPostgresqlTest, self).test_change_type() def test_create_drop_index(self): super(BatchRoundTripPostgresqlTest, self).test_create_drop_index() @exclusions.fails() def test_change_type_int_to_boolean(self): super( BatchRoundTripPostgresqlTest, self ).test_change_type_int_to_boolean() @exclusions.fails() def test_change_type_boolean_to_int(self): super( BatchRoundTripPostgresqlTest, self ).test_change_type_boolean_to_int() def test_add_col_table_has_native_boolean(self): self._native_boolean_fixture() # to ensure test coverage on SQLAlchemy 1.4 and above, # force the create_constraint flag to True even though it # defaults to false in 1.4. 
this test wants to ensure that the # "should create" rule is consulted def listen_for_reflect(inspector, table, column_info): if isinstance(column_info["type"], Boolean): column_info["type"].create_constraint = True with self.op.batch_alter_table( "has_native_bool", recreate="always", reflect_kwargs={ "listeners": [("column_reflect", listen_for_reflect)] }, ) as batch_op: batch_op.add_column(Column("data", Integer)) insp = inspect(self.conn) eq_( [ c["type"]._type_affinity for c in insp.get_columns("has_native_bool") if c["name"] == "data" ], [Integer], ) eq_( [ c["type"]._type_affinity for c in insp.get_columns("has_native_bool") if c["name"] == "x" ], [Boolean], ) alembic-rel_1_7_6/tests/test_bulk_insert.py000066400000000000000000000242431417624537100212400ustar00rootroot00000000000000from sqlalchemy import Column from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import String from sqlalchemy import Table from sqlalchemy import text from sqlalchemy.sql import column from sqlalchemy.sql import table from sqlalchemy.types import TypeEngine from alembic import op from alembic.migration import MigrationContext from alembic.testing import assert_raises_message from alembic.testing import config from alembic.testing import eq_ from alembic.testing.fixtures import op_fixture from alembic.testing.fixtures import TestBase class BulkInsertTest(TestBase): def _table_fixture(self, dialect, as_sql): context = op_fixture(dialect, as_sql) t1 = table( "ins_table", column("id", Integer), column("v1", String()), column("v2", String()), ) return context, t1 def _big_t_table_fixture(self, dialect, as_sql): context = op_fixture(dialect, as_sql) t1 = Table( "ins_table", MetaData(), Column("id", Integer, primary_key=True), Column("v1", String()), Column("v2", String()), ) return context, t1 def _test_bulk_insert(self, dialect, as_sql): context, t1 = self._table_fixture(dialect, as_sql) op.bulk_insert( t1, [ {"id": 1, "v1": "row v1", "v2": "row v5"}, {"id": 2, "v1": "row v2", "v2": "row v6"}, {"id": 3, "v1": "row v3", "v2": "row v7"}, {"id": 4, "v1": "row v4", "v2": "row v8"}, ], ) return context def _test_bulk_insert_single(self, dialect, as_sql): context, t1 = self._table_fixture(dialect, as_sql) op.bulk_insert(t1, [{"id": 1, "v1": "row v1", "v2": "row v5"}]) return context def _test_bulk_insert_single_bigt(self, dialect, as_sql): context, t1 = self._big_t_table_fixture(dialect, as_sql) op.bulk_insert(t1, [{"id": 1, "v1": "row v1", "v2": "row v5"}]) return context def test_bulk_insert(self): context = self._test_bulk_insert("default", False) context.assert_( "INSERT INTO ins_table (id, v1, v2) VALUES (:id, :v1, :v2)" ) def test_bulk_insert_wrong_cols(self): context = op_fixture("postgresql") t1 = table( "ins_table", column("id", Integer), column("v1", String()), column("v2", String()), ) op.bulk_insert(t1, [{"v1": "row v1"}]) context.assert_( "INSERT INTO ins_table (id, v1, v2) " "VALUES (%(id)s, %(v1)s, %(v2)s)" ) def test_bulk_insert_no_rows(self): context, t1 = self._table_fixture("default", False) op.bulk_insert(t1, []) context.assert_() def test_bulk_insert_pg(self): context = self._test_bulk_insert("postgresql", False) context.assert_( "INSERT INTO ins_table (id, v1, v2) " "VALUES (%(id)s, %(v1)s, %(v2)s)" ) def test_bulk_insert_pg_single(self): context = self._test_bulk_insert_single("postgresql", False) context.assert_( "INSERT INTO ins_table (id, v1, v2) " "VALUES (%(id)s, %(v1)s, %(v2)s)" ) def test_bulk_insert_pg_single_as_sql(self): context = 
self._test_bulk_insert_single("postgresql", True) context.assert_( "INSERT INTO ins_table (id, v1, v2) VALUES (1, 'row v1', 'row v5')" ) def test_bulk_insert_pg_single_big_t_as_sql(self): context = self._test_bulk_insert_single_bigt("postgresql", True) context.assert_( "INSERT INTO ins_table (id, v1, v2) " "VALUES (1, 'row v1', 'row v5')" ) def test_bulk_insert_mssql(self): context = self._test_bulk_insert("mssql", False) context.assert_( "INSERT INTO ins_table (id, v1, v2) VALUES (:id, :v1, :v2)" ) def test_bulk_insert_inline_literal_as_sql(self): context = op_fixture("postgresql", True) class MyType(TypeEngine): pass t1 = table("t", column("id", Integer), column("data", MyType())) op.bulk_insert( t1, [ {"id": 1, "data": op.inline_literal("d1")}, {"id": 2, "data": op.inline_literal("d2")}, ], ) context.assert_( "INSERT INTO t (id, data) VALUES (1, 'd1')", "INSERT INTO t (id, data) VALUES (2, 'd2')", ) def test_bulk_insert_as_sql(self): context = self._test_bulk_insert("default", True) context.assert_( "INSERT INTO ins_table (id, v1, v2) " "VALUES (1, 'row v1', 'row v5')", "INSERT INTO ins_table (id, v1, v2) " "VALUES (2, 'row v2', 'row v6')", "INSERT INTO ins_table (id, v1, v2) " "VALUES (3, 'row v3', 'row v7')", "INSERT INTO ins_table (id, v1, v2) " "VALUES (4, 'row v4', 'row v8')", ) def test_bulk_insert_as_sql_pg(self): context = self._test_bulk_insert("postgresql", True) context.assert_( "INSERT INTO ins_table (id, v1, v2) " "VALUES (1, 'row v1', 'row v5')", "INSERT INTO ins_table (id, v1, v2) " "VALUES (2, 'row v2', 'row v6')", "INSERT INTO ins_table (id, v1, v2) " "VALUES (3, 'row v3', 'row v7')", "INSERT INTO ins_table (id, v1, v2) " "VALUES (4, 'row v4', 'row v8')", ) def test_bulk_insert_as_sql_mssql(self): context = self._test_bulk_insert("mssql", True) # SQL server requires IDENTITY_INSERT # TODO: figure out if this is safe to enable for a table that # doesn't have an IDENTITY column context.assert_( "SET IDENTITY_INSERT ins_table ON", "GO", "INSERT INTO ins_table (id, v1, v2) " "VALUES (1, 'row v1', 'row v5')", "GO", "INSERT INTO ins_table (id, v1, v2) " "VALUES (2, 'row v2', 'row v6')", "GO", "INSERT INTO ins_table (id, v1, v2) " "VALUES (3, 'row v3', 'row v7')", "GO", "INSERT INTO ins_table (id, v1, v2) " "VALUES (4, 'row v4', 'row v8')", "GO", "SET IDENTITY_INSERT ins_table OFF", "GO", ) def test_bulk_insert_from_new_table(self): context = op_fixture("postgresql", True) t1 = op.create_table( "ins_table", Column("id", Integer), Column("v1", String()), Column("v2", String()), ) op.bulk_insert( t1, [ {"id": 1, "v1": "row v1", "v2": "row v5"}, {"id": 2, "v1": "row v2", "v2": "row v6"}, ], ) context.assert_( "CREATE TABLE ins_table (id INTEGER, v1 VARCHAR, v2 VARCHAR)", "INSERT INTO ins_table (id, v1, v2) VALUES " "(1, 'row v1', 'row v5')", "INSERT INTO ins_table (id, v1, v2) VALUES " "(2, 'row v2', 'row v6')", ) def test_invalid_format(self): context, t1 = self._table_fixture("sqlite", False) assert_raises_message( TypeError, "List expected", op.bulk_insert, t1, {"id": 5} ) assert_raises_message( TypeError, "List of dictionaries expected", op.bulk_insert, t1, [(5,)], ) class RoundTripTest(TestBase): __only_on__ = "sqlite" def setUp(self): self.conn = config.db.connect() with self.conn.begin(): self.conn.execute( text( """ create table foo( id integer primary key, data varchar(50), x integer ) """ ) ) context = MigrationContext.configure(self.conn) self.op = op.Operations(context) self.t1 = table("foo", column("id"), column("data"), column("x")) self.trans = self.conn.begin() 
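# the transaction opened at the end of setUp is rolled back in tearDown, so rows inserted by each
# round-trip test are discarded before the table is dropped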
def tearDown(self): self.trans.rollback() with self.conn.begin(): self.conn.execute(text("drop table foo")) self.conn.close() def test_single_insert_round_trip(self): self.op.bulk_insert(self.t1, [{"data": "d1", "x": "x1"}]) eq_( self.conn.execute(text("select id, data, x from foo")).fetchall(), [(1, "d1", "x1")], ) def test_bulk_insert_round_trip(self): self.op.bulk_insert( self.t1, [ {"data": "d1", "x": "x1"}, {"data": "d2", "x": "x2"}, {"data": "d3", "x": "x3"}, ], ) eq_( self.conn.execute(text("select id, data, x from foo")).fetchall(), [(1, "d1", "x1"), (2, "d2", "x2"), (3, "d3", "x3")], ) def test_bulk_insert_inline_literal(self): class MyType(TypeEngine): pass t1 = table("foo", column("id", Integer), column("data", MyType())) self.op.bulk_insert( t1, [ {"id": 1, "data": self.op.inline_literal("d1")}, {"id": 2, "data": self.op.inline_literal("d2")}, ], multiinsert=False, ) eq_( self.conn.execute(text("select id, data from foo")).fetchall(), [(1, "d1"), (2, "d2")], ) def test_bulk_insert_from_new_table(self): t1 = self.op.create_table( "ins_table", Column("id", Integer), Column("v1", String()), Column("v2", String()), ) self.op.bulk_insert( t1, [ {"id": 1, "v1": "row v1", "v2": "row v5"}, {"id": 2, "v1": "row v2", "v2": "row v6"}, ], ) eq_( self.conn.execute( text("select id, v1, v2 from ins_table order by id") ).fetchall(), [(1, u"row v1", u"row v5"), (2, u"row v2", u"row v6")], ) alembic-rel_1_7_6/tests/test_command.py000066400000000000000000001223111417624537100203300ustar00rootroot00000000000000from contextlib import contextmanager import inspect from io import BytesIO from io import StringIO from io import TextIOWrapper import os import re from typing import cast from sqlalchemy import exc as sqla_exc from sqlalchemy import text from sqlalchemy.engine import Engine from alembic import __version__ from alembic import command from alembic import config from alembic import util from alembic.script import ScriptDirectory from alembic.testing import assert_raises from alembic.testing import assert_raises_message from alembic.testing import eq_ from alembic.testing import is_false from alembic.testing import is_true from alembic.testing import mock from alembic.testing.env import _get_staging_directory from alembic.testing.env import _no_sql_testing_config from alembic.testing.env import _sqlite_file_db from alembic.testing.env import _sqlite_testing_config from alembic.testing.env import clear_staging_env from alembic.testing.env import env_file_fixture from alembic.testing.env import multi_heads_fixture from alembic.testing.env import staging_env from alembic.testing.env import three_rev_fixture from alembic.testing.env import write_script from alembic.testing.fixtures import capture_context_buffer from alembic.testing.fixtures import capture_engine_context_buffer from alembic.testing.fixtures import TestBase from alembic.util.sqla_compat import _connectable_has_table class _BufMixin: def _buf_fixture(self): # try to simulate how sys.stdout looks - we send it u'' # but then it's trying to encode to something. 
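# wrapping a BytesIO in a TextIOWrapper gives the command a text-mode stream to write to, while the
# patched getvalue() below still exposes the raw encoded bytes for the assertions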
buf = BytesIO() wrapper = TextIOWrapper(buf, encoding="ascii", line_buffering=True) wrapper.getvalue = buf.getvalue return wrapper class HistoryTest(_BufMixin, TestBase): @classmethod def setup_class(cls): cls.env = staging_env() cls.cfg = _sqlite_testing_config() cls.a, cls.b, cls.c = three_rev_fixture(cls.cfg) cls._setup_env_file() @classmethod def teardown_class(cls): clear_staging_env() def teardown(self): self.cfg.set_main_option("revision_environment", "false") @classmethod def _setup_env_file(self): env_file_fixture( r""" from sqlalchemy import MetaData, engine_from_config target_metadata = MetaData() engine = engine_from_config( config.get_section(config.config_ini_section), prefix='sqlalchemy.') connection = engine.connect() context.configure( connection=connection, target_metadata=target_metadata ) try: with context.begin_transaction(): config.stdout.write(u"environment included OK\n") context.run_migrations() finally: connection.close() """ ) def _eq_cmd_output(self, buf, expected, env_token=False, currents=()): script = ScriptDirectory.from_config(self.cfg) # test default encode/decode behavior as well, # rev B has a non-ascii char in it + a coding header. assert_lines = [] for _id in expected: rev = script.get_revision(_id) if _id in currents: rev._db_current_indicator = True assert_lines.append(rev.log_entry) if env_token: assert_lines.insert(0, "environment included OK") actual = ( buf.getvalue() .decode("ascii", "replace") .replace(os.linesep, "\n") .strip() ) eq_( actual, "\n".join(assert_lines) .encode("ascii", "replace") .decode("ascii") .strip(), ) def test_history_full(self): self.cfg.stdout = buf = self._buf_fixture() command.history(self.cfg, verbose=True) self._eq_cmd_output(buf, [self.c, self.b, self.a]) def test_history_full_environment(self): self.cfg.stdout = buf = self._buf_fixture() self.cfg.set_main_option("revision_environment", "true") command.history(self.cfg, verbose=True) self._eq_cmd_output(buf, [self.c, self.b, self.a], env_token=True) def test_history_num_range(self): self.cfg.stdout = buf = self._buf_fixture() command.history(self.cfg, "%s:%s" % (self.a, self.b), verbose=True) self._eq_cmd_output(buf, [self.b, self.a]) def test_history_num_range_environment(self): self.cfg.stdout = buf = self._buf_fixture() self.cfg.set_main_option("revision_environment", "true") command.history(self.cfg, "%s:%s" % (self.a, self.b), verbose=True) self._eq_cmd_output(buf, [self.b, self.a], env_token=True) def test_history_base_to_num(self): self.cfg.stdout = buf = self._buf_fixture() command.history(self.cfg, ":%s" % (self.b), verbose=True) self._eq_cmd_output(buf, [self.b, self.a]) def test_history_num_to_head(self): self.cfg.stdout = buf = self._buf_fixture() command.history(self.cfg, "%s:" % (self.a), verbose=True) self._eq_cmd_output(buf, [self.c, self.b, self.a]) def test_history_num_to_head_environment(self): self.cfg.stdout = buf = self._buf_fixture() self.cfg.set_main_option("revision_environment", "true") command.history(self.cfg, "%s:" % (self.a), verbose=True) self._eq_cmd_output(buf, [self.c, self.b, self.a], env_token=True) def test_history_num_plus_relative(self): self.cfg.stdout = buf = self._buf_fixture() command.history(self.cfg, "%s:+2" % (self.a), verbose=True) self._eq_cmd_output(buf, [self.c, self.b, self.a]) def test_history_relative_to_num(self): self.cfg.stdout = buf = self._buf_fixture() command.history(self.cfg, "-2:%s" % (self.c), verbose=True) self._eq_cmd_output(buf, [self.c, self.b, self.a]) def 
test_history_too_large_relative_to_num(self): self.cfg.stdout = buf = self._buf_fixture() command.history(self.cfg, "-5:%s" % (self.c), verbose=True) self._eq_cmd_output(buf, [self.c, self.b, self.a]) def test_history_current_to_head_as_b(self): command.stamp(self.cfg, self.b) self.cfg.stdout = buf = self._buf_fixture() command.history(self.cfg, "current:", verbose=True) self._eq_cmd_output(buf, [self.c, self.b], env_token=True) def test_history_current_to_head_as_base(self): command.stamp(self.cfg, "base") self.cfg.stdout = buf = self._buf_fixture() command.history(self.cfg, "current:", verbose=True) self._eq_cmd_output(buf, [self.c, self.b, self.a], env_token=True) def test_history_include_env(self): self.cfg.stdout = buf = self._buf_fixture() self.cfg.set_main_option("revision_environment", "true") command.history(self.cfg, verbose=True) self._eq_cmd_output(buf, [self.c, self.b, self.a], env_token=True) def test_history_indicate_current(self): command.stamp(self.cfg, (self.b,)) self.cfg.stdout = buf = self._buf_fixture() command.history(self.cfg, indicate_current=True, verbose=True) self._eq_cmd_output( buf, [self.c, self.b, self.a], currents=(self.b,), env_token=True ) class CurrentTest(_BufMixin, TestBase): @classmethod def setup_class(cls): cls.bind = _sqlite_file_db() cls.env = env = staging_env() cls.cfg = _sqlite_testing_config() cls.a1 = env.generate_revision("a1", "a1") cls.a2 = env.generate_revision("a2", "a2") cls.a3 = env.generate_revision("a3", "a3") cls.b1 = env.generate_revision("b1", "b1", head="base") cls.b2 = env.generate_revision("b2", "b2", head="b1", depends_on="a2") cls.b3 = env.generate_revision("b3", "b3", head="b2") @classmethod def teardown_class(cls): clear_staging_env() @contextmanager def _assert_lines(self, revs): self.cfg.stdout = buf = self._buf_fixture() yield lines = set( [ re.match(r"(^.\w)", elem).group(1) for elem in re.split( "\n", buf.getvalue().decode("ascii", "replace").strip() ) if elem ] ) eq_(lines, set(revs)) def test_doesnt_create_alembic_version(self): command.current(self.cfg) engine = self.bind with engine.connect() as conn: is_false(_connectable_has_table(conn, "alembic_version", None)) def test_no_current(self): command.stamp(self.cfg, ()) with self._assert_lines([]): command.current(self.cfg) def test_plain_current(self): command.stamp(self.cfg, ()) command.stamp(self.cfg, self.a3.revision) with self._assert_lines(["a3"]): command.current(self.cfg) def test_current_obfuscate_password(self): eq_( util.obfuscate_url_pw("postgresql://scott:tiger@localhost/test"), "postgresql://scott:XXXXX@localhost/test", ) def test_two_heads(self): command.stamp(self.cfg, ()) command.stamp(self.cfg, (self.a1.revision, self.b1.revision)) with self._assert_lines(["a1", "b1"]): command.current(self.cfg) def test_heads_one_is_dependent(self): command.stamp(self.cfg, ()) command.stamp(self.cfg, (self.b2.revision,)) with self._assert_lines(["a2", "b2"]): command.current(self.cfg) def test_heads_upg(self): command.stamp(self.cfg, (self.b2.revision,)) command.upgrade(self.cfg, (self.b3.revision)) with self._assert_lines(["a2", "b3"]): command.current(self.cfg) class RevisionTest(TestBase): def setUp(self): self.env = staging_env() self.cfg = _sqlite_testing_config() def tearDown(self): clear_staging_env() def _env_fixture(self, version_table_pk=True): env_file_fixture( """ from sqlalchemy import MetaData, engine_from_config target_metadata = MetaData() engine = engine_from_config( config.get_section(config.config_ini_section), prefix='sqlalchemy.') connection = 
engine.connect()
context.configure(
    connection=connection, target_metadata=target_metadata,
    version_table_pk=%r
)
try:
    with context.begin_transaction():
        context.run_migrations()
finally:
    connection.close()
"""
            % (version_table_pk,)
        )

    def test_create_rev_plain_db_not_up_to_date(self):
        self._env_fixture()
        command.revision(self.cfg)
        command.revision(self.cfg)  # no problem

    def test_create_rev_autogen(self):
        self._env_fixture()
        command.revision(self.cfg, autogenerate=True)

    def test_create_rev_autogen_db_not_up_to_date(self):
        self._env_fixture()
        assert command.revision(self.cfg)
        assert_raises_message(
            util.CommandError,
            "Target database is not up to date.",
            command.revision,
            self.cfg,
            autogenerate=True,
        )

    def test_create_rev_autogen_db_not_up_to_date_multi_heads(self):
        self._env_fixture()
        command.revision(self.cfg)
        rev2 = command.revision(self.cfg)
        rev3a = command.revision(self.cfg)
        command.revision(self.cfg, head=rev2.revision, splice=True)
        command.upgrade(self.cfg, "heads")
        command.revision(self.cfg, head=rev3a.revision)

        assert_raises_message(
            util.CommandError,
            "Target database is not up to date.",
            command.revision,
            self.cfg,
            autogenerate=True,
        )

    def test_create_rev_plain_db_not_up_to_date_multi_heads(self):
        self._env_fixture()
        command.revision(self.cfg)
        rev2 = command.revision(self.cfg)
        rev3a = command.revision(self.cfg)
        command.revision(self.cfg, head=rev2.revision, splice=True)
        command.upgrade(self.cfg, "heads")
        command.revision(self.cfg, head=rev3a.revision)

        assert_raises_message(
            util.CommandError,
            "Multiple heads are present; please specify the head revision "
            "on which the new revision should be based, or perform a merge.",
            command.revision,
            self.cfg,
        )

    def test_create_rev_autogen_need_to_select_head(self):
        self._env_fixture()
        command.revision(self.cfg)
        rev2 = command.revision(self.cfg)
        command.revision(self.cfg)
        command.revision(self.cfg, head=rev2.revision, splice=True)
        command.upgrade(self.cfg, "heads")
        # there's multiple heads present
        assert_raises_message(
            util.CommandError,
            "Multiple heads are present; please specify the head revision "
            "on which the new revision should be based, or perform a merge.",
            command.revision,
            self.cfg,
            autogenerate=True,
        )

    def test_pk_constraint_normally_prevents_dupe_rows(self):
        self._env_fixture()
        command.revision(self.cfg)
        r2 = command.revision(self.cfg)
        db = _sqlite_file_db()
        command.upgrade(self.cfg, "head")
        with db.connect() as conn:
            assert_raises(
                sqla_exc.IntegrityError,
                conn.execute,
                text(
                    "insert into alembic_version values ('%s')" % r2.revision
                ),
            )

    def test_err_correctly_raised_on_dupe_rows_no_pk(self):
        self._env_fixture(version_table_pk=False)
        command.revision(self.cfg)
        r2 = command.revision(self.cfg)
        db = _sqlite_file_db()
        command.upgrade(self.cfg, "head")
        with db.begin() as conn:
            conn.execute(
                text("insert into alembic_version values ('%s')" % r2.revision)
            )
        assert_raises_message(
            util.CommandError,
            "Online migration expected to match one row when "
            "updating .* in 'alembic_version'; 2 found",
            command.downgrade,
            self.cfg,
            "-1",
        )

    def test_create_rev_plain_need_to_select_head(self):
        self._env_fixture()
        command.revision(self.cfg)
        rev2 = command.revision(self.cfg)
        command.revision(self.cfg)
        command.revision(self.cfg, head=rev2.revision, splice=True)
        command.upgrade(self.cfg, "heads")
        # there's multiple heads present
        assert_raises_message(
            util.CommandError,
            "Multiple heads are present; please specify the head revision "
            "on which the new revision should be based, or perform a merge.",
            command.revision,
            self.cfg,
        )

    def
test_create_rev_plain_post_merge(self): self._env_fixture() command.revision(self.cfg) rev2 = command.revision(self.cfg) command.revision(self.cfg) command.revision(self.cfg, head=rev2.revision, splice=True) command.merge(self.cfg, "heads") command.revision(self.cfg) def test_create_rev_autogenerate_post_merge(self): self._env_fixture() command.revision(self.cfg) rev2 = command.revision(self.cfg) command.revision(self.cfg) command.revision(self.cfg, head=rev2.revision, splice=True) command.merge(self.cfg, "heads") command.upgrade(self.cfg, "heads") command.revision(self.cfg, autogenerate=True) def test_create_rev_depends_on(self): self._env_fixture() command.revision(self.cfg) rev2 = command.revision(self.cfg) rev3 = command.revision(self.cfg, depends_on=rev2.revision) eq_(rev3._resolved_dependencies, (rev2.revision,)) rev4 = command.revision( self.cfg, depends_on=[rev2.revision, rev3.revision] ) eq_(rev4._resolved_dependencies, (rev2.revision, rev3.revision)) def test_create_rev_depends_on_branch_label(self): self._env_fixture() command.revision(self.cfg) rev2 = command.revision(self.cfg, branch_label="foobar") rev3 = command.revision(self.cfg, depends_on="foobar") eq_(rev3.dependencies, "foobar") eq_(rev3._resolved_dependencies, (rev2.revision,)) def test_create_rev_depends_on_partial_revid(self): self._env_fixture() command.revision(self.cfg) rev2 = command.revision(self.cfg) assert len(rev2.revision) > 7 rev3 = command.revision(self.cfg, depends_on=rev2.revision[0:4]) eq_(rev3.dependencies, rev2.revision) eq_(rev3._resolved_dependencies, (rev2.revision,)) def test_create_rev_invalid_depends_on(self): self._env_fixture() command.revision(self.cfg) assert_raises_message( util.CommandError, "Can't locate revision identified by 'invalid'", command.revision, self.cfg, depends_on="invalid", ) def test_create_rev_autogenerate_db_not_up_to_date_post_merge(self): self._env_fixture() command.revision(self.cfg) rev2 = command.revision(self.cfg) command.revision(self.cfg) command.revision(self.cfg, head=rev2.revision, splice=True) command.upgrade(self.cfg, "heads") command.merge(self.cfg, "heads") assert_raises_message( util.CommandError, "Target database is not up to date.", command.revision, self.cfg, autogenerate=True, ) def test_nonsensical_sql_mode_autogen(self): self._env_fixture() assert_raises_message( util.CommandError, "Using --sql with --autogenerate does not make any sense", command.revision, self.cfg, autogenerate=True, sql=True, ) def test_nonsensical_sql_no_env(self): self._env_fixture() assert_raises_message( util.CommandError, "Using --sql with the revision command when revision_environment " "is not configured does not make any sense", command.revision, self.cfg, sql=True, ) def test_sensical_sql_w_env(self): self._env_fixture() self.cfg.set_main_option("revision_environment", "true") command.revision(self.cfg, sql=True) class _StampTest: def _assert_sql(self, emitted_sql, origin, destinations): ins_expr = ( r"INSERT INTO alembic_version \(version_num\) " r"VALUES \('(.+)'\)" ) expected = [ins_expr for elem in destinations] if origin: expected[0] = ( "UPDATE alembic_version SET version_num='(.+)' WHERE " "alembic_version.version_num = '%s'" % (origin,) ) for line in emitted_sql.split("\n"): if not expected: assert not re.match( ins_expr, line ), "additional inserts were emitted" else: m = re.match(expected[0], line) if m: destinations.remove(m.group(1)) expected.pop(0) assert not expected, "lines remain" class StampMultipleRootsTest(TestBase, _StampTest): def setUp(self): 
        self.env = staging_env()
        # self.cfg = cfg = _no_sql_testing_config()
        self.cfg = cfg = _sqlite_testing_config()
        # cfg.set_main_option("dialect_name", "sqlite")
        # cfg.remove_main_option("url")

        self.a1, self.b1, self.c1 = three_rev_fixture(cfg)
        self.a2, self.b2, self.c2 = three_rev_fixture(cfg)

    def tearDown(self):
        clear_staging_env()

    def test_sql_stamp_heads(self):
        with capture_context_buffer() as buf:
            command.stamp(self.cfg, ["heads"], sql=True)

        self._assert_sql(buf.getvalue(), None, {self.c1, self.c2})

    def test_sql_stamp_single_head(self):
        with capture_context_buffer() as buf:
            command.stamp(self.cfg, ["%s@head" % self.c1], sql=True)

        self._assert_sql(buf.getvalue(), None, {self.c1})


class StampMultipleHeadsTest(TestBase, _StampTest):
    def setUp(self):
        self.env = staging_env()
        # self.cfg = cfg = _no_sql_testing_config()
        self.cfg = cfg = _sqlite_testing_config()
        # cfg.set_main_option("dialect_name", "sqlite")
        # cfg.remove_main_option("url")

        self.a, self.b, self.c = three_rev_fixture(cfg)
        self.d, self.e, self.f = multi_heads_fixture(
            cfg, self.a, self.b, self.c
        )

    def tearDown(self):
        clear_staging_env()

    def test_sql_stamp_heads(self):
        with capture_context_buffer() as buf:
            command.stamp(self.cfg, ["heads"], sql=True)

        self._assert_sql(buf.getvalue(), None, {self.c, self.e, self.f})

    def test_sql_stamp_multi_rev_nonsensical(self):
        with capture_context_buffer() as buf:
            command.stamp(self.cfg, [self.a, self.e, self.f], sql=True)

        # TODO: this shouldn't be possible, because e/f require b as a
        # dependency
        self._assert_sql(buf.getvalue(), None, {self.a, self.e, self.f})

    def test_sql_stamp_multi_rev_from_multi_base_nonsensical(self):
        with capture_context_buffer() as buf:
            command.stamp(
                self.cfg,
                ["base:%s" % self.a, "base:%s" % self.e, "base:%s" % self.f],
                sql=True,
            )

        # TODO: this shouldn't be possible, because e/f require b as a
        # dependency
        self._assert_sql(buf.getvalue(), None, {self.a, self.e, self.f})

    def test_online_stamp_multi_rev_nonsensical(self):
        with capture_engine_context_buffer() as buf:
            command.stamp(self.cfg, [self.a, self.e, self.f])

        # TODO: this shouldn't be possible, because e/f require b as a
        # dependency
        self._assert_sql(buf.getvalue(), None, {self.a, self.e, self.f})

    def test_online_stamp_multi_rev_from_real_ancestor(self):
        command.stamp(self.cfg, [self.a])

        with capture_engine_context_buffer() as buf:
            command.stamp(self.cfg, [self.e, self.f])

        self._assert_sql(buf.getvalue(), self.a, {self.e, self.f})

    def test_online_stamp_version_already_there(self):
        command.stamp(self.cfg, [self.c, self.e])

        with capture_engine_context_buffer() as buf:
            command.stamp(self.cfg, [self.c, self.e])

        self._assert_sql(buf.getvalue(), None, {})

    def test_sql_stamp_multi_rev_from_multi_start(self):
        with capture_context_buffer() as buf:
            command.stamp(
                self.cfg,
                [
                    "%s:%s" % (self.b, self.c),
                    "%s:%s" % (self.b, self.e),
                    "%s:%s" % (self.b, self.f),
                ],
                sql=True,
            )

        self._assert_sql(buf.getvalue(), self.b, {self.c, self.e, self.f})

    def test_sql_stamp_heads_symbolic(self):
        with capture_context_buffer() as buf:
            command.stamp(self.cfg, ["%s:heads" % self.b], sql=True)

        self._assert_sql(buf.getvalue(), self.b, {self.c, self.e, self.f})

    def test_sql_stamp_different_multi_start(self):
        assert_raises_message(
            util.CommandError,
            "Stamp operation with --sql only supports a single "
            "starting revision at a time",
            command.stamp,
            self.cfg,
            ["%s:%s" % (self.b, self.c), "%s:%s" % (self.a, self.e)],
            sql=True,
        )

    def test_stamp_purge(self):
        command.stamp(self.cfg, [self.a])

        eng = _sqlite_file_db()
        with eng.begin() as conn:
            result =
conn.execute( text("update alembic_version set version_num='fake'") ) eq_(result.rowcount, 1) with capture_engine_context_buffer() as buf: command.stamp(self.cfg, [self.a, self.e, self.f], purge=True) self._assert_sql(buf.getvalue(), None, {self.a, self.e, self.f}) def test_stamp_purge_no_sql(self): assert_raises_message( util.CommandError, "Can't use --purge with --sql mode", command.stamp, self.cfg, [self.c], sql=True, purge=True, ) class UpgradeDowngradeStampTest(TestBase): def setUp(self): self.env = staging_env() self.cfg = cfg = _no_sql_testing_config() cfg.set_main_option("dialect_name", "sqlite") cfg.remove_main_option("url") self.a, self.b, self.c = three_rev_fixture(cfg) def tearDown(self): clear_staging_env() def test_version_from_none_insert(self): with capture_context_buffer() as buf: command.upgrade(self.cfg, self.a, sql=True) assert "CREATE TABLE alembic_version" in buf.getvalue() assert "INSERT INTO alembic_version" in buf.getvalue() assert "CREATE STEP 1" in buf.getvalue() assert "CREATE STEP 2" not in buf.getvalue() assert "CREATE STEP 3" not in buf.getvalue() def test_version_from_middle_update(self): with capture_context_buffer() as buf: command.upgrade(self.cfg, "%s:%s" % (self.b, self.c), sql=True) assert "CREATE TABLE alembic_version" not in buf.getvalue() assert "UPDATE alembic_version" in buf.getvalue() assert "CREATE STEP 1" not in buf.getvalue() assert "CREATE STEP 2" not in buf.getvalue() assert "CREATE STEP 3" in buf.getvalue() def test_version_to_none(self): with capture_context_buffer() as buf: command.downgrade(self.cfg, "%s:base" % self.c, sql=True) assert "CREATE TABLE alembic_version" not in buf.getvalue() assert "INSERT INTO alembic_version" not in buf.getvalue() assert "DROP TABLE alembic_version" in buf.getvalue() assert "DROP STEP 3" in buf.getvalue() assert "DROP STEP 2" in buf.getvalue() assert "DROP STEP 1" in buf.getvalue() def test_version_to_middle(self): with capture_context_buffer() as buf: command.downgrade(self.cfg, "%s:%s" % (self.c, self.a), sql=True) assert "CREATE TABLE alembic_version" not in buf.getvalue() assert "INSERT INTO alembic_version" not in buf.getvalue() assert "DROP TABLE alembic_version" not in buf.getvalue() assert "DROP STEP 3" in buf.getvalue() assert "DROP STEP 2" in buf.getvalue() assert "DROP STEP 1" not in buf.getvalue() def test_none_to_head_sql(self): with capture_context_buffer() as buf: command.upgrade(self.cfg, "head", sql=True) assert "CREATE TABLE alembic_version" in buf.getvalue() assert "UPDATE alembic_version" in buf.getvalue() assert "CREATE STEP 1" in buf.getvalue() assert "CREATE STEP 2" in buf.getvalue() assert "CREATE STEP 3" in buf.getvalue() def test_base_to_head_sql(self): with capture_context_buffer() as buf: command.upgrade(self.cfg, "base:head", sql=True) assert "CREATE TABLE alembic_version" in buf.getvalue() assert "UPDATE alembic_version" in buf.getvalue() assert "CREATE STEP 1" in buf.getvalue() assert "CREATE STEP 2" in buf.getvalue() assert "CREATE STEP 3" in buf.getvalue() def test_sql_stamp_from_rev(self): with capture_context_buffer() as buf: command.stamp(self.cfg, "%s:head" % self.a, sql=True) assert ( "UPDATE alembic_version " "SET version_num='%s' " "WHERE alembic_version.version_num = '%s';" % (self.c, self.a) ) in buf.getvalue() def test_sql_stamp_from_partial_rev(self): with capture_context_buffer() as buf: command.stamp(self.cfg, "%s:head" % self.a[0:7], sql=True) assert ( "UPDATE alembic_version " "SET version_num='%s' " "WHERE alembic_version.version_num = '%s';" % (self.c, 
self.a) ) in buf.getvalue() def test_sql_stamp_revision_as_kw(self): with capture_context_buffer() as buf: command.stamp(self.cfg, revision="head", sql=True) assert ( "INSERT INTO alembic_version (version_num) VALUES ('%s')" % self.c in buf.getvalue() ) def test_stamp_argparser_single_rev(self): cmd = config.CommandLine() options = cmd.parser.parse_args(["stamp", self.c, "--sql"]) with capture_context_buffer() as buf: cmd.run_cmd(self.cfg, options) assert ( "INSERT INTO alembic_version (version_num) VALUES ('%s')" % self.c in buf.getvalue() ) def test_stamp_argparser_multiple_rev(self): cmd = config.CommandLine() options = cmd.parser.parse_args(["stamp", self.b, self.c, "--sql"]) with capture_context_buffer() as buf: cmd.run_cmd(self.cfg, options) # TODO: this is still wrong, as this stamp command is putting # conflicting heads into the table. The test here is only to test # that the revisions are passed as a list. assert ( "INSERT INTO alembic_version (version_num) VALUES ('%s')" % self.b in buf.getvalue() ) assert ( "INSERT INTO alembic_version (version_num) VALUES ('%s')" % self.c in buf.getvalue() ) class LiveStampTest(TestBase): __only_on__ = "sqlite" def setUp(self): self.bind = _sqlite_file_db() self.env = staging_env() self.cfg = _sqlite_testing_config() self.a = a = util.rev_id() self.b = b = util.rev_id() script = ScriptDirectory.from_config(self.cfg) script.generate_revision(a, None, refresh=True) write_script( script, a, """ revision = '%s' down_revision = None """ % a, ) script.generate_revision(b, None, refresh=True) write_script( script, b, """ revision = '%s' down_revision = '%s' """ % (b, a), ) def tearDown(self): clear_staging_env() def test_stamp_creates_table(self): command.stamp(self.cfg, "head") with self.bind.connect() as conn: eq_( conn.scalar(text("select version_num from alembic_version")), self.b, ) def test_stamp_existing_upgrade(self): command.stamp(self.cfg, self.a) command.stamp(self.cfg, self.b) with self.bind.connect() as conn: eq_( conn.scalar(text("select version_num from alembic_version")), self.b, ) def test_stamp_existing_downgrade(self): command.stamp(self.cfg, self.b) command.stamp(self.cfg, self.a) with self.bind.connect() as conn: eq_( conn.scalar(text("select version_num from alembic_version")), self.a, ) def test_stamp_version_already_there(self): command.stamp(self.cfg, self.b) command.stamp(self.cfg, self.b) with self.bind.connect() as conn: eq_( conn.scalar(text("select version_num from alembic_version")), self.b, ) class EditTest(TestBase): @classmethod def setup_class(cls): cls.env = staging_env() cls.cfg = _sqlite_testing_config() cls.a, cls.b, cls.c = three_rev_fixture(cls.cfg) @classmethod def teardown_class(cls): clear_staging_env() def setUp(self): command.stamp(self.cfg, "base") def test_edit_head(self): expected_call_arg = os.path.normpath( "%s/scripts/versions/%s_revision_c.py" % (EditTest.cfg.config_args["here"], EditTest.c) ) with mock.patch("alembic.util.open_in_editor") as edit: command.edit(self.cfg, "head") edit.assert_called_with(expected_call_arg) def test_edit_b(self): expected_call_arg = os.path.normpath( "%s/scripts/versions/%s_revision_b.py" % (EditTest.cfg.config_args["here"], EditTest.b) ) with mock.patch("alembic.util.open_in_editor") as edit: command.edit(self.cfg, self.b[0:3]) edit.assert_called_with(expected_call_arg) def test_edit_no_revs(self): assert_raises_message( util.CommandError, "No revision files indicated by symbol 'base'", command.edit, self.cfg, "base", ) def test_edit_no_current(self): 
assert_raises_message( util.CommandError, "No current revisions", command.edit, self.cfg, "current", ) def test_edit_current(self): expected_call_arg = os.path.normpath( "%s/scripts/versions/%s_revision_b.py" % (EditTest.cfg.config_args["here"], EditTest.b) ) command.stamp(self.cfg, self.b) with mock.patch("alembic.util.open_in_editor") as edit: command.edit(self.cfg, "current") edit.assert_called_with(expected_call_arg) class CommandLineTest(TestBase): @classmethod def setup_class(cls): cls.env = staging_env() cls.cfg = _sqlite_testing_config() cls.a, cls.b, cls.c = three_rev_fixture(cls.cfg) def teardown(self): os.environ.pop("ALEMBIC_CONFIG", None) @classmethod def teardown_class(cls): clear_staging_env() def test_run_cmd_args_missing(self): canary = mock.Mock() orig_revision = command.revision # the command function has "process_revision_directives" # however the ArgumentParser does not. ensure things work def revision( config, message=None, autogenerate=False, sql=False, head="head", splice=False, branch_label=None, version_path=None, rev_id=None, depends_on=None, process_revision_directives=None, ): canary(config, message=message) revision.__module__ = "alembic.command" # CommandLine() pulls the function into the ArgumentParser # and needs the full signature, so we can't patch the "revision" # command normally as ArgumentParser gives us no way to get to it. config.command.revision = revision try: commandline = config.CommandLine() options = commandline.parser.parse_args(["revision", "-m", "foo"]) commandline.run_cmd(self.cfg, options) finally: config.command.revision = orig_revision eq_(canary.mock_calls, [mock.call(self.cfg, message="foo")]) def test_help_text(self): commands = { fn.__name__ for fn in [getattr(command, n) for n in dir(command)] if inspect.isfunction(fn) and fn.__name__[0] != "_" and fn.__module__ == "alembic.command" } # make sure we found them assert commands.intersection( {"upgrade", "downgrade", "merge", "revision"} ) # catch help text coming intersection with mock.patch("alembic.config.ArgumentParser") as argparse: config.CommandLine() for kall in argparse().add_subparsers().mock_calls: for sub_kall in kall.call_list(): if sub_kall[0] == "add_parser": cmdname = sub_kall[1][0] help_text = sub_kall[2]["help"] if help_text: commands.remove(cmdname) # more than two spaces assert not re.search(r" ", help_text) # no markup stuff assert ":" not in help_text # no newlines assert "\n" not in help_text # ends with a period assert help_text.endswith(".") # not too long assert len(help_text) < 80 assert not commands, "Commands without help text: %s" % commands def test_init_file_exists_and_is_not_empty(self): with mock.patch( "alembic.command.os.listdir", return_value=["file1", "file2"] ), mock.patch("alembic.command.os.access", return_value=True): directory = "alembic" assert_raises_message( util.CommandError, "Directory %s already exists and is not empty" % directory, command.init, self.cfg, directory=directory, ) def test_config_file_default(self): cl = config.CommandLine() with mock.patch.object(cl, "run_cmd") as run_cmd: cl.main(argv=["list_templates"]) cfg = run_cmd.mock_calls[0][1][0] eq_(cfg.config_file_name, "alembic.ini") def test_config_file_c_override(self): cl = config.CommandLine() with mock.patch.object(cl, "run_cmd") as run_cmd: cl.main(argv=["-c", "myconf.ini", "list_templates"]) cfg = run_cmd.mock_calls[0][1][0] eq_(cfg.config_file_name, "myconf.ini") def test_config_file_env_variable(self): os.environ["ALEMBIC_CONFIG"] = "/foo/bar/bat.conf" cl = 
config.CommandLine()
        with mock.patch.object(cl, "run_cmd") as run_cmd:
            cl.main(argv=["list_templates"])

        cfg = run_cmd.mock_calls[0][1][0]
        eq_(cfg.config_file_name, "/foo/bar/bat.conf")

    def test_config_file_env_variable_c_override(self):
        os.environ["ALEMBIC_CONFIG"] = "/foo/bar/bat.conf"
        cl = config.CommandLine()
        with mock.patch.object(cl, "run_cmd") as run_cmd:
            cl.main(argv=["-c", "myconf.conf", "list_templates"])

        cfg = run_cmd.mock_calls[0][1][0]
        eq_(cfg.config_file_name, "myconf.conf")

    def test_init_file_exists_and_is_empty(self):
        def access_(path, mode):
            if "generic" in path or path == "foobar":
                return True
            else:
                return False

        def listdir_(path):
            if path == "foobar":
                return []
            else:
                return ["file1", "file2", "alembic.ini.mako"]

        with mock.patch(
            "alembic.command.os.access", side_effect=access_
        ), mock.patch("alembic.command.os.makedirs") as makedirs, mock.patch(
            "alembic.command.os.listdir", side_effect=listdir_
        ), mock.patch(
            "alembic.command.ScriptDirectory"
        ):
            command.init(self.cfg, directory="foobar")

        eq_(
            makedirs.mock_calls,
            [mock.call(os.path.normpath("foobar/versions"))],
        )

    def test_init_file_doesnt_exist(self):
        def access_(path, mode):
            if "generic" in path:
                return True
            else:
                return False

        with mock.patch(
            "alembic.command.os.access", side_effect=access_
        ), mock.patch("alembic.command.os.makedirs") as makedirs, mock.patch(
            "alembic.command.ScriptDirectory"
        ):
            command.init(self.cfg, directory="foobar")

        eq_(
            makedirs.mock_calls,
            [
                mock.call("foobar"),
                mock.call(os.path.normpath("foobar/versions")),
            ],
        )

    def test_init_w_package(self):
        path = os.path.join(_get_staging_directory(), "foobar")

        with mock.patch("alembic.command.open") as open_:
            command.init(self.cfg, directory=path, package=True)

        eq_(
            open_.mock_calls,
            [
                mock.call(
                    os.path.abspath(os.path.join(path, "__init__.py")), "w"
                ),
                mock.call().close(),
                mock.call(
                    os.path.abspath(
                        os.path.join(path, "versions", "__init__.py")
                    ),
                    "w",
                ),
                mock.call().close(),
            ],
        )

    def test_version_text(self):
        buf = StringIO()
        to_mock = "sys.stdout"

        with mock.patch(to_mock, buf):
            try:
                config.CommandLine(prog="test_prog").main(argv=["--version"])
                assert False
            except SystemExit:
                pass

        is_true("test_prog" in str(buf.getvalue()))
        is_true(__version__ in str(buf.getvalue()))


class EnureVersionTest(TestBase):
    @classmethod
    def setup_class(cls):
        cls.bind = _sqlite_file_db()
        cls.env = staging_env()
        cls.cfg = _sqlite_testing_config()

    @classmethod
    def teardown_class(cls):
        clear_staging_env()

    def test_ensure_version(self):
        command.ensure_version(self.cfg)

        engine = cast(Engine, self.bind)
        with engine.connect() as conn:
            is_true(_connectable_has_table(conn, "alembic_version", None))

    def test_ensure_version_called_twice(self):
        command.ensure_version(self.cfg)
        command.ensure_version(self.cfg)

        engine = cast(Engine, self.bind)
        with engine.connect() as conn:
            is_true(_connectable_has_table(conn, "alembic_version", None))

    def test_sql_ensure_version(self):
        with capture_context_buffer() as buf:
            command.ensure_version(self.cfg, sql=True)

        is_true(buf.getvalue().startswith("CREATE TABLE alembic_version"))
alembic-rel_1_7_6/tests/test_config.py000066400000000000000000000166001417624537100201620ustar00rootroot00000000000000
#!coding: utf-8

import os
import tempfile

from alembic import config
from alembic import testing
from alembic import util
from alembic.migration import MigrationContext
from alembic.operations import Operations
from alembic.script import ScriptDirectory
from alembic.testing import assert_raises_message
from alembic.testing import eq_
from alembic.testing import
mock from alembic.testing.assertions import expect_raises_message from alembic.testing.env import _no_sql_testing_config from alembic.testing.env import _write_config_file from alembic.testing.env import clear_staging_env from alembic.testing.env import staging_env from alembic.testing.fixtures import capture_db from alembic.testing.fixtures import TestBase class FileConfigTest(TestBase): def test_config_args(self): cfg = _write_config_file( """ [alembic] migrations = %(base_path)s/db/migrations """ ) test_cfg = config.Config( cfg.config_file_name, config_args=dict(base_path="/home/alembic") ) eq_( test_cfg.get_section_option("alembic", "migrations"), "/home/alembic/db/migrations", ) def tearDown(self): clear_staging_env() class ConfigTest(TestBase): def test_config_no_file_main_option(self): cfg = config.Config() cfg.set_main_option("url", "postgresql://foo/bar") eq_(cfg.get_main_option("url"), "postgresql://foo/bar") def test_config_no_file_section_option(self): cfg = config.Config() cfg.set_section_option("foo", "url", "postgresql://foo/bar") eq_(cfg.get_section_option("foo", "url"), "postgresql://foo/bar") cfg.set_section_option("foo", "echo", "True") eq_(cfg.get_section_option("foo", "echo"), "True") def test_config_set_main_option_percent(self): cfg = config.Config() cfg.set_main_option("foob", "a %% percent") eq_(cfg.get_main_option("foob"), "a % percent") def test_config_set_section_option_percent(self): cfg = config.Config() cfg.set_section_option("some_section", "foob", "a %% percent") eq_(cfg.get_section_option("some_section", "foob"), "a % percent") def test_config_set_section_option_interpolation(self): cfg = config.Config() cfg.set_section_option("some_section", "foob", "foob_value") cfg.set_section_option("some_section", "bar", "bar with %(foob)s") eq_( cfg.get_section_option("some_section", "bar"), "bar with foob_value", ) def test_standalone_op(self): eng, buf = capture_db() env = MigrationContext.configure(eng) op = Operations(env) op.alter_column("t", "c", nullable=True) eq_(buf, ["ALTER TABLE t ALTER COLUMN c DROP NOT NULL"]) def test_no_script_error(self): cfg = config.Config() assert_raises_message( util.CommandError, "No 'script_location' key found in configuration.", ScriptDirectory.from_config, cfg, ) def test_attributes_attr(self): m1 = mock.Mock() cfg = config.Config() cfg.attributes["connection"] = m1 eq_(cfg.attributes["connection"], m1) def test_attributes_construtor(self): m1 = mock.Mock() m2 = mock.Mock() cfg = config.Config(attributes={"m1": m1}) cfg.attributes["connection"] = m2 eq_(cfg.attributes, {"m1": m1, "connection": m2}) @testing.combinations( ( "legacy raw string 1", None, "/foo", ["/foo"], ), ( "legacy raw string 2", None, "/foo /bar", ["/foo", "/bar"], ), ( "legacy raw string 3", "space", "/foo", ["/foo"], ), ( "legacy raw string 4", "space", "/foo /bar", ["/foo", "/bar"], ), ( "Linux pathsep 1", ":", "/Project A", ["/Project A"], ), ( "Linux pathsep 2", ":", "/Project A:/Project B", ["/Project A", "/Project B"], ), ( "Windows pathsep 1", ";", r"C:\Project A", [r"C:\Project A"], ), ( "Windows pathsep 2", ";", r"C:\Project A;C:\Project B", [r"C:\Project A", r"C:\Project B"], ), ( "os pathsep", "os", r"path_number_one%(sep)spath_number_two%(sep)s" % {"sep": os.pathsep}, [r"path_number_one", r"path_number_two"], ), ( "invalid pathsep 2", "|", "/foo|/bar", ValueError( "'|' is not a valid value for version_path_separator; " "expected 'space', 'os', ':', ';'" ), ), id_="iaaa", argnames="separator, string_value, expected_result", ) def 
test_version_locations(self, separator, string_value, expected_result): cfg = config.Config() if separator is not None: cfg.set_main_option( "version_path_separator", separator, ) cfg.set_main_option("script_location", tempfile.gettempdir()) cfg.set_main_option("version_locations", string_value) if isinstance(expected_result, ValueError): with expect_raises_message(ValueError, expected_result.args[0]): ScriptDirectory.from_config(cfg) else: s = ScriptDirectory.from_config(cfg) eq_(s.version_locations, expected_result) class StdoutOutputEncodingTest(TestBase): def test_plain(self): stdout = mock.Mock(encoding="latin-1") cfg = config.Config(stdout=stdout) cfg.print_stdout("test %s %s", "x", "y") eq_( stdout.mock_calls, [mock.call.write("test x y"), mock.call.write("\n")], ) def test_utf8_unicode(self): stdout = mock.Mock(encoding="latin-1") cfg = config.Config(stdout=stdout) cfg.print_stdout("méil %s %s", "x", "y") eq_( stdout.mock_calls, [mock.call.write("méil x y"), mock.call.write("\n")], ) def test_ascii_unicode(self): stdout = mock.Mock(encoding=None) cfg = config.Config(stdout=stdout) cfg.print_stdout("méil %s %s", "x", "y") eq_( stdout.mock_calls, [mock.call.write("m?il x y"), mock.call.write("\n")], ) def test_only_formats_output_with_args(self): stdout = mock.Mock(encoding=None) cfg = config.Config(stdout=stdout) cfg.print_stdout("test 3%") eq_( stdout.mock_calls, [mock.call.write("test 3%"), mock.call.write("\n")], ) class TemplateOutputEncodingTest(TestBase): def setUp(self): staging_env() self.cfg = _no_sql_testing_config() def tearDown(self): clear_staging_env() def test_default(self): script = ScriptDirectory.from_config(self.cfg) eq_(script.output_encoding, "utf-8") def test_setting(self): self.cfg.set_main_option("output_encoding", "latin-1") script = ScriptDirectory.from_config(self.cfg) eq_(script.output_encoding, "latin-1") alembic-rel_1_7_6/tests/test_editor.py000066400000000000000000000074041417624537100202050ustar00rootroot00000000000000import os from os.path import join from unittest.mock import patch from alembic import util from alembic.testing import combinations from alembic.testing import expect_raises_message from alembic.testing.fixtures import TestBase class TestHelpers(TestBase): def common(self, cb, is_posix=True): with patch("alembic.util.editor.check_call") as check_call, patch( "alembic.util.editor.exists" ) as exists, patch( "alembic.util.editor.is_posix", new=is_posix, ), patch( "os.pathsep", new=":" if is_posix else ";" ): cb(check_call, exists) @combinations((True,), (False,)) def test_edit_with_user_editor(self, posix): def go(check_call, exists): test_environ = {"EDITOR": "myvim", "PATH": "/usr/bin"} executable = join("/usr/bin", "myvim") if not posix: executable += ".exe" exists.side_effect = lambda fname: fname == executable util.open_in_editor("myfile", test_environ) check_call.assert_called_with([executable, "myfile"]) self.common(go, posix) @combinations(("EDITOR",), ("VISUAL",)) def test_edit_with_user_editor_exists(self, key): def go(check_call, exists): test_environ = {key: "myvim", "PATH": "/usr/bin"} exists.side_effect = lambda fname: fname == "myvim" util.open_in_editor("myfile", test_environ) check_call.assert_called_with(["myvim", "myfile"]) self.common(go) @combinations((True,), (False,)) def test_edit_with_user_editor_precedence(self, with_path): def go(check_call, exists): test_environ = { "EDITOR": "myvim", "VISUAL": "myvisual", "PATH": "/usr/bin", } exes = ["myvim", "myvisual"] if with_path: exes = [join("/usr/bin", n) for n in 
exes] exists.side_effect = lambda fname: fname in exes util.open_in_editor("myfile", test_environ) check_call.assert_called_with([exes[0], "myfile"]) self.common(go) def test_edit_with_user_editor_abs(self): def go(check_call, exists): test_environ = {"EDITOR": "/foo/myvim", "PATH": "/usr/bin"} exists.side_effect = lambda fname: fname == "/usr/bin/foo/myvim" with expect_raises_message(util.CommandError, "EDITOR"): util.open_in_editor("myfile", test_environ) self.common(go) def test_edit_with_default_editor(self): def go(check_call, exists): test_environ = {"PATH": os.pathsep.join(["/usr/bin", "/bin"])} executable = join("/bin", "vim") exists.side_effect = lambda fname: fname == executable util.open_in_editor("myfile", test_environ) check_call.assert_called_with([executable, "myfile"]) self.common(go) def test_edit_with_default_editor_windows(self): def go(check_call, exists): test_environ = { "PATH": os.pathsep.join( [r"C:\Windows\System32", r"C:\Users\user\bin"] ) } executable = join(r"C:\Users\user\bin", "notepad.exe") exists.side_effect = lambda fname: fname == executable util.open_in_editor("myfile", test_environ) check_call.assert_called_with([executable, "myfile"]) self.common(go, False) def test_edit_with_missing_editor(self): def go(check_call, exists): test_environ = {} exists.return_value = False with expect_raises_message(util.CommandError, "EDITOR"): util.open_in_editor("myfile", test_environ) self.common(go) alembic-rel_1_7_6/tests/test_environment.py000066400000000000000000000120461417624537100212610ustar00rootroot00000000000000#!coding: utf-8 import os import sys from alembic import command from alembic import testing from alembic import util from alembic.environment import EnvironmentContext from alembic.migration import MigrationContext from alembic.script import ScriptDirectory from alembic.testing import config from alembic.testing import eq_ from alembic.testing import is_ from alembic.testing import mock from alembic.testing.assertions import expect_raises_message from alembic.testing.env import _get_staging_directory from alembic.testing.env import _no_sql_testing_config from alembic.testing.env import _sqlite_file_db from alembic.testing.env import _sqlite_testing_config from alembic.testing.env import clear_staging_env from alembic.testing.env import staging_env from alembic.testing.env import write_script from alembic.testing.fixtures import capture_context_buffer from alembic.testing.fixtures import TestBase class EnvironmentTest(TestBase): def setUp(self): staging_env() self.cfg = _no_sql_testing_config() def tearDown(self): clear_staging_env() def _fixture(self, **kw): script = ScriptDirectory.from_config(self.cfg) env = EnvironmentContext(self.cfg, script, **kw) return env def test_x_arg(self): env = self._fixture() self.cfg.cmd_opts = mock.Mock(x="y=5") eq_(env.get_x_argument(), "y=5") def test_x_arg_asdict(self): env = self._fixture() self.cfg.cmd_opts = mock.Mock(x=["y=5"]) eq_(env.get_x_argument(as_dictionary=True), {"y": "5"}) def test_x_arg_no_opts(self): env = self._fixture() eq_(env.get_x_argument(), []) def test_x_arg_no_opts_asdict(self): env = self._fixture() eq_(env.get_x_argument(as_dictionary=True), {}) def test_tag_arg(self): env = self._fixture(tag="x") eq_(env.get_tag_argument(), "x") def test_migration_context_has_config(self): env = self._fixture() env.configure(url="sqlite://") ctx = env._migration_context is_(ctx.config, self.cfg) ctx = MigrationContext(ctx.dialect, None, {}) is_(ctx.config, None) def test_sql_mode_parameters(self): env = 
self._fixture() a_rev = "arev" env.script.generate_revision(a_rev, "revision a", refresh=True) write_script( env.script, a_rev, """\ "Rev A" revision = '{}' down_revision = None from alembic import op def upgrade(): op.execute(''' do some SQL thing with a % percent sign % ''') """.format( a_rev ), ) with capture_context_buffer(transactional_ddl=True) as buf: command.upgrade(self.cfg, "arev", sql=True) assert "do some SQL thing with a % percent sign %" in buf.getvalue() @config.requirements.legacy_engine @testing.uses_deprecated( r"The Engine.execute\(\) function/method is considered legacy" ) def test_error_on_passing_engine(self): env = self._fixture() engine = _sqlite_file_db() a_rev = "arev" env.script.generate_revision(a_rev, "revision a", refresh=True) write_script( env.script, a_rev, """\ "Rev A" revision = '%s' down_revision = None from alembic import op def upgrade(): pass def downgrade(): pass """ % a_rev, ) migration_fn = mock.MagicMock() def upgrade(rev, context): migration_fn(rev, context) return env.script._upgrade_revs(a_rev, rev) with expect_raises_message( util.CommandError, r"'connection' argument to configure\(\) is " r"expected to be a sqlalchemy.engine.Connection ", ): env.configure( connection=engine, fn=upgrade, transactional_ddl=False ) class CWDTest(TestBase): def setUp(self): self.env = staging_env() self.cfg = _sqlite_testing_config() def tearDown(self): clear_staging_env() @testing.combinations( ( ".", ["."], ), ("/tmp/foo:/tmp/bar", ["/tmp/foo", "/tmp/bar"]), ("/tmp/foo /tmp/bar", ["/tmp/foo", "/tmp/bar"]), ("/tmp/foo,/tmp/bar", ["/tmp/foo", "/tmp/bar"]), (". /tmp/foo", [".", "/tmp/foo"]), ) def test_sys_path_prepend(self, config_value, expected): self.cfg.set_main_option("prepend_sys_path", config_value) script = ScriptDirectory.from_config(self.cfg) env = EnvironmentContext(self.cfg, script) target = os.path.abspath(_get_staging_directory()) def assert_(heads, context): eq_( [os.path.abspath(p) for p in sys.path[0 : len(expected)]], [os.path.abspath(p) for p in expected], ) return [] p = [p for p in sys.path if os.path.abspath(p) != target] with mock.patch.object(sys, "path", p): env.configure(url="sqlite://", fn=assert_) with env: script.run_env() alembic-rel_1_7_6/tests/test_external_dialect.py000066400000000000000000000102331417624537100222200ustar00rootroot00000000000000from sqlalchemy import MetaData from sqlalchemy import types as sqla_types from sqlalchemy.engine import default from alembic import autogenerate from alembic.autogenerate import api from alembic.autogenerate import render from alembic.ddl import impl from alembic.migration import MigrationContext from alembic.testing import eq_ from alembic.testing import eq_ignore_whitespace from alembic.testing.fixtures import TestBase class CustomDialect(default.DefaultDialect): name = "custom_dialect" try: from sqlalchemy.dialects import registry except ImportError: pass else: registry.register("custom_dialect", __name__, "CustomDialect") class CustomDialectImpl(impl.DefaultImpl): __dialect__ = "custom_dialect" transactional_ddl = False def render_type(self, type_, autogen_context): if type_.__module__ == __name__: autogen_context.imports.add( "from %s import custom_dialect_types" % (__name__,) ) is_external = True else: is_external = False if is_external and hasattr( self, "_render_%s_type" % type_.__visit_name__ ): meth = getattr(self, "_render_%s_type" % type_.__visit_name__) return meth(type_, autogen_context) if is_external: return "%s.%r" % ("custom_dialect_types", type_) else: return None def 
_render_EXT_ARRAY_type(self, type_, autogen_context): return render._render_type_w_subtype( type_, autogen_context, "item_type", r"(.+?\()", prefix="custom_dialect_types.", ) class EXT_ARRAY(sqla_types.TypeEngine): __visit_name__ = "EXT_ARRAY" def __init__(self, item_type): if isinstance(item_type, type): item_type = item_type() self.item_type = item_type super(EXT_ARRAY, self).__init__() class FOOBARTYPE(sqla_types.TypeEngine): __visit_name__ = "FOOBARTYPE" class ExternalDialectRenderTest(TestBase): def setUp(self): ctx_opts = { "sqlalchemy_module_prefix": "sa.", "alembic_module_prefix": "op.", "target_metadata": MetaData(), "user_module_prefix": None, } context = MigrationContext.configure( dialect_name="custom_dialect", opts=ctx_opts ) self.autogen_context = api.AutogenContext(context) def test_render_type(self): eq_ignore_whitespace( autogenerate.render._repr_type(FOOBARTYPE(), self.autogen_context), "custom_dialect_types.FOOBARTYPE()", ) eq_( self.autogen_context.imports, set( [ "from tests.test_external_dialect " "import custom_dialect_types" ] ), ) def test_external_nested_render_sqla_type(self): eq_ignore_whitespace( autogenerate.render._repr_type( EXT_ARRAY(sqla_types.Integer), self.autogen_context ), "custom_dialect_types.EXT_ARRAY(sa.Integer())", ) eq_ignore_whitespace( autogenerate.render._repr_type( EXT_ARRAY(sqla_types.DateTime(timezone=True)), self.autogen_context, ), "custom_dialect_types.EXT_ARRAY(sa.DateTime(timezone=True))", ) eq_( self.autogen_context.imports, set( [ "from tests.test_external_dialect " "import custom_dialect_types" ] ), ) def test_external_nested_render_external_type(self): eq_ignore_whitespace( autogenerate.render._repr_type( EXT_ARRAY(FOOBARTYPE), self.autogen_context ), "custom_dialect_types.EXT_ARRAY" "(custom_dialect_types.FOOBARTYPE())", ) eq_( self.autogen_context.imports, set( [ "from tests.test_external_dialect " "import custom_dialect_types" ] ), ) alembic-rel_1_7_6/tests/test_impl.py000066400000000000000000000024201417624537100176510ustar00rootroot00000000000000from sqlalchemy import Column from sqlalchemy import Integer from sqlalchemy import Table from sqlalchemy.sql import text from alembic import testing from alembic.testing import eq_ from alembic.testing.fixtures import FutureEngineMixin from alembic.testing.fixtures import TablesTest class ImplTest(TablesTest): __only_on__ = "sqlite" @classmethod def define_tables(cls, metadata): Table( "some_table", metadata, Column("x", Integer), Column("y", Integer) ) @testing.fixture def impl(self, migration_context): with migration_context.begin_transaction(_per_migration=True): yield migration_context.impl def test_execute_params(self, impl): result = impl._exec(text("select :my_param"), params={"my_param": 5}) eq_(result.scalar(), 5) def test_execute_multiparams(self, impl): some_table = self.tables.some_table impl._exec( some_table.insert(), multiparams=[{"x": 1, "y": 2}, {"x": 2, "y": 3}, {"x": 5, "y": 7}], ) eq_( impl._exec( some_table.select().order_by(some_table.c.x) ).fetchall(), [(1, 2), (2, 3), (5, 7)], ) class FutureImplTest(FutureEngineMixin, ImplTest): pass alembic-rel_1_7_6/tests/test_mssql.py000066400000000000000000000407521417624537100200610ustar00rootroot00000000000000"""Test op functions against MSSQL.""" from sqlalchemy import Column from sqlalchemy import exc from sqlalchemy import Integer from sqlalchemy import String from alembic import command from alembic import op from alembic import util from alembic.testing import assert_raises_message from alembic.testing import 
combinations from alembic.testing import config from alembic.testing import eq_ from alembic.testing.env import _no_sql_testing_config from alembic.testing.env import clear_staging_env from alembic.testing.env import staging_env from alembic.testing.env import three_rev_fixture from alembic.testing.fixtures import capture_context_buffer from alembic.testing.fixtures import op_fixture from alembic.testing.fixtures import TestBase from alembic.util import sqla_compat class FullEnvironmentTests(TestBase): @classmethod def setup_class(cls): staging_env() directives = "sqlalchemy.legacy_schema_aliasing=false" cls.cfg = cfg = _no_sql_testing_config("mssql", directives) cls.a, cls.b, cls.c = three_rev_fixture(cfg) @classmethod def teardown_class(cls): clear_staging_env() def test_begin_commit(self): with capture_context_buffer(transactional_ddl=True) as buf: command.upgrade(self.cfg, self.a, sql=True) assert "BEGIN TRANSACTION;" in buf.getvalue() # ensure ends in COMMIT; GO eq_( [x for x in buf.getvalue().splitlines() if x][-2:], ["COMMIT;", "GO"], ) def test_batch_separator_default(self): with capture_context_buffer() as buf: command.upgrade(self.cfg, self.a, sql=True) assert "GO" in buf.getvalue() def test_batch_separator_custom(self): with capture_context_buffer(mssql_batch_separator="BYE") as buf: command.upgrade(self.cfg, self.a, sql=True) assert "BYE" in buf.getvalue() class OpTest(TestBase): def test_add_column(self): context = op_fixture("mssql") op.add_column("t1", Column("c1", Integer, nullable=False)) context.assert_("ALTER TABLE t1 ADD c1 INTEGER NOT NULL") def test_add_column_with_default(self): context = op_fixture("mssql") op.add_column( "t1", Column("c1", Integer, nullable=False, server_default="12") ) context.assert_("ALTER TABLE t1 ADD c1 INTEGER NOT NULL DEFAULT '12'") def test_alter_column_rename_mssql(self): context = op_fixture("mssql") op.alter_column("t", "c", new_column_name="x") context.assert_("EXEC sp_rename 't.c', x, 'COLUMN'") def test_alter_column_rename_quoted_mssql(self): context = op_fixture("mssql") op.alter_column("t", "c", new_column_name="SomeFancyName") context.assert_("EXEC sp_rename 't.c', [SomeFancyName], 'COLUMN'") @combinations((True,), (False,), argnames="pass_existing_type") @combinations((True,), (False,), argnames="change_nullability") def test_alter_column_type_and_nullability( self, pass_existing_type, change_nullability ): context = op_fixture("mssql") args = dict(type_=Integer) if pass_existing_type: args["existing_type"] = String(15) if change_nullability: args["nullable"] = False op.alter_column("t", "c", **args) if change_nullability: context.assert_("ALTER TABLE t ALTER COLUMN c INTEGER NOT NULL") else: context.assert_("ALTER TABLE t ALTER COLUMN c INTEGER") def test_alter_column_dont_touch_constraints(self): context = op_fixture("mssql") from sqlalchemy import Boolean op.alter_column( "tests", "col", existing_type=Boolean(), nullable=False ) context.assert_("ALTER TABLE tests ALTER COLUMN col BIT NOT NULL") def test_drop_index(self): context = op_fixture("mssql") op.drop_index("my_idx", "my_table") context.assert_contains("DROP INDEX my_idx ON my_table") def test_drop_column_w_default(self): context = op_fixture("mssql") op.drop_column("t1", "c1", mssql_drop_default=True) op.drop_column("t1", "c2", mssql_drop_default=True) context.assert_contains( "exec('alter table t1 drop constraint ' + @const_name)" ) context.assert_contains("ALTER TABLE t1 DROP COLUMN c1") def test_drop_column_w_default_in_batch(self): context = op_fixture("mssql") with 
op.batch_alter_table("t1", schema=None) as batch_op: batch_op.drop_column("c1", mssql_drop_default=True) batch_op.drop_column("c2", mssql_drop_default=True) context.assert_contains( "exec('alter table t1 drop constraint ' + @const_name)" ) context.assert_contains("ALTER TABLE t1 DROP COLUMN c1") def test_alter_column_drop_default(self): context = op_fixture("mssql") op.alter_column("t", "c", server_default=None) context.assert_contains( "declare @const_name varchar(256)select @const_name = [name] " "from sys.default_constraintswhere parent_object_id = " "object_id('t')and col_name(parent_object_id, " "parent_column_id) = 'c'" ) context.assert_contains( "exec('alter table t drop constraint ' + @const_name)" ) def test_alter_column_drop_default_w_schema(self): context = op_fixture("mssql") op.alter_column("t", "c", server_default=None, schema="xyz") context.assert_contains( "declare @const_name varchar(256)select @const_name = [name] " "from sys.default_constraintswhere parent_object_id = " "object_id('xyz.t')and col_name(parent_object_id, " "parent_column_id) = 'c'" ) context.assert_contains( "exec('alter table xyz.t drop constraint ' + @const_name)" ) def test_alter_column_dont_drop_default(self): context = op_fixture("mssql") op.alter_column("t", "c", server_default=False) context.assert_() def test_drop_column_w_schema(self): context = op_fixture("mssql") op.drop_column("t1", "c1", schema="xyz") context.assert_contains("ALTER TABLE xyz.t1 DROP COLUMN c1") def test_drop_column_w_check(self): context = op_fixture("mssql") op.drop_column("t1", "c1", mssql_drop_check=True) op.drop_column("t1", "c2", mssql_drop_check=True) context.assert_contains( "exec('alter table t1 drop constraint ' + @const_name)" ) context.assert_contains("ALTER TABLE t1 DROP COLUMN c1") def test_drop_column_w_check_in_batch(self): context = op_fixture("mssql") with op.batch_alter_table("t1", schema=None) as batch_op: batch_op.drop_column("c1", mssql_drop_check=True) batch_op.drop_column("c2", mssql_drop_check=True) context.assert_contains( "exec('alter table t1 drop constraint ' + @const_name)" ) context.assert_contains("ALTER TABLE t1 DROP COLUMN c1") def test_drop_column_w_check_quoting(self): context = op_fixture("mssql") op.drop_column("table", "column", mssql_drop_check=True) context.assert_contains( "exec('alter table [table] drop constraint ' + @const_name)" ) context.assert_contains("ALTER TABLE [table] DROP COLUMN [column]") def test_alter_column_nullable_w_existing_type(self): context = op_fixture("mssql") op.alter_column("t", "c", nullable=True, existing_type=Integer) context.assert_("ALTER TABLE t ALTER COLUMN c INTEGER NULL") def test_drop_column_w_fk(self): context = op_fixture("mssql") op.drop_column("t1", "c1", mssql_drop_foreign_key=True) context.assert_contains( "declare @const_name varchar(256)\n" "select @const_name = [name] from\n" "sys.foreign_keys fk join sys.foreign_key_columns fkcon " "fk.object_id=fkc.constraint_object_id\n" "where fkc.parent_object_id = object_id('t1')\nand " "col_name(fkc.parent_object_id, fkc.parent_column_id) = 'c1'\n" "exec('alter table t1 drop constraint ' + @const_name)" ) context.assert_contains( "exec('alter table t1 drop constraint ' + @const_name)" ) context.assert_contains("ALTER TABLE t1 DROP COLUMN c1") def test_drop_column_w_fk_schema(self): context = op_fixture("mssql") op.drop_column("t1", "c1", schema="xyz", mssql_drop_foreign_key=True) context.assert_contains( "declare @const_name varchar(256)\n" "select @const_name = [name] from\n" "sys.foreign_keys fk join 
sys.foreign_key_columns fkcon " "fk.object_id=fkc.constraint_object_id\n" "where fkc.parent_object_id = object_id('xyz.t1')\nand " "col_name(fkc.parent_object_id, fkc.parent_column_id) = 'c1'\n" "exec('alter table xyz.t1 drop constraint ' + @const_name)" ) context.assert_contains( "exec('alter table xyz.t1 drop constraint ' + @const_name)" ) context.assert_contains("ALTER TABLE xyz.t1 DROP COLUMN c1") def test_drop_column_w_fk_in_batch(self): context = op_fixture("mssql") with op.batch_alter_table("t1", schema=None) as batch_op: batch_op.drop_column("c1", mssql_drop_foreign_key=True) context.assert_contains( "exec('alter table t1 drop constraint ' + @const_name)" ) context.assert_contains("ALTER TABLE t1 DROP COLUMN c1") def test_alter_column_not_nullable_w_existing_type(self): context = op_fixture("mssql") op.alter_column("t", "c", nullable=False, existing_type=Integer) context.assert_("ALTER TABLE t ALTER COLUMN c INTEGER NOT NULL") def test_alter_column_nullable_w_new_type(self): context = op_fixture("mssql") op.alter_column("t", "c", nullable=True, type_=Integer) context.assert_("ALTER TABLE t ALTER COLUMN c INTEGER NULL") def test_alter_column_not_nullable_w_new_type(self): context = op_fixture("mssql") op.alter_column("t", "c", nullable=False, type_=Integer) context.assert_("ALTER TABLE t ALTER COLUMN c INTEGER NOT NULL") def test_alter_column_nullable_type_required(self): op_fixture("mssql") assert_raises_message( util.CommandError, "MS-SQL ALTER COLUMN operations with NULL or " "NOT NULL require the existing_type or a new " "type_ be passed.", op.alter_column, "t", "c", nullable=False, ) def test_alter_add_server_default(self): context = op_fixture("mssql") op.alter_column("t", "c", server_default="5") context.assert_("ALTER TABLE t ADD DEFAULT '5' FOR c") def test_alter_replace_server_default(self): context = op_fixture("mssql") op.alter_column( "t", "c", server_default="5", existing_server_default="6" ) context.assert_contains( "exec('alter table t drop constraint ' + @const_name)" ) context.assert_contains("ALTER TABLE t ADD DEFAULT '5' FOR c") def test_alter_remove_server_default(self): context = op_fixture("mssql") op.alter_column("t", "c", server_default=None) context.assert_contains( "exec('alter table t drop constraint ' + @const_name)" ) @config.requirements.computed_columns_api def test_add_column_computed(self): context = op_fixture("mssql") op.add_column( "t1", Column("some_column", Integer, sqla_compat.Computed("foo * 5")), ) context.assert_("ALTER TABLE t1 ADD some_column AS (foo * 5)") def test_alter_do_everything(self): context = op_fixture("mssql") op.alter_column( "t", "c", new_column_name="c2", nullable=True, type_=Integer, server_default="5", ) context.assert_( "ALTER TABLE t ALTER COLUMN c INTEGER NULL", "ALTER TABLE t ADD DEFAULT '5' FOR c", "EXEC sp_rename 't.c', c2, 'COLUMN'", ) def test_rename_table(self): context = op_fixture("mssql") op.rename_table("t1", "t2") context.assert_contains("EXEC sp_rename 't1', t2") def test_rename_table_schema(self): context = op_fixture("mssql") op.rename_table("t1", "t2", schema="foobar") context.assert_contains("EXEC sp_rename 'foobar.t1', t2") def test_rename_table_casesens(self): context = op_fixture("mssql") op.rename_table("TeeOne", "TeeTwo") # yup, ran this in SQL Server 2014, the two levels of quoting # seems to be understood. Can't do the two levels on the # target name though ! 
context.assert_contains("EXEC sp_rename '[TeeOne]', [TeeTwo]") def test_rename_table_schema_casesens(self): context = op_fixture("mssql") op.rename_table("TeeOne", "TeeTwo", schema="FooBar") # yup, ran this in SQL Server 2014, the two levels of quoting # seems to be understood. Can't do the two levels on the # target name though ! context.assert_contains("EXEC sp_rename '[FooBar].[TeeOne]', [TeeTwo]") def test_alter_column_rename_mssql_schema(self): context = op_fixture("mssql") op.alter_column("t", "c", new_column_name="x", schema="y") context.assert_("EXEC sp_rename 'y.t.c', x, 'COLUMN'") def test_create_index_mssql_include(self): context = op_fixture("mssql") op.create_index( op.f("ix_mytable_a_b"), "mytable", ["col_a", "col_b"], unique=False, mssql_include=["col_c"], ) context.assert_contains( "CREATE INDEX ix_mytable_a_b ON mytable " "(col_a, col_b) INCLUDE (col_c)" ) def test_create_index_mssql_include_is_none(self): context = op_fixture("mssql") op.create_index( op.f("ix_mytable_a_b"), "mytable", ["col_a", "col_b"], unique=False ) context.assert_contains( "CREATE INDEX ix_mytable_a_b ON mytable " "(col_a, col_b)" ) @combinations( (lambda: sqla_compat.Computed("foo * 5"), lambda: None), (lambda: None, lambda: sqla_compat.Computed("foo * 5")), ( lambda: sqla_compat.Computed("foo * 42"), lambda: sqla_compat.Computed("foo * 5"), ), ) @config.requirements.computed_columns def test_alter_column_computed_not_supported(self, sd, esd): op_fixture("mssql") assert_raises_message( exc.CompileError, 'Adding or removing a "computed" construct, e.g. ' "GENERATED ALWAYS AS, to or from an existing column is not " "supported.", op.alter_column, "t1", "c1", server_default=sd(), existing_server_default=esd(), ) @config.requirements.identity_columns @combinations( ({},), (dict(always=True),), (dict(start=3),), (dict(start=3, increment=3),), ) def test_add_column_identity(self, kw): context = op_fixture("mssql") op.add_column( "t1", Column("some_column", Integer, sqla_compat.Identity(**kw)), ) if "start" in kw or "increment" in kw: options = "(%s,%s)" % ( kw.get("start", 1), kw.get("increment", 1), ) else: options = "" context.assert_( "ALTER TABLE t1 ADD some_column INTEGER NOT NULL IDENTITY%s" % options ) @combinations( (lambda: sqla_compat.Identity(), lambda: None), (lambda: None, lambda: sqla_compat.Identity()), ( lambda: sqla_compat.Identity(), lambda: sqla_compat.Identity(), ), ) @config.requirements.identity_columns def test_alter_column_identity_add_not_supported(self, sd, esd): op_fixture("mssql") assert_raises_message( exc.CompileError, 'Adding, removing or modifying an "identity" construct, ' "e.g. 
GENERATED AS IDENTITY, to or from an existing " "column is not supported in this dialect.", op.alter_column, "t1", "c1", server_default=sd(), existing_server_default=esd(), ) alembic-rel_1_7_6/tests/test_mysql.py000066400000000000000000000532241417624537100200650ustar00rootroot00000000000000from sqlalchemy import Boolean from sqlalchemy import Column from sqlalchemy import DATETIME from sqlalchemy import exc from sqlalchemy import Float from sqlalchemy import func from sqlalchemy import inspect from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import Table from sqlalchemy import text from sqlalchemy import TIMESTAMP from alembic import op from alembic import util from alembic.autogenerate import api from alembic.autogenerate import compare from alembic.migration import MigrationContext from alembic.operations import ops from alembic.testing import assert_raises_message from alembic.testing import combinations from alembic.testing import config from alembic.testing.env import clear_staging_env from alembic.testing.env import staging_env from alembic.testing.fixtures import AlterColRoundTripFixture from alembic.testing.fixtures import op_fixture from alembic.testing.fixtures import TestBase from alembic.util import sqla_compat class MySQLOpTest(TestBase): def test_create_table_with_comment(self): context = op_fixture("mysql") op.create_table( "t2", Column("c1", Integer, primary_key=True), comment="This is a table comment", ) context.assert_contains("COMMENT='This is a table comment'") def test_create_table_with_column_comments(self): context = op_fixture("mysql") op.create_table( "t2", Column("c1", Integer, primary_key=True, comment="c1 comment"), Column("c2", Integer, comment="c2 comment"), comment="This is a table comment", ) context.assert_( "CREATE TABLE t2 " "(c1 INTEGER NOT NULL COMMENT 'c1 comment' AUTO_INCREMENT, " # TODO: why is there no space at the end here? is that on the # SQLA side? 
"c2 INTEGER COMMENT 'c2 comment', PRIMARY KEY (c1))" "COMMENT='This is a table comment'" ) def test_add_column_with_comment(self): context = op_fixture("mysql") op.add_column("t", Column("q", Integer, comment="This is a comment")) context.assert_( "ALTER TABLE t ADD COLUMN q INTEGER COMMENT 'This is a comment'" ) def test_rename_column(self): context = op_fixture("mysql") op.alter_column( "t1", "c1", new_column_name="c2", existing_type=Integer ) context.assert_("ALTER TABLE t1 CHANGE c1 c2 INTEGER NULL") def test_rename_column_quotes_needed_one(self): context = op_fixture("mysql") op.alter_column( "MyTable", "ColumnOne", new_column_name="ColumnTwo", existing_type=Integer, ) context.assert_( "ALTER TABLE `MyTable` CHANGE `ColumnOne` `ColumnTwo` INTEGER NULL" ) def test_rename_column_quotes_needed_two(self): context = op_fixture("mysql") op.alter_column( "my table", "column one", new_column_name="column two", existing_type=Integer, ) context.assert_( "ALTER TABLE `my table` CHANGE `column one` " "`column two` INTEGER NULL" ) def test_rename_column_serv_default(self): context = op_fixture("mysql") op.alter_column( "t1", "c1", new_column_name="c2", existing_type=Integer, existing_server_default="q", ) context.assert_("ALTER TABLE t1 CHANGE c1 c2 INTEGER NULL DEFAULT 'q'") def test_rename_column_serv_compiled_default(self): context = op_fixture("mysql") op.alter_column( "t1", "c1", existing_type=Integer, server_default=func.utc_thing(func.current_timestamp()), ) # this is not a valid MySQL default but the point is to just # test SQL expression rendering context.assert_( "ALTER TABLE t1 ALTER COLUMN c1 " "SET DEFAULT utc_thing(CURRENT_TIMESTAMP)" ) def test_rename_column_autoincrement(self): context = op_fixture("mysql") op.alter_column( "t1", "c1", new_column_name="c2", existing_type=Integer, existing_autoincrement=True, ) context.assert_( "ALTER TABLE t1 CHANGE c1 c2 INTEGER NULL AUTO_INCREMENT" ) def test_col_add_autoincrement(self): context = op_fixture("mysql") op.alter_column("t1", "c1", existing_type=Integer, autoincrement=True) context.assert_("ALTER TABLE t1 MODIFY c1 INTEGER NULL AUTO_INCREMENT") def test_col_remove_autoincrement(self): context = op_fixture("mysql") op.alter_column( "t1", "c1", existing_type=Integer, existing_autoincrement=True, autoincrement=False, ) context.assert_("ALTER TABLE t1 MODIFY c1 INTEGER NULL") def test_col_dont_remove_server_default(self): context = op_fixture("mysql") op.alter_column( "t1", "c1", existing_type=Integer, existing_server_default="1", server_default=False, ) context.assert_() def test_alter_column_drop_default(self): context = op_fixture("mysql") op.alter_column("t", "c", existing_type=Integer, server_default=None) context.assert_("ALTER TABLE t ALTER COLUMN c DROP DEFAULT") def test_alter_column_remove_schematype(self): context = op_fixture("mysql") op.alter_column( "t", "c", type_=Integer, existing_type=Boolean(create_constraint=True, name="ck1"), server_default=None, ) context.assert_("ALTER TABLE t MODIFY c INTEGER NULL") def test_alter_column_modify_default(self): context = op_fixture("mysql") # notice we dont need the existing type on this one... op.alter_column("t", "c", server_default="1") context.assert_("ALTER TABLE t ALTER COLUMN c SET DEFAULT '1'") def test_alter_column_modify_datetime_default(self): # use CHANGE format when the datatype is DATETIME or TIMESTAMP, # as this is needed for a functional default which is what you'd # get with a DATETIME/TIMESTAMP. 
Will also work in the very unlikely # case the default is a fixed timestamp value. context = op_fixture("mysql") op.alter_column( "t", "c", existing_type=DATETIME(), server_default=text("CURRENT_TIMESTAMP"), ) context.assert_( "ALTER TABLE t CHANGE c c DATETIME NULL DEFAULT CURRENT_TIMESTAMP" ) def test_alter_column_modify_programmatic_default(self): # test issue #736 # when autogenerate.compare creates the operation object # programmatically, the server_default of the op has the full # DefaultClause present. make sure the usual renderer works. context = op_fixture("mysql") m1 = MetaData() autogen_context = api.AutogenContext(context, m1) operation = ops.AlterColumnOp("t", "c") for fn in ( compare._compare_nullable, compare._compare_type, compare._compare_server_default, ): fn( autogen_context, operation, None, "t", "c", Column("c", Float(), nullable=False, server_default=text("0")), Column("c", Float(), nullable=True, default=0), ) op.invoke(operation) context.assert_("ALTER TABLE t MODIFY c FLOAT NULL DEFAULT 0") def test_col_not_nullable(self): context = op_fixture("mysql") op.alter_column("t1", "c1", nullable=False, existing_type=Integer) context.assert_("ALTER TABLE t1 MODIFY c1 INTEGER NOT NULL") def test_col_not_nullable_existing_serv_default(self): context = op_fixture("mysql") op.alter_column( "t1", "c1", nullable=False, existing_type=Integer, existing_server_default="5", ) context.assert_( "ALTER TABLE t1 MODIFY c1 INTEGER NOT NULL DEFAULT '5'" ) def test_col_nullable(self): context = op_fixture("mysql") op.alter_column("t1", "c1", nullable=True, existing_type=Integer) context.assert_("ALTER TABLE t1 MODIFY c1 INTEGER NULL") def test_col_multi_alter(self): context = op_fixture("mysql") op.alter_column( "t1", "c1", nullable=False, server_default="q", type_=Integer ) context.assert_( "ALTER TABLE t1 MODIFY c1 INTEGER NOT NULL DEFAULT 'q'" ) def test_alter_column_multi_alter_w_drop_default(self): context = op_fixture("mysql") op.alter_column( "t1", "c1", nullable=False, server_default=None, type_=Integer ) context.assert_("ALTER TABLE t1 MODIFY c1 INTEGER NOT NULL") def test_col_alter_type_required(self): op_fixture("mysql") assert_raises_message( util.CommandError, "MySQL CHANGE/MODIFY COLUMN operations require the existing type.", op.alter_column, "t1", "c1", nullable=False, server_default="q", ) def test_alter_column_add_comment(self): context = op_fixture("mysql") op.alter_column( "t1", "c1", comment="This is a column comment", existing_type=Boolean(), schema="foo", ) context.assert_( "ALTER TABLE foo.t1 MODIFY c1 BOOL NULL " "COMMENT 'This is a column comment'" ) def test_alter_column_add_comment_quoting(self): context = op_fixture("mysql") op.alter_column( "t1", "c1", comment="This is a 'column' comment", existing_type=Boolean(), schema="foo", ) context.assert_( "ALTER TABLE foo.t1 MODIFY c1 BOOL NULL " "COMMENT 'This is a ''column'' comment'" ) def test_alter_column_drop_comment(self): context = op_fixture("mysql") op.alter_column( "t", "c", existing_type=Boolean(), schema="foo", comment=None, existing_comment="This is a column comment", ) context.assert_("ALTER TABLE foo.t MODIFY c BOOL NULL") def test_alter_column_existing_comment(self): context = op_fixture("mysql") op.alter_column( "t1", "c1", nullable=False, existing_comment="existing column comment", existing_type=Integer, ) context.assert_( "ALTER TABLE t1 MODIFY c1 INTEGER NOT NULL " "COMMENT 'existing column comment'" ) def test_rename_column_existing_comment(self): context = op_fixture("mysql") op.alter_column( 
"t1", "c1", new_column_name="newc1", existing_nullable=False, existing_comment="existing column comment", existing_type=Integer, ) context.assert_( "ALTER TABLE t1 CHANGE c1 newc1 INTEGER NOT NULL " "COMMENT 'existing column comment'" ) def test_alter_column_new_comment_replaces_existing(self): context = op_fixture("mysql") op.alter_column( "t1", "c1", nullable=False, comment="This is a column comment", existing_comment="existing column comment", existing_type=Integer, ) context.assert_( "ALTER TABLE t1 MODIFY c1 INTEGER NOT NULL " "COMMENT 'This is a column comment'" ) def test_create_table_comment(self): # this is handled by SQLAlchemy's compilers context = op_fixture("mysql") op.create_table_comment("t2", comment="t2 table", schema="foo") context.assert_("ALTER TABLE foo.t2 COMMENT 't2 table'") def test_drop_table_comment(self): # this is handled by SQLAlchemy's compilers context = op_fixture("mysql") op.drop_table_comment("t2", existing_comment="t2 table", schema="foo") context.assert_("ALTER TABLE foo.t2 COMMENT ''") @config.requirements.computed_columns_api def test_add_column_computed(self): context = op_fixture("mysql") op.add_column( "t1", Column("some_column", Integer, sqla_compat.Computed("foo * 5")), ) context.assert_( "ALTER TABLE t1 ADD COLUMN some_column " "INTEGER GENERATED ALWAYS AS (foo * 5)" ) def test_drop_fk(self): context = op_fixture("mysql") op.drop_constraint("f1", "t1", "foreignkey") context.assert_("ALTER TABLE t1 DROP FOREIGN KEY f1") def test_drop_fk_quoted(self): context = op_fixture("mysql") op.drop_constraint("MyFk", "MyTable", "foreignkey") context.assert_("ALTER TABLE `MyTable` DROP FOREIGN KEY `MyFk`") def test_drop_constraint_primary(self): context = op_fixture("mysql") op.drop_constraint("primary", "t1", type_="primary") context.assert_("ALTER TABLE t1 DROP PRIMARY KEY") def test_drop_unique(self): context = op_fixture("mysql") op.drop_constraint("f1", "t1", "unique") context.assert_("ALTER TABLE t1 DROP INDEX f1") def test_drop_unique_quoted(self): context = op_fixture("mysql") op.drop_constraint("MyUnique", "MyTable", "unique") context.assert_("ALTER TABLE `MyTable` DROP INDEX `MyUnique`") def test_drop_check_mariadb(self): context = op_fixture("mariadb") op.drop_constraint("f1", "t1", "check") context.assert_("ALTER TABLE t1 DROP CONSTRAINT f1") def test_drop_check_quoted_mariadb(self): context = op_fixture("mariadb") op.drop_constraint("MyCheck", "MyTable", "check") context.assert_("ALTER TABLE `MyTable` DROP CONSTRAINT `MyCheck`") def test_drop_check_mysql(self): context = op_fixture("mysql") op.drop_constraint("f1", "t1", "check") context.assert_("ALTER TABLE t1 DROP CHECK f1") def test_drop_check_quoted_mysql(self): context = op_fixture("mysql") op.drop_constraint("MyCheck", "MyTable", "check") context.assert_("ALTER TABLE `MyTable` DROP CHECK `MyCheck`") def test_drop_unknown(self): op_fixture("mysql") assert_raises_message( TypeError, "'type' can be one of 'check', 'foreignkey', " "'primary', 'unique', None", op.drop_constraint, "f1", "t1", "typo", ) def test_drop_generic_constraint(self): op_fixture("mysql") assert_raises_message( NotImplementedError, "No generic 'DROP CONSTRAINT' in MySQL - please " "specify constraint type", op.drop_constraint, "f1", "t1", ) @combinations( (lambda: sqla_compat.Computed("foo * 5"), lambda: None), (lambda: None, lambda: sqla_compat.Computed("foo * 5")), ( lambda: sqla_compat.Computed("foo * 42"), lambda: sqla_compat.Computed("foo * 5"), ), ) @config.requirements.computed_columns_api def 
test_alter_column_computed_not_supported(self, sd, esd): op_fixture("mssql") assert_raises_message( exc.CompileError, 'Adding or removing a "computed" construct, e.g. ' "GENERATED ALWAYS AS, to or from an existing column is not " "supported.", op.alter_column, "t1", "c1", server_default=sd(), existing_server_default=esd(), ) @combinations( (lambda: sqla_compat.Identity(), lambda: None), (lambda: None, lambda: sqla_compat.Identity()), ( lambda: sqla_compat.Identity(), lambda: sqla_compat.Identity(), ), ) @config.requirements.identity_columns_api def test_alter_column_identity_not_supported(self, sd, esd): op_fixture() assert_raises_message( exc.CompileError, 'Adding, removing or modifying an "identity" construct, ' "e.g. GENERATED AS IDENTITY, to or from an existing " "column is not supported in this dialect.", op.alter_column, "t1", "c1", server_default=sd(), existing_server_default=esd(), ) class MySQLBackendOpTest(AlterColRoundTripFixture, TestBase): __only_on__ = "mysql", "mariadb" __backend__ = True def test_add_timestamp_server_default_current_timestamp(self): self._run_alter_col( {"type": TIMESTAMP()}, {"server_default": text("CURRENT_TIMESTAMP")}, ) def test_add_datetime_server_default_current_timestamp(self): self._run_alter_col( {"type": DATETIME()}, {"server_default": text("CURRENT_TIMESTAMP")} ) def test_add_timestamp_server_default_now(self): self._run_alter_col( {"type": TIMESTAMP()}, {"server_default": text("NOW()")}, compare={"server_default": text("CURRENT_TIMESTAMP")}, ) def test_add_datetime_server_default_now(self): self._run_alter_col( {"type": DATETIME()}, {"server_default": text("NOW()")}, compare={"server_default": text("CURRENT_TIMESTAMP")}, ) def test_add_timestamp_server_default_current_timestamp_bundle_onupdate( self, ): # note SQLAlchemy reflection bundles the ON UPDATE part into the # server default reflection see # https://github.com/sqlalchemy/sqlalchemy/issues/4652 self._run_alter_col( {"type": TIMESTAMP()}, { "server_default": text( "CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP" ) }, ) def test_add_datetime_server_default_current_timestamp_bundle_onupdate( self, ): # note SQLAlchemy reflection bundles the ON UPDATE part into the # server default reflection see # https://github.com/sqlalchemy/sqlalchemy/issues/4652 self._run_alter_col( {"type": DATETIME()}, { "server_default": text( "CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP" ) }, ) class MySQLDefaultCompareTest(TestBase): __only_on__ = "mysql", "mariadb" __backend__ = True @classmethod def setup_class(cls): cls.bind = config.db staging_env() context = MigrationContext.configure( connection=cls.bind.connect(), opts={"compare_type": True, "compare_server_default": True}, ) connection = context.bind cls.autogen_context = { "imports": set(), "connection": connection, "dialect": connection.dialect, "context": context, } @classmethod def teardown_class(cls): clear_staging_env() def setUp(self): self.metadata = MetaData() def tearDown(self): with config.db.begin() as conn: self.metadata.drop_all(conn) def _compare_default_roundtrip(self, type_, txt, alternate=None): if alternate: expected = True else: alternate = txt expected = False t = Table( "test", self.metadata, Column( "somecol", type_, server_default=text(txt) if txt else None ), ) t2 = Table( "test", MetaData(), Column("somecol", type_, server_default=text(alternate)), ) assert ( self._compare_default(t, t2, t2.c.somecol, alternate) is expected ) def _compare_default(self, t1, t2, col, rendered): t1.create(self.bind) insp = inspect(self.bind) cols = 
insp.get_columns(t1.name) refl = Table(t1.name, MetaData()) sqla_compat._reflect_table(insp, refl, None) ctx = self.autogen_context["context"] return ctx.impl.compare_server_default( refl.c[cols[0]["name"]], col, rendered, cols[0]["default"] ) def test_compare_timestamp_current_timestamp(self): self._compare_default_roundtrip(TIMESTAMP(), "CURRENT_TIMESTAMP") def test_compare_timestamp_current_timestamp_diff(self): self._compare_default_roundtrip(TIMESTAMP(), None, "CURRENT_TIMESTAMP") def test_compare_timestamp_current_timestamp_bundle_onupdate(self): self._compare_default_roundtrip( TIMESTAMP(), "CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP" ) def test_compare_timestamp_current_timestamp_diff_bundle_onupdate(self): self._compare_default_roundtrip( TIMESTAMP(), None, "CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP" ) def test_compare_integer_from_none(self): self._compare_default_roundtrip(Integer(), None, "0") def test_compare_integer_same(self): self._compare_default_roundtrip(Integer(), "5") def test_compare_integer_diff(self): self._compare_default_roundtrip(Integer(), "5", "7") def test_compare_boolean_same(self): self._compare_default_roundtrip(Boolean(), "1") def test_compare_boolean_diff(self): self._compare_default_roundtrip(Boolean(), "1", "0") alembic-rel_1_7_6/tests/test_offline_environment.py000066400000000000000000000230661417624537100227670ustar00rootroot00000000000000import re from alembic import command from alembic import util from alembic.testing import assert_raises_message from alembic.testing.env import _no_sql_testing_config from alembic.testing.env import clear_staging_env from alembic.testing.env import env_file_fixture from alembic.testing.env import multi_heads_fixture from alembic.testing.env import staging_env from alembic.testing.env import three_rev_fixture from alembic.testing.fixtures import capture_context_buffer from alembic.testing.fixtures import TestBase a = b = c = None class OfflineEnvironmentTest(TestBase): def setUp(self): staging_env() self.cfg = _no_sql_testing_config() global a, b, c a, b, c = three_rev_fixture(self.cfg) def tearDown(self): clear_staging_env() def test_not_requires_connection(self): env_file_fixture( """ assert not context.requires_connection() """ ) command.upgrade(self.cfg, a, sql=True) command.downgrade(self.cfg, "%s:%s" % (b, a), sql=True) def test_requires_connection(self): env_file_fixture( """ assert context.requires_connection() """ ) command.upgrade(self.cfg, a) command.downgrade(self.cfg, a) def test_starting_rev_post_context(self): env_file_fixture( """ context.configure(dialect_name='sqlite', starting_rev='x') assert context.get_starting_revision_argument() == 'x' """ ) command.upgrade(self.cfg, a, sql=True) command.downgrade(self.cfg, "%s:%s" % (b, a), sql=True) command.current(self.cfg) command.stamp(self.cfg, a) def test_starting_rev_pre_context(self): env_file_fixture( """ assert context.get_starting_revision_argument() == 'x' """ ) command.upgrade(self.cfg, "x:y", sql=True) command.downgrade(self.cfg, "x:y", sql=True) def test_starting_rev_pre_context_cmd_w_no_startrev(self): env_file_fixture( """ assert context.get_starting_revision_argument() == 'x' """ ) assert_raises_message( util.CommandError, "No starting revision argument is available.", command.current, self.cfg, ) def test_starting_rev_current_pre_context(self): env_file_fixture( """ assert context.get_starting_revision_argument() is None """ ) assert_raises_message( util.CommandError, "No starting revision argument is available.", command.current, 
self.cfg, ) def test_destination_rev_pre_context(self): env_file_fixture( """ assert context.get_revision_argument() == '%s' """ % b ) command.upgrade(self.cfg, b, sql=True) command.stamp(self.cfg, b, sql=True) command.downgrade(self.cfg, "%s:%s" % (c, b), sql=True) def test_destination_rev_pre_context_multihead(self): d, e, f = multi_heads_fixture(self.cfg, a, b, c) env_file_fixture( """ assert set(context.get_revision_argument()) == set(('%s', '%s', '%s', )) """ % (f, e, c) ) command.upgrade(self.cfg, "heads", sql=True) def test_destination_rev_post_context(self): env_file_fixture( """ context.configure(dialect_name='sqlite') assert context.get_revision_argument() == '%s' """ % b ) command.upgrade(self.cfg, b, sql=True) command.downgrade(self.cfg, "%s:%s" % (c, b), sql=True) command.stamp(self.cfg, b, sql=True) def test_destination_rev_post_context_multihead(self): d, e, f = multi_heads_fixture(self.cfg, a, b, c) env_file_fixture( """ context.configure(dialect_name='sqlite') assert set(context.get_revision_argument()) == set(('%s', '%s', '%s', )) """ % (f, e, c) ) command.upgrade(self.cfg, "heads", sql=True) def test_head_rev_pre_context(self): env_file_fixture( """ assert context.get_head_revision() == '%s' assert context.get_head_revisions() == ('%s', ) """ % (c, c) ) command.upgrade(self.cfg, b, sql=True) command.downgrade(self.cfg, "%s:%s" % (b, a), sql=True) command.stamp(self.cfg, b, sql=True) command.current(self.cfg) def test_head_rev_pre_context_multihead(self): d, e, f = multi_heads_fixture(self.cfg, a, b, c) env_file_fixture( """ assert set(context.get_head_revisions()) == set(('%s', '%s', '%s', )) """ % (e, f, c) ) command.upgrade(self.cfg, e, sql=True) command.downgrade(self.cfg, "%s:%s" % (e, b), sql=True) command.stamp(self.cfg, c, sql=True) command.current(self.cfg) def test_head_rev_post_context(self): env_file_fixture( """ context.configure(dialect_name='sqlite') assert context.get_head_revision() == '%s' assert context.get_head_revisions() == ('%s', ) """ % (c, c) ) command.upgrade(self.cfg, b, sql=True) command.downgrade(self.cfg, "%s:%s" % (b, a), sql=True) command.stamp(self.cfg, b, sql=True) command.current(self.cfg) def test_head_rev_post_context_multihead(self): d, e, f = multi_heads_fixture(self.cfg, a, b, c) env_file_fixture( """ context.configure(dialect_name='sqlite') assert set(context.get_head_revisions()) == set(('%s', '%s', '%s', )) """ % (e, f, c) ) command.upgrade(self.cfg, e, sql=True) command.downgrade(self.cfg, "%s:%s" % (e, b), sql=True) command.stamp(self.cfg, c, sql=True) command.current(self.cfg) def test_tag_pre_context(self): env_file_fixture( """ assert context.get_tag_argument() == 'hi' """ ) command.upgrade(self.cfg, b, sql=True, tag="hi") command.downgrade(self.cfg, "%s:%s" % (b, a), sql=True, tag="hi") def test_tag_pre_context_None(self): env_file_fixture( """ assert context.get_tag_argument() is None """ ) command.upgrade(self.cfg, b, sql=True) command.downgrade(self.cfg, "%s:%s" % (b, a), sql=True) def test_tag_cmd_arg(self): env_file_fixture( """ context.configure(dialect_name='sqlite') assert context.get_tag_argument() == 'hi' """ ) command.upgrade(self.cfg, b, sql=True, tag="hi") command.downgrade(self.cfg, "%s:%s" % (b, a), sql=True, tag="hi") def test_tag_cfg_arg(self): env_file_fixture( """ context.configure(dialect_name='sqlite', tag='there') assert context.get_tag_argument() == 'there' """ ) command.upgrade(self.cfg, b, sql=True, tag="hi") command.downgrade(self.cfg, "%s:%s" % (b, a), sql=True, tag="hi") def test_tag_None(self): 
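        # no tag is passed to upgrade/downgrade here, so the env script
        # should see context.get_tag_argument() return None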
env_file_fixture( """ context.configure(dialect_name='sqlite') assert context.get_tag_argument() is None """ ) command.upgrade(self.cfg, b, sql=True) command.downgrade(self.cfg, "%s:%s" % (b, a), sql=True) def test_downgrade_wo_colon(self): env_file_fixture( """ context.configure(dialect_name='sqlite') """ ) assert_raises_message( util.CommandError, "downgrade with --sql requires :", command.downgrade, self.cfg, b, sql=True, ) def test_upgrade_with_output_encoding(self): env_file_fixture( """ url = config.get_main_option('sqlalchemy.url') context.configure(url=url, output_encoding='utf-8') assert not context.requires_connection() """ ) command.upgrade(self.cfg, a, sql=True) command.downgrade(self.cfg, "%s:%s" % (b, a), sql=True) def test_running_comments_not_in_sql(self): message = "this is a very long \nand multiline\nmessage" d = command.revision(self.cfg, message=message) with capture_context_buffer(transactional_ddl=True) as buf: command.upgrade(self.cfg, "%s:%s" % (a, d.revision), sql=True) assert not re.match( r".*-- .*and multiline", buf.getvalue(), re.S | re.M ) def test_starting_rev_pre_context_abbreviated(self): env_file_fixture( """ assert context.get_starting_revision_argument() == '%s' """ % b[0:4] ) command.upgrade(self.cfg, "%s:%s" % (b[0:4], c), sql=True) command.stamp(self.cfg, "%s:%s" % (b[0:4], c), sql=True) command.downgrade(self.cfg, "%s:%s" % (b[0:4], a), sql=True) def test_destination_rev_pre_context_abbreviated(self): env_file_fixture( """ assert context.get_revision_argument() == '%s' """ % b[0:4] ) command.upgrade(self.cfg, "%s:%s" % (a, b[0:4]), sql=True) command.stamp(self.cfg, b[0:4], sql=True) command.downgrade(self.cfg, "%s:%s" % (c, b[0:4]), sql=True) def test_starting_rev_context_runs_abbreviated(self): env_file_fixture( """ context.configure(dialect_name='sqlite') context.run_migrations() """ ) command.upgrade(self.cfg, "%s:%s" % (b[0:4], c), sql=True) command.downgrade(self.cfg, "%s:%s" % (b[0:4], a), sql=True) def test_destination_rev_context_runs_abbreviated(self): env_file_fixture( """ context.configure(dialect_name='sqlite') context.run_migrations() """ ) command.upgrade(self.cfg, "%s:%s" % (a, b[0:4]), sql=True) command.stamp(self.cfg, b[0:4], sql=True) command.downgrade(self.cfg, "%s:%s" % (c, b[0:4]), sql=True) alembic-rel_1_7_6/tests/test_op.py000066400000000000000000001234231417624537100173350ustar00rootroot00000000000000"""Test against the builders in the op.* module.""" from sqlalchemy import Boolean from sqlalchemy import CheckConstraint from sqlalchemy import Column from sqlalchemy import exc from sqlalchemy import ForeignKey from sqlalchemy import Index from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import String from sqlalchemy import Table from sqlalchemy import UniqueConstraint from sqlalchemy.sql import column from sqlalchemy.sql import func from sqlalchemy.sql import text from sqlalchemy.sql.schema import quoted_name from alembic import op from alembic.operations import ops from alembic.operations import schemaobj from alembic.testing import assert_raises_message from alembic.testing import combinations from alembic.testing import config from alembic.testing import eq_ from alembic.testing import is_not_ from alembic.testing import mock from alembic.testing.fixtures import op_fixture from alembic.testing.fixtures import TestBase from alembic.util import sqla_compat class OpTest(TestBase): def test_rename_table(self): context = op_fixture() op.rename_table("t1", "t2") context.assert_("ALTER TABLE t1 RENAME 
TO t2") def test_rename_table_schema(self): context = op_fixture() op.rename_table("t1", "t2", schema="foo") context.assert_("ALTER TABLE foo.t1 RENAME TO foo.t2") def test_create_index_arbitrary_expr(self): context = op_fixture() op.create_index("name", "tname", [func.foo(column("x"))]) context.assert_("CREATE INDEX name ON tname (foo(x))") def test_add_column_schema_hard_quoting(self): context = op_fixture("postgresql") op.add_column( "somename", Column("colname", String), schema=quoted_name("some.schema", quote=True), ) context.assert_( 'ALTER TABLE "some.schema".somename ADD COLUMN colname VARCHAR' ) def test_rename_table_schema_hard_quoting(self): context = op_fixture("postgresql") op.rename_table( "t1", "t2", schema=quoted_name("some.schema", quote=True) ) context.assert_('ALTER TABLE "some.schema".t1 RENAME TO t2') def test_add_constraint_schema_hard_quoting(self): context = op_fixture("postgresql") op.create_check_constraint( "ck_user_name_len", "user_table", func.len(column("name")) > 5, schema=quoted_name("some.schema", quote=True), ) context.assert_( 'ALTER TABLE "some.schema".user_table ADD ' "CONSTRAINT ck_user_name_len CHECK (len(name) > 5)" ) def test_create_index_quoting(self): context = op_fixture("postgresql") op.create_index("geocoded", "locations", ["IShouldBeQuoted"]) context.assert_( 'CREATE INDEX geocoded ON locations ("IShouldBeQuoted")' ) def test_create_index_expressions(self): context = op_fixture() op.create_index("geocoded", "locations", [text("lower(coordinates)")]) context.assert_( "CREATE INDEX geocoded ON locations (lower(coordinates))" ) def test_add_column(self): context = op_fixture() op.add_column("t1", Column("c1", Integer, nullable=False)) context.assert_("ALTER TABLE t1 ADD COLUMN c1 INTEGER NOT NULL") def test_add_column_already_attached(self): context = op_fixture() c1 = Column("c1", Integer, nullable=False) Table("t", MetaData(), c1) op.add_column("t1", c1) context.assert_("ALTER TABLE t1 ADD COLUMN c1 INTEGER NOT NULL") def test_add_column_w_check(self): context = op_fixture() op.add_column( "t1", Column("c1", Integer, CheckConstraint("c1 > 5"), nullable=False), ) context.assert_( "ALTER TABLE t1 ADD COLUMN c1 INTEGER NOT NULL CHECK (c1 > 5)" ) def test_add_column_schema(self): context = op_fixture() op.add_column( "t1", Column("c1", Integer, nullable=False), schema="foo" ) context.assert_("ALTER TABLE foo.t1 ADD COLUMN c1 INTEGER NOT NULL") def test_add_column_with_default(self): context = op_fixture() op.add_column( "t1", Column("c1", Integer, nullable=False, server_default="12") ) context.assert_( "ALTER TABLE t1 ADD COLUMN c1 INTEGER DEFAULT '12' NOT NULL" ) def test_add_column_with_index(self): context = op_fixture() op.add_column("t1", Column("c1", Integer, nullable=False, index=True)) context.assert_( "ALTER TABLE t1 ADD COLUMN c1 INTEGER NOT NULL", "CREATE INDEX ix_t1_c1 ON t1 (c1)", ) def test_add_column_schema_with_default(self): context = op_fixture() op.add_column( "t1", Column("c1", Integer, nullable=False, server_default="12"), schema="foo", ) context.assert_( "ALTER TABLE foo.t1 ADD COLUMN c1 INTEGER DEFAULT '12' NOT NULL" ) def test_add_column_fk(self): context = op_fixture() op.add_column( "t1", Column("c1", Integer, ForeignKey("c2.id"), nullable=False) ) context.assert_( "ALTER TABLE t1 ADD COLUMN c1 INTEGER NOT NULL", "ALTER TABLE t1 ADD FOREIGN KEY(c1) REFERENCES c2 (id)", ) def test_add_column_schema_fk(self): context = op_fixture() op.add_column( "t1", Column("c1", Integer, ForeignKey("c2.id"), nullable=False), schema="foo", 
) context.assert_( "ALTER TABLE foo.t1 ADD COLUMN c1 INTEGER NOT NULL", "ALTER TABLE foo.t1 ADD FOREIGN KEY(c1) REFERENCES c2 (id)", ) def test_add_column_schema_type(self): """Test that a schema type generates its constraints....""" context = op_fixture() op.add_column( "t1", Column("c1", Boolean(create_constraint=True), nullable=False) ) context.assert_( "ALTER TABLE t1 ADD COLUMN c1 BOOLEAN NOT NULL", "ALTER TABLE t1 ADD CHECK (c1 IN (0, 1))", ) def test_add_column_schema_schema_type(self): """Test that a schema type generates its constraints....""" context = op_fixture() op.add_column( "t1", Column("c1", Boolean(create_constraint=True), nullable=False), schema="foo", ) context.assert_( "ALTER TABLE foo.t1 ADD COLUMN c1 BOOLEAN NOT NULL", "ALTER TABLE foo.t1 ADD CHECK (c1 IN (0, 1))", ) def test_add_column_schema_type_checks_rule(self): """Test that a schema type doesn't generate a constraint based on check rule.""" context = op_fixture("postgresql") op.add_column( "t1", Column("c1", Boolean(create_constraint=True), nullable=False) ) context.assert_("ALTER TABLE t1 ADD COLUMN c1 BOOLEAN NOT NULL") def test_add_column_fk_self_referential(self): context = op_fixture() op.add_column( "t1", Column("c1", Integer, ForeignKey("t1.c2"), nullable=False) ) context.assert_( "ALTER TABLE t1 ADD COLUMN c1 INTEGER NOT NULL", "ALTER TABLE t1 ADD FOREIGN KEY(c1) REFERENCES t1 (c2)", ) def test_add_column_schema_fk_self_referential(self): context = op_fixture() op.add_column( "t1", Column("c1", Integer, ForeignKey("foo.t1.c2"), nullable=False), schema="foo", ) context.assert_( "ALTER TABLE foo.t1 ADD COLUMN c1 INTEGER NOT NULL", "ALTER TABLE foo.t1 ADD FOREIGN KEY(c1) REFERENCES foo.t1 (c2)", ) def test_add_column_fk_schema(self): context = op_fixture() op.add_column( "t1", Column("c1", Integer, ForeignKey("remote.t2.c2"), nullable=False), ) context.assert_( "ALTER TABLE t1 ADD COLUMN c1 INTEGER NOT NULL", "ALTER TABLE t1 ADD FOREIGN KEY(c1) REFERENCES remote.t2 (c2)", ) def test_add_column_schema_fk_schema(self): context = op_fixture() op.add_column( "t1", Column("c1", Integer, ForeignKey("remote.t2.c2"), nullable=False), schema="foo", ) context.assert_( "ALTER TABLE foo.t1 ADD COLUMN c1 INTEGER NOT NULL", "ALTER TABLE foo.t1 ADD FOREIGN KEY(c1) REFERENCES remote.t2 (c2)", ) def test_drop_column(self): context = op_fixture() op.drop_column("t1", "c1") context.assert_("ALTER TABLE t1 DROP COLUMN c1") def test_drop_column_schema(self): context = op_fixture() op.drop_column("t1", "c1", schema="foo") context.assert_("ALTER TABLE foo.t1 DROP COLUMN c1") def test_alter_column_nullable(self): context = op_fixture() op.alter_column("t", "c", nullable=True) context.assert_( # TODO: not sure if this is PG only or standard # SQL "ALTER TABLE t ALTER COLUMN c DROP NOT NULL" ) def test_alter_column_schema_nullable(self): context = op_fixture() op.alter_column("t", "c", nullable=True, schema="foo") context.assert_( # TODO: not sure if this is PG only or standard # SQL "ALTER TABLE foo.t ALTER COLUMN c DROP NOT NULL" ) def test_alter_column_not_nullable(self): context = op_fixture() op.alter_column("t", "c", nullable=False) context.assert_( # TODO: not sure if this is PG only or standard # SQL "ALTER TABLE t ALTER COLUMN c SET NOT NULL" ) def test_alter_column_schema_not_nullable(self): context = op_fixture() op.alter_column("t", "c", nullable=False, schema="foo") context.assert_( # TODO: not sure if this is PG only or standard # SQL "ALTER TABLE foo.t ALTER COLUMN c SET NOT NULL" ) def 
test_alter_column_rename(self): context = op_fixture() op.alter_column("t", "c", new_column_name="x") context.assert_("ALTER TABLE t RENAME c TO x") def test_alter_column_schema_rename(self): context = op_fixture() op.alter_column("t", "c", new_column_name="x", schema="foo") context.assert_("ALTER TABLE foo.t RENAME c TO x") def test_alter_column_type(self): context = op_fixture() op.alter_column("t", "c", type_=String(50)) context.assert_("ALTER TABLE t ALTER COLUMN c TYPE VARCHAR(50)") def test_alter_column_schema_type(self): context = op_fixture() op.alter_column("t", "c", type_=String(50), schema="foo") context.assert_("ALTER TABLE foo.t ALTER COLUMN c TYPE VARCHAR(50)") def test_alter_column_set_default(self): context = op_fixture() op.alter_column("t", "c", server_default="q") context.assert_("ALTER TABLE t ALTER COLUMN c SET DEFAULT 'q'") def test_alter_column_schema_set_default(self): context = op_fixture() op.alter_column("t", "c", server_default="q", schema="foo") context.assert_("ALTER TABLE foo.t ALTER COLUMN c SET DEFAULT 'q'") def test_alter_column_set_compiled_default(self): context = op_fixture() op.alter_column( "t", "c", server_default=func.utc_thing(func.current_timestamp()) ) context.assert_( "ALTER TABLE t ALTER COLUMN c " "SET DEFAULT utc_thing(CURRENT_TIMESTAMP)" ) def test_alter_column_schema_set_compiled_default(self): context = op_fixture() op.alter_column( "t", "c", server_default=func.utc_thing(func.current_timestamp()), schema="foo", ) context.assert_( "ALTER TABLE foo.t ALTER COLUMN c " "SET DEFAULT utc_thing(CURRENT_TIMESTAMP)" ) def test_alter_column_drop_default(self): context = op_fixture() op.alter_column("t", "c", server_default=None) context.assert_("ALTER TABLE t ALTER COLUMN c DROP DEFAULT") def test_alter_column_schema_drop_default(self): context = op_fixture() op.alter_column("t", "c", server_default=None, schema="foo") context.assert_("ALTER TABLE foo.t ALTER COLUMN c DROP DEFAULT") @combinations( (lambda: sqla_compat.Computed("foo * 5"), lambda: None), (lambda: None, lambda: sqla_compat.Computed("foo * 5")), ( lambda: sqla_compat.Computed("foo * 42"), lambda: sqla_compat.Computed("foo * 5"), ), ) @config.requirements.computed_columns_api def test_alter_column_computed_not_supported(self, sd, esd): op_fixture() assert_raises_message( exc.CompileError, 'Adding or removing a "computed" construct, e.g. ' "GENERATED ALWAYS AS, to or from an existing column is not " "supported.", op.alter_column, "t1", "c1", server_default=sd(), existing_server_default=esd(), ) @combinations( (lambda: sqla_compat.Identity(), lambda: None), (lambda: None, lambda: sqla_compat.Identity()), ( lambda: sqla_compat.Identity(), lambda: sqla_compat.Identity(), ), ) @config.requirements.identity_columns_api def test_alter_column_identity_not_supported(self, sd, esd): op_fixture() assert_raises_message( exc.CompileError, 'Adding, removing or modifying an "identity" construct, ' "e.g. 
GENERATED AS IDENTITY, to or from an existing " "column is not supported in this dialect.", op.alter_column, "t1", "c1", server_default=sd(), existing_server_default=esd(), ) def test_alter_column_schema_type_unnamed(self): context = op_fixture("mssql", native_boolean=False) op.alter_column("t", "c", type_=Boolean(create_constraint=True)) context.assert_( "ALTER TABLE t ALTER COLUMN c BIT", "ALTER TABLE t ADD CHECK (c IN (0, 1))", ) def test_alter_column_schema_schema_type_unnamed(self): context = op_fixture("mssql", native_boolean=False) op.alter_column( "t", "c", type_=Boolean(create_constraint=True), schema="foo" ) context.assert_( "ALTER TABLE foo.t ALTER COLUMN c BIT", "ALTER TABLE foo.t ADD CHECK (c IN (0, 1))", ) def test_alter_column_schema_type_named(self): context = op_fixture("mssql", native_boolean=False) op.alter_column( "t", "c", type_=Boolean(name="xyz", create_constraint=True) ) context.assert_( "ALTER TABLE t ALTER COLUMN c BIT", "ALTER TABLE t ADD CONSTRAINT xyz CHECK (c IN (0, 1))", ) def test_alter_column_schema_schema_type_named(self): context = op_fixture("mssql", native_boolean=False) op.alter_column( "t", "c", type_=Boolean(name="xyz", create_constraint=True), schema="foo", ) context.assert_( "ALTER TABLE foo.t ALTER COLUMN c BIT", "ALTER TABLE foo.t ADD CONSTRAINT xyz CHECK (c IN (0, 1))", ) @combinations((True,), (False,), argnames="pass_existing_type") @combinations((True,), (False,), argnames="change_nullability") def test_generic_alter_column_type_and_nullability( self, pass_existing_type, change_nullability ): # this test is also on the mssql dialect in test_mssql context = op_fixture() args = dict(type_=Integer) if pass_existing_type: args["existing_type"] = String(15) if change_nullability: args["nullable"] = False op.alter_column("t", "c", **args) if change_nullability: context.assert_( "ALTER TABLE t ALTER COLUMN c SET NOT NULL", "ALTER TABLE t ALTER COLUMN c TYPE INTEGER", ) else: context.assert_("ALTER TABLE t ALTER COLUMN c TYPE INTEGER") def test_alter_column_schema_type_existing_type(self): context = op_fixture("mssql", native_boolean=False) op.alter_column( "t", "c", type_=String(10), existing_type=Boolean(name="xyz", create_constraint=True), ) context.assert_( "ALTER TABLE t DROP CONSTRAINT xyz", "ALTER TABLE t ALTER COLUMN c VARCHAR(10)", ) def test_alter_column_schema_schema_type_existing_type(self): context = op_fixture("mssql", native_boolean=False) op.alter_column( "t", "c", type_=String(10), existing_type=Boolean(name="xyz", create_constraint=True), schema="foo", ) context.assert_( "ALTER TABLE foo.t DROP CONSTRAINT xyz", "ALTER TABLE foo.t ALTER COLUMN c VARCHAR(10)", ) def test_alter_column_schema_type_existing_type_no_const(self): context = op_fixture("postgresql") op.alter_column("t", "c", type_=String(10), existing_type=Boolean()) context.assert_("ALTER TABLE t ALTER COLUMN c TYPE VARCHAR(10)") def test_alter_column_schema_schema_type_existing_type_no_const(self): context = op_fixture("postgresql") op.alter_column( "t", "c", type_=String(10), existing_type=Boolean(), schema="foo" ) context.assert_("ALTER TABLE foo.t ALTER COLUMN c TYPE VARCHAR(10)") def test_alter_column_schema_type_existing_type_no_new_type(self): context = op_fixture("postgresql") op.alter_column("t", "c", nullable=False, existing_type=Boolean()) context.assert_("ALTER TABLE t ALTER COLUMN c SET NOT NULL") def test_alter_column_schema_schema_type_existing_type_no_new_type(self): context = op_fixture("postgresql") op.alter_column( "t", "c", nullable=False, 
existing_type=Boolean(), schema="foo" ) context.assert_("ALTER TABLE foo.t ALTER COLUMN c SET NOT NULL") def test_add_foreign_key(self): context = op_fixture() op.create_foreign_key( "fk_test", "t1", "t2", ["foo", "bar"], ["bat", "hoho"] ) context.assert_( "ALTER TABLE t1 ADD CONSTRAINT fk_test FOREIGN KEY(foo, bar) " "REFERENCES t2 (bat, hoho)" ) def test_add_foreign_key_schema(self): context = op_fixture() op.create_foreign_key( "fk_test", "t1", "t2", ["foo", "bar"], ["bat", "hoho"], source_schema="foo2", referent_schema="bar2", ) context.assert_( "ALTER TABLE foo2.t1 ADD CONSTRAINT fk_test FOREIGN KEY(foo, bar) " "REFERENCES bar2.t2 (bat, hoho)" ) def test_add_foreign_key_schema_same_tablename(self): context = op_fixture() op.create_foreign_key( "fk_test", "t1", "t1", ["foo", "bar"], ["bat", "hoho"], source_schema="foo2", referent_schema="bar2", ) context.assert_( "ALTER TABLE foo2.t1 ADD CONSTRAINT fk_test FOREIGN KEY(foo, bar) " "REFERENCES bar2.t1 (bat, hoho)" ) def test_add_foreign_key_onupdate(self): context = op_fixture() op.create_foreign_key( "fk_test", "t1", "t2", ["foo", "bar"], ["bat", "hoho"], onupdate="CASCADE", ) context.assert_( "ALTER TABLE t1 ADD CONSTRAINT fk_test FOREIGN KEY(foo, bar) " "REFERENCES t2 (bat, hoho) ON UPDATE CASCADE" ) def test_add_foreign_key_ondelete(self): context = op_fixture() op.create_foreign_key( "fk_test", "t1", "t2", ["foo", "bar"], ["bat", "hoho"], ondelete="CASCADE", ) context.assert_( "ALTER TABLE t1 ADD CONSTRAINT fk_test FOREIGN KEY(foo, bar) " "REFERENCES t2 (bat, hoho) ON DELETE CASCADE" ) def test_add_foreign_key_deferrable(self): context = op_fixture() op.create_foreign_key( "fk_test", "t1", "t2", ["foo", "bar"], ["bat", "hoho"], deferrable=True, ) context.assert_( "ALTER TABLE t1 ADD CONSTRAINT fk_test FOREIGN KEY(foo, bar) " "REFERENCES t2 (bat, hoho) DEFERRABLE" ) def test_add_foreign_key_initially(self): context = op_fixture() op.create_foreign_key( "fk_test", "t1", "t2", ["foo", "bar"], ["bat", "hoho"], initially="deferred", ) context.assert_( "ALTER TABLE t1 ADD CONSTRAINT fk_test FOREIGN KEY(foo, bar) " "REFERENCES t2 (bat, hoho) INITIALLY deferred" ) @config.requirements.foreign_key_match def test_add_foreign_key_match(self): context = op_fixture() op.create_foreign_key( "fk_test", "t1", "t2", ["foo", "bar"], ["bat", "hoho"], match="SIMPLE", ) context.assert_( "ALTER TABLE t1 ADD CONSTRAINT fk_test FOREIGN KEY(foo, bar) " "REFERENCES t2 (bat, hoho) MATCH SIMPLE" ) def test_add_foreign_key_dialect_kw(self): op_fixture() with mock.patch("sqlalchemy.schema.ForeignKeyConstraint") as fkc: op.create_foreign_key( "fk_test", "t1", "t2", ["foo", "bar"], ["bat", "hoho"], foobar_arg="xyz", ) if config.requirements.foreign_key_match.enabled: eq_( fkc.mock_calls[0], mock.call( ["foo", "bar"], ["t2.bat", "t2.hoho"], onupdate=None, ondelete=None, name="fk_test", foobar_arg="xyz", deferrable=None, initially=None, match=None, ), ) else: eq_( fkc.mock_calls[0], mock.call( ["foo", "bar"], ["t2.bat", "t2.hoho"], onupdate=None, ondelete=None, name="fk_test", foobar_arg="xyz", deferrable=None, initially=None, ), ) def test_add_foreign_key_self_referential(self): context = op_fixture() op.create_foreign_key("fk_test", "t1", "t1", ["foo"], ["bar"]) context.assert_( "ALTER TABLE t1 ADD CONSTRAINT fk_test " "FOREIGN KEY(foo) REFERENCES t1 (bar)" ) def test_add_primary_key_constraint(self): context = op_fixture() op.create_primary_key("pk_test", "t1", ["foo", "bar"]) context.assert_( "ALTER TABLE t1 ADD CONSTRAINT pk_test PRIMARY KEY (foo, bar)" ) def 
test_add_primary_key_constraint_schema(self): context = op_fixture() op.create_primary_key("pk_test", "t1", ["foo"], schema="bar") context.assert_( "ALTER TABLE bar.t1 ADD CONSTRAINT pk_test PRIMARY KEY (foo)" ) def test_add_check_constraint(self): context = op_fixture() op.create_check_constraint( "ck_user_name_len", "user_table", func.len(column("name")) > 5 ) context.assert_( "ALTER TABLE user_table ADD CONSTRAINT ck_user_name_len " "CHECK (len(name) > 5)" ) def test_add_check_constraint_schema(self): context = op_fixture() op.create_check_constraint( "ck_user_name_len", "user_table", func.len(column("name")) > 5, schema="foo", ) context.assert_( "ALTER TABLE foo.user_table ADD CONSTRAINT ck_user_name_len " "CHECK (len(name) > 5)" ) def test_add_unique_constraint(self): context = op_fixture() op.create_unique_constraint("uk_test", "t1", ["foo", "bar"]) context.assert_( "ALTER TABLE t1 ADD CONSTRAINT uk_test UNIQUE (foo, bar)" ) def test_add_unique_constraint_schema(self): context = op_fixture() op.create_unique_constraint( "uk_test", "t1", ["foo", "bar"], schema="foo" ) context.assert_( "ALTER TABLE foo.t1 ADD CONSTRAINT uk_test UNIQUE (foo, bar)" ) def test_drop_constraint(self): context = op_fixture() op.drop_constraint("foo_bar_bat", "t1") context.assert_("ALTER TABLE t1 DROP CONSTRAINT foo_bar_bat") def test_drop_constraint_schema(self): context = op_fixture() op.drop_constraint("foo_bar_bat", "t1", schema="foo") context.assert_("ALTER TABLE foo.t1 DROP CONSTRAINT foo_bar_bat") def test_create_index(self): context = op_fixture() op.create_index("ik_test", "t1", ["foo", "bar"]) context.assert_("CREATE INDEX ik_test ON t1 (foo, bar)") def test_create_unique_index(self): context = op_fixture() op.create_index("ik_test", "t1", ["foo", "bar"], unique=True) context.assert_("CREATE UNIQUE INDEX ik_test ON t1 (foo, bar)") def test_create_index_quote_flag(self): context = op_fixture() op.create_index("ik_test", "t1", ["foo", "bar"], quote=True) context.assert_('CREATE INDEX "ik_test" ON t1 (foo, bar)') def test_create_index_table_col_event(self): context = op_fixture() op.create_index( "ik_test", "tbl_with_auto_appended_column", ["foo", "bar"] ) context.assert_( "CREATE INDEX ik_test ON tbl_with_auto_appended_column (foo, bar)" ) def test_add_unique_constraint_col_event(self): context = op_fixture() op.create_unique_constraint( "ik_test", "tbl_with_auto_appended_column", ["foo", "bar"] ) context.assert_( "ALTER TABLE tbl_with_auto_appended_column " "ADD CONSTRAINT ik_test UNIQUE (foo, bar)" ) def test_create_index_schema(self): context = op_fixture() op.create_index("ik_test", "t1", ["foo", "bar"], schema="foo") context.assert_("CREATE INDEX ik_test ON foo.t1 (foo, bar)") def test_drop_index(self): context = op_fixture() op.drop_index("ik_test") context.assert_("DROP INDEX ik_test") def test_drop_index_schema(self): context = op_fixture() op.drop_index("ik_test", schema="foo") context.assert_("DROP INDEX foo.ik_test") def test_drop_table(self): context = op_fixture() op.drop_table("tb_test") context.assert_("DROP TABLE tb_test") def test_drop_table_schema(self): context = op_fixture() op.drop_table("tb_test", schema="foo") context.assert_("DROP TABLE foo.tb_test") def test_create_table_selfref(self): context = op_fixture() op.create_table( "some_table", Column("id", Integer, primary_key=True), Column("st_id", Integer, ForeignKey("some_table.id")), ) context.assert_( "CREATE TABLE some_table (" "id INTEGER NOT NULL, " "st_id INTEGER, " "PRIMARY KEY (id), " "FOREIGN KEY(st_id) REFERENCES 
some_table (id))" ) def test_create_table_check_constraint(self): context = op_fixture() t1 = op.create_table( "some_table", Column("id", Integer, primary_key=True), Column("foo_id", Integer), CheckConstraint("foo_id>5", name="ck_1"), ) context.assert_( "CREATE TABLE some_table (" "id INTEGER NOT NULL, " "foo_id INTEGER, " "PRIMARY KEY (id), " "CONSTRAINT ck_1 CHECK (foo_id>5))" ) ck = [c for c in t1.constraints if isinstance(c, CheckConstraint)] eq_(ck[0].name, "ck_1") def test_create_table_unique_constraint(self): context = op_fixture() t1 = op.create_table( "some_table", Column("id", Integer, primary_key=True), Column("foo_id", Integer), UniqueConstraint("foo_id", name="uq_1"), ) context.assert_( "CREATE TABLE some_table (" "id INTEGER NOT NULL, " "foo_id INTEGER, " "PRIMARY KEY (id), " "CONSTRAINT uq_1 UNIQUE (foo_id))" ) uq = [c for c in t1.constraints if isinstance(c, UniqueConstraint)] eq_(uq[0].name, "uq_1") def test_create_table_unique_flag(self): context = op_fixture() t1 = op.create_table( "some_table", Column("id", Integer, primary_key=True), Column("foo_id", Integer, unique=True), ) context.assert_( "CREATE TABLE some_table (id INTEGER NOT NULL, foo_id INTEGER, " "PRIMARY KEY (id), UNIQUE (foo_id))" ) uq = [c for c in t1.constraints if isinstance(c, UniqueConstraint)] assert uq def test_create_table_index_flag(self): context = op_fixture() t1 = op.create_table( "some_table", Column("id", Integer, primary_key=True), Column("foo_id", Integer, index=True), ) context.assert_( "CREATE TABLE some_table (id INTEGER NOT NULL, foo_id INTEGER, " "PRIMARY KEY (id))", "CREATE INDEX ix_some_table_foo_id ON some_table (foo_id)", ) assert t1.indexes def test_create_table_index(self): context = op_fixture() t1 = op.create_table( "some_table", Column("id", Integer, primary_key=True), Column("foo_id", Integer), Index("ix_1", "foo_id"), ) context.assert_( "CREATE TABLE some_table (" "id INTEGER NOT NULL, " "foo_id INTEGER, " "PRIMARY KEY (id))", "CREATE INDEX ix_1 ON some_table (foo_id)", ) ix = list(t1.indexes) eq_(ix[0].name, "ix_1") def test_create_table_fk_and_schema(self): context = op_fixture() t1 = op.create_table( "some_table", Column("id", Integer, primary_key=True), Column("foo_id", Integer, ForeignKey("foo.id")), schema="schema", ) context.assert_( "CREATE TABLE schema.some_table (" "id INTEGER NOT NULL, " "foo_id INTEGER, " "PRIMARY KEY (id), " "FOREIGN KEY(foo_id) REFERENCES foo (id))" ) eq_(t1.c.id.name, "id") eq_(t1.schema, "schema") def test_create_table_no_pk(self): context = op_fixture() t1 = op.create_table( "some_table", Column("x", Integer), Column("y", Integer), Column("z", Integer), ) context.assert_( "CREATE TABLE some_table (x INTEGER, y INTEGER, z INTEGER)" ) assert not t1.primary_key def test_create_table_two_fk(self): context = op_fixture() op.create_table( "some_table", Column("id", Integer, primary_key=True), Column("foo_id", Integer, ForeignKey("foo.id")), Column("foo_bar", Integer, ForeignKey("foo.bar")), ) context.assert_( "CREATE TABLE some_table (" "id INTEGER NOT NULL, " "foo_id INTEGER, " "foo_bar INTEGER, " "PRIMARY KEY (id), " "FOREIGN KEY(foo_id) REFERENCES foo (id), " "FOREIGN KEY(foo_bar) REFERENCES foo (bar))" ) def test_inline_literal(self): context = op_fixture() from sqlalchemy.sql import table, column from sqlalchemy import String, Integer account = table( "account", column("name", String), column("id", Integer) ) op.execute( account.update() .where(account.c.name == op.inline_literal("account 1")) .values({"name": op.inline_literal("account 2")}) ) 
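        # inline_literal() renders the value directly into the statement
        # rather than as a bound parameter, which is what the rendered
        # UPDATE strings asserted below rely on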
op.execute( account.update() .where(account.c.id == op.inline_literal(1)) .values({"id": op.inline_literal(2)}) ) context.assert_( "UPDATE account SET name='account 2' " "WHERE account.name = 'account 1'", "UPDATE account SET id=2 WHERE account.id = 1", ) def test_cant_op(self): if hasattr(op, "_proxy"): del op._proxy assert_raises_message( NameError, "Can't invoke function 'inline_literal', as the " "proxy object has not yet been established " "for the Alembic 'Operations' class. " "Try placing this code inside a callable.", op.inline_literal, "asdf", ) def test_naming_changes(self): context = op_fixture() op.alter_column("t", "c", new_column_name="x") context.assert_("ALTER TABLE t RENAME c TO x") context = op_fixture("mysql") op.drop_constraint("f1", "t1", type_="foreignkey") context.assert_("ALTER TABLE t1 DROP FOREIGN KEY f1") def test_naming_changes_drop_idx(self): context = op_fixture("mssql") op.drop_index("ik_test", table_name="t1") context.assert_("DROP INDEX ik_test ON t1") @config.requirements.comments def test_create_table_comment_op(self): context = op_fixture() op.create_table_comment("some_table", "table comment") context.assert_("COMMENT ON TABLE some_table IS 'table comment'") @config.requirements.comments def test_drop_table_comment_op(self): context = op_fixture() op.drop_table_comment("some_table") context.assert_("COMMENT ON TABLE some_table IS NULL") class SQLModeOpTest(TestBase): def test_auto_literals(self): context = op_fixture(as_sql=True, literal_binds=True) from sqlalchemy.sql import table, column from sqlalchemy import String, Integer account = table( "account", column("name", String), column("id", Integer) ) op.execute( account.update() .where(account.c.name == op.inline_literal("account 1")) .values({"name": op.inline_literal("account 2")}) ) op.execute(text("update table set foo=:bar").bindparams(bar="bat")) context.assert_( "UPDATE account SET name='account 2' " "WHERE account.name = 'account 1'", "update table set foo='bat'", ) def test_create_table_literal_binds(self): context = op_fixture(as_sql=True, literal_binds=True) op.create_table( "some_table", Column("id", Integer, primary_key=True), Column("st_id", Integer, ForeignKey("some_table.id")), ) context.assert_( "CREATE TABLE some_table (id INTEGER NOT NULL, st_id INTEGER, " "PRIMARY KEY (id), FOREIGN KEY(st_id) REFERENCES some_table (id))" ) class CustomOpTest(TestBase): def test_custom_op(self): from alembic.operations import Operations, MigrateOperation @Operations.register_operation("create_sequence") class CreateSequenceOp(MigrateOperation): """Create a SEQUENCE.""" def __init__(self, sequence_name, **kw): self.sequence_name = sequence_name self.kw = kw @classmethod def create_sequence(cls, operations, sequence_name, **kw): """Issue a "CREATE SEQUENCE" instruction.""" op = CreateSequenceOp(sequence_name, **kw) return operations.invoke(op) @Operations.implementation_for(CreateSequenceOp) def create_sequence(operations, operation): operations.execute("CREATE SEQUENCE %s" % operation.sequence_name) context = op_fixture() op.create_sequence("foob") context.assert_("CREATE SEQUENCE foob") class ObjectFromToTest(TestBase): """Test operation round trips for to_obj() / from_obj(). Previously, these needed to preserve the "original" item to this, but this makes them harder to work with. As of #803 the constructs try to behave more intelligently about the state they were given, so that they can both "reverse" themselves but also take into accout their current state. 
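    As an illustrative sketch only (mirroring test_drop_index_add_kw
    below), extra keyword state applied to an op after construction is
    reflected both by the op itself and by its reverse::

        idx = schemaobj.SchemaObjects().index("x", "y", ["z"])
        drop = ops.DropIndexOp.from_index(idx)
        drop.kw["postgresql_concurrently"] = True

        drop.to_index().dialect_kwargs["postgresql_concurrently"]            # True
        drop.reverse().to_index().dialect_kwargs["postgresql_concurrently"]  # True
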
""" def test_drop_index(self): schema_obj = schemaobj.SchemaObjects() idx = schema_obj.index("x", "y", ["z"]) op = ops.DropIndexOp.from_index(idx) is_not_(op.to_index(), idx) def test_drop_index_add_kw(self): schema_obj = schemaobj.SchemaObjects() idx = schema_obj.index("x", "y", ["z"]) op = ops.DropIndexOp.from_index(idx) op.kw["postgresql_concurrently"] = True eq_(op.to_index().dialect_kwargs["postgresql_concurrently"], True) eq_( op.reverse().to_index().dialect_kwargs["postgresql_concurrently"], True, ) def test_create_index(self): schema_obj = schemaobj.SchemaObjects() idx = schema_obj.index("x", "y", ["z"]) op = ops.CreateIndexOp.from_index(idx) is_not_(op.to_index(), idx) def test_create_index_add_kw(self): schema_obj = schemaobj.SchemaObjects() idx = schema_obj.index("x", "y", ["z"]) op = ops.CreateIndexOp.from_index(idx) op.kw["postgresql_concurrently"] = True eq_(op.to_index().dialect_kwargs["postgresql_concurrently"], True) eq_( op.reverse().to_index().dialect_kwargs["postgresql_concurrently"], True, ) def test_drop_table(self): schema_obj = schemaobj.SchemaObjects() table = schema_obj.table( "x", Column("q", Integer), info={"custom": "value"}, prefixes=["FOREIGN"], postgresql_partition_by="x", comment="some comment", ) op = ops.DropTableOp.from_table(table) is_not_(op.to_table(), table) eq_(op.to_table().comment, table.comment) eq_(op.to_table().info, table.info) eq_(op.to_table()._prefixes, table._prefixes) def test_drop_table_add_kw(self): schema_obj = schemaobj.SchemaObjects() table = schema_obj.table("x", Column("q", Integer)) op = ops.DropTableOp.from_table(table) op.table_kw["postgresql_partition_by"] = "x" eq_(op.to_table().dialect_kwargs["postgresql_partition_by"], "x") eq_( op.reverse().to_table().dialect_kwargs["postgresql_partition_by"], "x", ) def test_create_table(self): schema_obj = schemaobj.SchemaObjects() table = schema_obj.table( "x", Column("q", Integer), postgresql_partition_by="x", prefixes=["FOREIGN"], info={"custom": "value"}, comment="some comment", ) op = ops.CreateTableOp.from_table(table) is_not_(op.to_table(), table) eq_(op.to_table().comment, table.comment) eq_(op.to_table().info, table.info) eq_(op.to_table()._prefixes, table._prefixes) def test_create_table_add_kw(self): schema_obj = schemaobj.SchemaObjects() table = schema_obj.table("x", Column("q", Integer)) op = ops.CreateTableOp.from_table(table) op.kw["postgresql_partition_by"] = "x" eq_(op.to_table().dialect_kwargs["postgresql_partition_by"], "x") eq_( op.reverse().to_table().dialect_kwargs["postgresql_partition_by"], "x", ) def test_create_unique_constraint(self): schema_obj = schemaobj.SchemaObjects() const = schema_obj.unique_constraint("x", "foobar", ["a"]) op = ops.AddConstraintOp.from_constraint(const) is_not_(op.to_constraint(), const) def test_create_unique_constraint_add_kw(self): schema_obj = schemaobj.SchemaObjects() const = schema_obj.unique_constraint("x", "foobar", ["a"]) op = ops.AddConstraintOp.from_constraint(const) is_not_(op.to_constraint(), const) op.kw["sqlite_on_conflict"] = "IGNORE" eq_(op.to_constraint().dialect_kwargs["sqlite_on_conflict"], "IGNORE") eq_( op.reverse().to_constraint().dialect_kwargs["sqlite_on_conflict"], "IGNORE", ) def test_drop_unique_constraint(self): schema_obj = schemaobj.SchemaObjects() const = schema_obj.unique_constraint("x", "foobar", ["a"]) op = ops.DropConstraintOp.from_constraint(const) is_not_(op.to_constraint(), const) def test_drop_unique_constraint_change_name(self): schema_obj = schemaobj.SchemaObjects() const = 
schema_obj.unique_constraint("x", "foobar", ["a"]) op = ops.DropConstraintOp.from_constraint(const) op.constraint_name = "my_name" eq_(op.to_constraint().name, "my_name") eq_(op.reverse().to_constraint().name, "my_name") def test_drop_constraint_not_available(self): op = ops.DropConstraintOp("x", "y", type_="unique") assert_raises_message( ValueError, "constraint cannot be produced", op.to_constraint ) alembic-rel_1_7_6/tests/test_op_naming_convention.py000066400000000000000000000126061417624537100231300ustar00rootroot00000000000000from sqlalchemy import Boolean from sqlalchemy import CheckConstraint from sqlalchemy import Column from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import Table from sqlalchemy.sql import column from sqlalchemy.sql import func from alembic import op from alembic.testing.fixtures import op_fixture from alembic.testing.fixtures import TestBase class AutoNamingConventionTest(TestBase): def test_add_check_constraint(self): context = op_fixture( naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"} ) op.create_check_constraint( "foo", "user_table", func.len(column("name")) > 5 ) context.assert_( "ALTER TABLE user_table ADD CONSTRAINT ck_user_table_foo " "CHECK (len(name) > 5)" ) def test_add_check_constraint_name_is_none(self): context = op_fixture(naming_convention={"ck": "ck_%(table_name)s_foo"}) op.create_check_constraint( None, "user_table", func.len(column("name")) > 5 ) context.assert_( "ALTER TABLE user_table ADD CONSTRAINT ck_user_table_foo " "CHECK (len(name) > 5)" ) def test_add_unique_constraint_name_is_none(self): context = op_fixture(naming_convention={"uq": "uq_%(table_name)s_foo"}) op.create_unique_constraint(None, "user_table", "x") context.assert_( "ALTER TABLE user_table " "ADD CONSTRAINT uq_user_table_foo UNIQUE (x)" ) def test_add_index_name_is_none(self): context = op_fixture(naming_convention={"ix": "ix_%(table_name)s_foo"}) op.create_index(None, "user_table", "x") context.assert_("CREATE INDEX ix_user_table_foo ON user_table (x)") def test_add_check_constraint_already_named_from_schema(self): m1 = MetaData( naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"} ) ck = CheckConstraint("im a constraint", name="cc1") Table("t", m1, Column("x"), ck) context = op_fixture( naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"} ) op.create_table("some_table", Column("x", Integer, ck)) context.assert_( "CREATE TABLE some_table " "(x INTEGER CONSTRAINT ck_t_cc1 CHECK (im a constraint))" ) def test_add_check_constraint_inline_on_table(self): context = op_fixture( naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"} ) op.create_table( "some_table", Column("x", Integer), CheckConstraint("im a constraint", name="cc1"), ) context.assert_( "CREATE TABLE some_table " "(x INTEGER, CONSTRAINT ck_some_table_cc1 CHECK (im a constraint))" ) def test_add_check_constraint_inline_on_table_w_f(self): context = op_fixture( naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"} ) op.create_table( "some_table", Column("x", Integer), CheckConstraint("im a constraint", name=op.f("ck_some_table_cc1")), ) context.assert_( "CREATE TABLE some_table " "(x INTEGER, CONSTRAINT ck_some_table_cc1 CHECK (im a constraint))" ) def test_add_check_constraint_inline_on_column(self): context = op_fixture( naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"} ) op.create_table( "some_table", Column( "x", Integer, CheckConstraint("im a constraint", name="cc1") ), ) context.assert_( "CREATE 
TABLE some_table " "(x INTEGER CONSTRAINT ck_some_table_cc1 CHECK (im a constraint))" ) def test_add_check_constraint_inline_on_column_w_f(self): context = op_fixture( naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"} ) op.create_table( "some_table", Column( "x", Integer, CheckConstraint("im a constraint", name=op.f("ck_q_cc1")), ), ) context.assert_( "CREATE TABLE some_table " "(x INTEGER CONSTRAINT ck_q_cc1 CHECK (im a constraint))" ) def test_add_column_schema_type(self): context = op_fixture( naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"} ) op.add_column( "t1", Column( "c1", Boolean(name="foo", create_constraint=True), nullable=False, ), ) context.assert_( "ALTER TABLE t1 ADD COLUMN c1 BOOLEAN NOT NULL", "ALTER TABLE t1 ADD CONSTRAINT ck_t1_foo CHECK (c1 IN (0, 1))", ) def test_add_column_schema_type_w_f(self): context = op_fixture( naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"} ) op.add_column( "t1", Column( "c1", Boolean(name=op.f("foo"), create_constraint=True), nullable=False, ), ) context.assert_( "ALTER TABLE t1 ADD COLUMN c1 BOOLEAN NOT NULL", "ALTER TABLE t1 ADD CONSTRAINT foo CHECK (c1 IN (0, 1))", ) alembic-rel_1_7_6/tests/test_oracle.py000066400000000000000000000317131417624537100201640ustar00rootroot00000000000000from sqlalchemy import Column from sqlalchemy import exc from sqlalchemy import Integer from alembic import command from alembic import op from alembic.testing import assert_raises_message from alembic.testing import combinations from alembic.testing import config from alembic.testing.env import _no_sql_testing_config from alembic.testing.env import clear_staging_env from alembic.testing.env import staging_env from alembic.testing.env import three_rev_fixture from alembic.testing.fixtures import capture_context_buffer from alembic.testing.fixtures import op_fixture from alembic.testing.fixtures import TestBase from alembic.util import sqla_compat class FullEnvironmentTests(TestBase): @classmethod def setup_class(cls): staging_env() cls.cfg = cfg = _no_sql_testing_config("oracle") cls.a, cls.b, cls.c = three_rev_fixture(cfg) @classmethod def teardown_class(cls): clear_staging_env() def test_begin_comit(self): with capture_context_buffer(transactional_ddl=True) as buf: command.upgrade(self.cfg, self.a, sql=True) assert "SET TRANSACTION READ WRITE\n\n/" in buf.getvalue() assert "COMMIT\n\n/" in buf.getvalue() def test_batch_separator_default(self): with capture_context_buffer() as buf: command.upgrade(self.cfg, self.a, sql=True) assert "/" in buf.getvalue() assert ";" not in buf.getvalue() def test_batch_separator_custom(self): with capture_context_buffer(oracle_batch_separator="BYE") as buf: command.upgrade(self.cfg, self.a, sql=True) assert "BYE" in buf.getvalue() class OpTest(TestBase): def test_add_column(self): context = op_fixture("oracle") op.add_column("t1", Column("c1", Integer, nullable=False)) context.assert_("ALTER TABLE t1 ADD c1 INTEGER NOT NULL") def test_add_column_with_default(self): context = op_fixture("oracle") op.add_column( "t1", Column("c1", Integer, nullable=False, server_default="12") ) context.assert_("ALTER TABLE t1 ADD c1 INTEGER DEFAULT '12' NOT NULL") @config.requirements.comments def test_add_column_with_comment(self): context = op_fixture("oracle") op.add_column( "t1", Column("c1", Integer, nullable=False, comment="c1 comment") ) context.assert_( "ALTER TABLE t1 ADD c1 INTEGER NOT NULL", "COMMENT ON COLUMN t1.c1 IS 'c1 comment'", ) @config.requirements.computed_columns def 
test_add_column_computed(self): context = op_fixture("oracle") op.add_column( "t1", Column("some_column", Integer, sqla_compat.Computed("foo * 5")), ) context.assert_( "ALTER TABLE t1 ADD some_column " "INTEGER GENERATED ALWAYS AS (foo * 5)" ) @combinations( (lambda: sqla_compat.Computed("foo * 5"), lambda: None), (lambda: None, lambda: sqla_compat.Computed("foo * 5")), ( lambda: sqla_compat.Computed("foo * 42"), lambda: sqla_compat.Computed("foo * 5"), ), ) @config.requirements.computed_columns def test_alter_column_computed_not_supported(self, sd, esd): op_fixture("oracle") assert_raises_message( exc.CompileError, 'Adding or removing a "computed" construct, e.g. ' "GENERATED ALWAYS AS, to or from an existing column is not " "supported.", op.alter_column, "t1", "c1", server_default=sd(), existing_server_default=esd(), ) def test_alter_table_rename_oracle(self): context = op_fixture("oracle") op.rename_table("s", "t") context.assert_("ALTER TABLE s RENAME TO t") def test_alter_table_rename_schema_oracle(self): context = op_fixture("oracle") op.rename_table("s", "t", schema="myowner") context.assert_("ALTER TABLE myowner.s RENAME TO t") def test_alter_column_rename_oracle(self): context = op_fixture("oracle") op.alter_column("t", "c", new_column_name="x") context.assert_("ALTER TABLE t RENAME COLUMN c TO x") def test_alter_column_new_type(self): context = op_fixture("oracle") op.alter_column("t", "c", type_=Integer) context.assert_("ALTER TABLE t MODIFY c INTEGER") def test_alter_column_add_comment(self): context = op_fixture("oracle") op.alter_column("t", "c", type_=Integer, comment="c comment") context.assert_( "ALTER TABLE t MODIFY c INTEGER", "COMMENT ON COLUMN t.c IS 'c comment'", ) def test_alter_column_add_comment_quotes(self): context = op_fixture("oracle") op.alter_column("t", "c", type_=Integer, comment="c 'comment'") context.assert_( "ALTER TABLE t MODIFY c INTEGER", "COMMENT ON COLUMN t.c IS 'c ''comment'''", ) def test_alter_column_drop_comment(self): context = op_fixture("oracle") op.alter_column("t", "c", type_=Integer, comment=None) context.assert_( "ALTER TABLE t MODIFY c INTEGER", "COMMENT ON COLUMN t.c IS ''" ) def test_create_table_comment(self): # this is handled by SQLAlchemy's compilers context = op_fixture("oracle") op.create_table_comment("t2", comment="t2 table", schema="foo") context.assert_("COMMENT ON TABLE foo.t2 IS 't2 table'") def test_drop_table_comment(self): # this is handled by SQLAlchemy's compilers context = op_fixture("oracle") op.drop_table_comment("t2", existing_comment="t2 table", schema="foo") context.assert_("COMMENT ON TABLE foo.t2 IS ''") def test_drop_index(self): context = op_fixture("oracle") op.drop_index("my_idx", "my_table") context.assert_contains("DROP INDEX my_idx") def test_drop_column_w_default(self): context = op_fixture("oracle") op.drop_column("t1", "c1") context.assert_("ALTER TABLE t1 DROP COLUMN c1") def test_drop_column_w_check(self): context = op_fixture("oracle") op.drop_column("t1", "c1") context.assert_("ALTER TABLE t1 DROP COLUMN c1") def test_alter_column_nullable_w_existing_type(self): context = op_fixture("oracle") op.alter_column("t", "c", nullable=True, existing_type=Integer) context.assert_("ALTER TABLE t MODIFY c NULL") def test_alter_column_not_nullable_w_existing_type(self): context = op_fixture("oracle") op.alter_column("t", "c", nullable=False, existing_type=Integer) context.assert_("ALTER TABLE t MODIFY c NOT NULL") def test_alter_column_nullable_w_new_type(self): context = op_fixture("oracle") 
op.alter_column("t", "c", nullable=True, type_=Integer) context.assert_( "ALTER TABLE t MODIFY c NULL", "ALTER TABLE t MODIFY c INTEGER" ) def test_alter_column_not_nullable_w_new_type(self): context = op_fixture("oracle") op.alter_column("t", "c", nullable=False, type_=Integer) context.assert_( "ALTER TABLE t MODIFY c NOT NULL", "ALTER TABLE t MODIFY c INTEGER" ) def test_alter_add_server_default(self): context = op_fixture("oracle") op.alter_column("t", "c", server_default="5") context.assert_("ALTER TABLE t MODIFY c DEFAULT '5'") def test_alter_replace_server_default(self): context = op_fixture("oracle") op.alter_column( "t", "c", server_default="5", existing_server_default="6" ) context.assert_("ALTER TABLE t MODIFY c DEFAULT '5'") def test_alter_remove_server_default(self): context = op_fixture("oracle") op.alter_column("t", "c", server_default=None) context.assert_("ALTER TABLE t MODIFY c DEFAULT NULL") def test_alter_do_everything(self): context = op_fixture("oracle") op.alter_column( "t", "c", new_column_name="c2", nullable=True, type_=Integer, server_default="5", ) context.assert_( "ALTER TABLE t MODIFY c NULL", "ALTER TABLE t MODIFY c DEFAULT '5'", "ALTER TABLE t MODIFY c INTEGER", "ALTER TABLE t RENAME COLUMN c TO c2", ) @config.requirements.comments def test_create_table_with_column_comments(self): context = op_fixture("oracle") op.create_table( "t2", Column("c1", Integer, primary_key=True), comment="t2 comment" ) context.assert_( "CREATE TABLE t2 (c1 INTEGER NOT NULL, PRIMARY KEY (c1))", "COMMENT ON TABLE t2 IS 't2 comment'", ) # TODO: when we add schema support # def test_alter_column_rename_oracle_schema(self): # context = op_fixture('oracle') # op.alter_column("t", "c", name="x", schema="y") # context.assert_( # 'ALTER TABLE y.t RENAME COLUMN c TO c2' # ) def _identity_qualification(self, kw): always = kw.get("always", False) if always is None: return "" qualification = "ALWAYS" if always else "BY DEFAULT" if kw.get("on_null", False): qualification += " ON NULL" return qualification @config.requirements.identity_columns @combinations( ({}, None), (dict(always=True), None), (dict(always=None, order=True), "ORDER"), ( dict(start=3, increment=33, maxvalue=99, cycle=True), "INCREMENT BY 33 START WITH 3 MAXVALUE 99 CYCLE", ), (dict(on_null=True, start=42), "START WITH 42"), ) def test_add_column_identity(self, kw, text): context = op_fixture("oracle") op.add_column( "t1", Column("some_column", Integer, sqla_compat.Identity(**kw)), ) qualification = self._identity_qualification(kw) options = " (%s)" % text if text else "" context.assert_( "ALTER TABLE t1 ADD some_column " "INTEGER GENERATED %s AS IDENTITY%s" % (qualification, options) ) @config.requirements.identity_columns @combinations( ({}, None), (dict(always=True), None), (dict(always=None, cycle=True), "CYCLE"), ( dict(start=3, increment=33, maxvalue=99, cycle=True), "INCREMENT BY 33 START WITH 3 MAXVALUE 99 CYCLE", ), (dict(on_null=True, start=42), "START WITH 42"), ) def test_add_identity_to_column(self, kw, text): context = op_fixture("oracle") op.alter_column( "t1", "some_column", server_default=sqla_compat.Identity(**kw), existing_server_default=None, ) qualification = self._identity_qualification(kw) options = " (%s)" % text if text else "" context.assert_( "ALTER TABLE t1 MODIFY some_column " "GENERATED %s AS IDENTITY%s" % (qualification, options) ) @config.requirements.identity_columns def test_remove_identity_from_column(self): context = op_fixture("oracle") op.alter_column( "t1", "some_column", 
server_default=None, existing_server_default=sqla_compat.Identity(), ) context.assert_("ALTER TABLE t1 MODIFY some_column DROP IDENTITY") @config.requirements.identity_columns @combinations( ({}, dict(always=True), None), ( dict(always=True), dict(always=False, start=3), "START WITH 3", ), ( dict(always=True, start=3, increment=2, minvalue=-3, maxvalue=99), dict( always=True, start=3, increment=1, minvalue=-3, maxvalue=99, cycle=True, ), "INCREMENT BY 1 START WITH 3 MINVALUE -3 MAXVALUE 99 CYCLE", ), ( dict( always=False, start=3, maxvalue=9999, minvalue=0, ), dict(always=False, start=3, order=True, on_null=False, cache=2), "START WITH 3 CACHE 2 ORDER", ), ( dict(always=False), dict(always=None, minvalue=0), "MINVALUE 0", ), ) def test_change_identity_in_column(self, existing, updated, text): context = op_fixture("oracle") op.alter_column( "t1", "some_column", server_default=sqla_compat.Identity(**updated), existing_server_default=sqla_compat.Identity(**existing), ) qualification = self._identity_qualification(updated) options = " (%s)" % text if text else "" context.assert_( "ALTER TABLE t1 MODIFY some_column " "GENERATED %s AS IDENTITY%s" % (qualification, options) ) alembic-rel_1_7_6/tests/test_post_write.py000066400000000000000000000162661417624537100211240ustar00rootroot00000000000000import os import sys from alembic import command from alembic import util from alembic.script import write_hooks from alembic.testing import assert_raises_message from alembic.testing import combinations from alembic.testing import eq_ from alembic.testing import mock from alembic.testing import TestBase from alembic.testing.env import _get_staging_directory from alembic.testing.env import _no_sql_testing_config from alembic.testing.env import clear_staging_env from alembic.testing.env import staging_env from alembic.util import compat class HookTest(TestBase): def test_register(self): @write_hooks.register("my_writer") def my_writer(path, config): return path assert "my_writer" in write_hooks._registry def test_invoke(self): my_formatter = mock.Mock() write_hooks.register("my_writer")(my_formatter) write_hooks._invoke("my_writer", "/some/path", {"option": 1}) my_formatter.assert_called_once_with("/some/path", {"option": 1}) class RunHookTest(TestBase): def setUp(self): self.env = staging_env() def tearDown(self): clear_staging_env() def test_generic(self): hook1 = mock.Mock() hook2 = mock.Mock() write_hooks.register("hook1")(hook1) write_hooks.register("hook2")(hook2) self.cfg = _no_sql_testing_config( directives=( "\n[post_write_hooks]\n" "hooks=hook1,hook2\n" "hook1.type=hook1\n" "hook1.arg1=foo\n" "hook2.type=hook2\n" "hook2.arg1=bar\n" ) ) rev = command.revision(self.cfg, message="x") eq_( hook1.mock_calls, [ mock.call( rev.path, {"type": "hook1", "arg1": "foo", "_hook_name": "hook1"}, ) ], ) eq_( hook2.mock_calls, [ mock.call( rev.path, {"type": "hook2", "arg1": "bar", "_hook_name": "hook2"}, ) ], ) def test_empty_section(self): self.cfg = _no_sql_testing_config( directives=("\n[post_write_hooks]\n") ) command.revision(self.cfg, message="x") def test_no_section(self): self.cfg = _no_sql_testing_config(directives="") command.revision(self.cfg, message="x") def test_empty_hooks(self): self.cfg = _no_sql_testing_config( directives=("\n[post_write_hooks]\n" "hooks=\n") ) command.revision(self.cfg, message="x") def test_no_type(self): self.cfg = _no_sql_testing_config( directives=( "\n[post_write_hooks]\n" "hooks=foo\n" "foo.bar=somebar\n" ) ) assert_raises_message( util.CommandError, "Key foo.type is 
required for post write hook 'foo'", command.revision, self.cfg, message="x", ) def test_console_scripts_entrypoint_missing(self): self.cfg = _no_sql_testing_config( directives=( "\n[post_write_hooks]\n" "hooks=black\n" "black.type=console_scripts\n" ) ) assert_raises_message( util.CommandError, "Key black.entrypoint is required for post write hook 'black'", command.revision, self.cfg, message="x", ) def _run_black_with_config( self, input_config, expected_additional_arguments_fn, cwd=None ): self.cfg = _no_sql_testing_config(directives=input_config) retVal = [ compat.EntryPoint( name="black", value="black.foo:patched_main", group="console_scripts", ), compat.EntryPoint( name="alembic", value="alembic.config:main", group="console_scripts", ), ] importlib_metadata_get = mock.Mock(return_value=retVal) with mock.patch( "alembic.util.compat.importlib_metadata_get", importlib_metadata_get, ), mock.patch( "alembic.script.write_hooks.subprocess" ) as mock_subprocess: rev = command.revision(self.cfg, message="x") eq_(importlib_metadata_get.mock_calls, [mock.call("console_scripts")]) eq_( mock_subprocess.mock_calls, [ mock.call.run( [ sys.executable, "-c", "import black.foo; black.foo.patched_main()", ] + expected_additional_arguments_fn(rev.path), cwd=cwd, ) ], ) def test_console_scripts(self): input_config = """ [post_write_hooks] hooks = black black.type = console_scripts black.entrypoint = black black.options = -l 79 """ def expected_additional_arguments_fn(rev_path): return [rev_path, "-l", "79"] self._run_black_with_config( input_config, expected_additional_arguments_fn ) @combinations(True, False) def test_filename_interpolation(self, posix): input_config = """ [post_write_hooks] hooks = black black.type = console_scripts black.entrypoint = black black.options = arg1 REVISION_SCRIPT_FILENAME 'multi-word arg' \ --flag1='REVISION_SCRIPT_FILENAME' """ def expected_additional_arguments_fn(rev_path): if compat.is_posix: return [ "arg1", rev_path, "multi-word arg", "--flag1=" + rev_path, ] else: return [ "arg1", rev_path, "'multi-word arg'", "--flag1='%s'" % rev_path, ] with mock.patch("alembic.util.compat.is_posix", posix): self._run_black_with_config( input_config, expected_additional_arguments_fn ) def test_path_in_config(self): input_config = """ [post_write_hooks] hooks = black black.type = console_scripts black.entrypoint = black black.options = arg1 REVISION_SCRIPT_FILENAME --config %(here)s/pyproject.toml """ def expected_additional_arguments_fn(rev_path): return [ "arg1", rev_path, "--config", os.path.abspath(_get_staging_directory()) + "/pyproject.toml", ] self._run_black_with_config( input_config, expected_additional_arguments_fn ) def test_black_with_cwd(self): input_config = """ [post_write_hooks] hooks = black black.type = console_scripts black.entrypoint = black black.cwd = /path/to/cwd """ def expected_additional_arguments_fn(rev_path): return [rev_path] self._run_black_with_config( input_config, expected_additional_arguments_fn, cwd="/path/to/cwd" ) alembic-rel_1_7_6/tests/test_postgresql.py000066400000000000000000001106331417624537100211210ustar00rootroot00000000000000from sqlalchemy import BigInteger from sqlalchemy import Boolean from sqlalchemy import Column from sqlalchemy import DateTime from sqlalchemy import exc from sqlalchemy import Float from sqlalchemy import func from sqlalchemy import Index from sqlalchemy import inspect from sqlalchemy import Integer from sqlalchemy import Interval from sqlalchemy import MetaData from sqlalchemy import Numeric from sqlalchemy import 
Sequence from sqlalchemy import String from sqlalchemy import Table from sqlalchemy import text from sqlalchemy import types from sqlalchemy.dialects.postgresql import ARRAY from sqlalchemy.dialects.postgresql import BYTEA from sqlalchemy.dialects.postgresql import HSTORE from sqlalchemy.dialects.postgresql import JSON from sqlalchemy.dialects.postgresql import JSONB from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.sql import column from sqlalchemy.sql import false from sqlalchemy.sql import table from alembic import autogenerate from alembic import command from alembic import op from alembic import util from alembic.autogenerate import api from alembic.autogenerate.compare import _compare_server_default from alembic.autogenerate.compare import _compare_tables from alembic.autogenerate.compare import _render_server_default_for_compare from alembic.migration import MigrationContext from alembic.operations import ops from alembic.script import ScriptDirectory from alembic.testing import assert_raises_message from alembic.testing import combinations from alembic.testing import config from alembic.testing import eq_ from alembic.testing import eq_ignore_whitespace from alembic.testing import provide_metadata from alembic.testing.env import _no_sql_testing_config from alembic.testing.env import clear_staging_env from alembic.testing.env import staging_env from alembic.testing.env import write_script from alembic.testing.fixtures import capture_context_buffer from alembic.testing.fixtures import FutureEngineMixin from alembic.testing.fixtures import op_fixture from alembic.testing.fixtures import TablesTest from alembic.testing.fixtures import TestBase from alembic.util import sqla_compat class PostgresqlOpTest(TestBase): def test_rename_table_postgresql(self): context = op_fixture("postgresql") op.rename_table("t1", "t2") context.assert_("ALTER TABLE t1 RENAME TO t2") def test_rename_table_schema_postgresql(self): context = op_fixture("postgresql") op.rename_table("t1", "t2", schema="foo") context.assert_("ALTER TABLE foo.t1 RENAME TO t2") def test_create_index_postgresql_expressions(self): context = op_fixture("postgresql") op.create_index( "geocoded", "locations", [text("lower(coordinates)")], postgresql_where=text("locations.coordinates != Null"), ) context.assert_( "CREATE INDEX geocoded ON locations (lower(coordinates)) " "WHERE locations.coordinates != Null" ) def test_create_index_postgresql_where(self): context = op_fixture("postgresql") op.create_index( "geocoded", "locations", ["coordinates"], postgresql_where=text("locations.coordinates != Null"), ) context.assert_( "CREATE INDEX geocoded ON locations (coordinates) " "WHERE locations.coordinates != Null" ) def test_create_index_postgresql_concurrently(self): context = op_fixture("postgresql") op.create_index( "geocoded", "locations", ["coordinates"], postgresql_concurrently=True, ) context.assert_( "CREATE INDEX CONCURRENTLY geocoded ON locations (coordinates)" ) @config.requirements.sqlalchemy_14 def test_create_index_postgresql_include(self): context = op_fixture("postgresql") op.create_index( "i", "t", ["c1", "c2"], unique=False, postgresql_include=["inc"] ) context.assert_("CREATE INDEX i ON t (c1, c2) INCLUDE (inc)") def test_create_index_postgresql_include_is_none(self): context = op_fixture("postgresql") op.create_index("i", "t", ["c1", "c2"], unique=False) context.assert_("CREATE INDEX i ON t (c1, c2)") def test_drop_index_postgresql_concurrently(self): context = op_fixture("postgresql") 
op.drop_index("geocoded", "locations", postgresql_concurrently=True) context.assert_("DROP INDEX CONCURRENTLY geocoded") def test_alter_column_type_using(self): context = op_fixture("postgresql") op.alter_column("t", "c", type_=Integer, postgresql_using="c::integer") context.assert_( "ALTER TABLE t ALTER COLUMN c TYPE INTEGER USING c::integer" ) def test_col_w_pk_is_serial(self): context = op_fixture("postgresql") op.add_column("some_table", Column("q", Integer, primary_key=True)) context.assert_("ALTER TABLE some_table ADD COLUMN q SERIAL NOT NULL") def test_create_exclude_constraint(self): context = op_fixture("postgresql") op.create_exclude_constraint( "ex1", "t1", ("x", ">"), where="x > 5", using="gist" ) context.assert_( "ALTER TABLE t1 ADD CONSTRAINT ex1 EXCLUDE USING gist (x WITH >) " "WHERE (x > 5)" ) def test_create_exclude_constraint_quoted_literal(self): context = op_fixture("postgresql") op.create_exclude_constraint( "ex1", "SomeTable", (column("SomeColumn"), ">"), where='"SomeColumn" > 5', using="gist", ) context.assert_( 'ALTER TABLE "SomeTable" ADD CONSTRAINT ex1 EXCLUDE USING gist ' '("SomeColumn" WITH >) WHERE ("SomeColumn" > 5)' ) def test_create_exclude_constraint_quoted_column(self): context = op_fixture("postgresql") op.create_exclude_constraint( "ex1", "SomeTable", (column("SomeColumn"), ">"), where=column("SomeColumn") > 5, using="gist", ) context.assert_( 'ALTER TABLE "SomeTable" ADD CONSTRAINT ex1 EXCLUDE ' 'USING gist ("SomeColumn" WITH >) WHERE ("SomeColumn" > 5)' ) def test_add_column_with_comment(self): context = op_fixture("postgresql") op.add_column("t", Column("q", Integer, comment="This is a comment")) context.assert_( "ALTER TABLE t ADD COLUMN q INTEGER", "COMMENT ON COLUMN t.q IS 'This is a comment'", ) def test_alter_column_with_comment(self): context = op_fixture("postgresql") op.alter_column( "t", "c", nullable=False, existing_type=Boolean(), schema="foo", comment="This is a column comment", ) context.assert_( "ALTER TABLE foo.t ALTER COLUMN c SET NOT NULL", "COMMENT ON COLUMN foo.t.c IS 'This is a column comment'", ) def test_alter_column_add_comment(self): context = op_fixture("postgresql") op.alter_column( "t", "c", existing_type=Boolean(), schema="foo", comment="This is a column comment", ) context.assert_( "COMMENT ON COLUMN foo.t.c IS 'This is a column comment'" ) def test_alter_column_add_comment_table_and_column_quoting(self): context = op_fixture("postgresql") op.alter_column( "T", "C", existing_type=Boolean(), schema="foo", comment="This is a column comment", ) context.assert_( 'COMMENT ON COLUMN foo."T"."C" IS \'This is a column comment\'' ) def test_alter_column_add_comment_quoting(self): context = op_fixture("postgresql") op.alter_column( "t", "c", existing_type=Boolean(), schema="foo", comment="This is a column 'comment'", ) context.assert_( "COMMENT ON COLUMN foo.t.c IS 'This is a column ''comment'''" ) def test_alter_column_drop_comment(self): context = op_fixture("postgresql") op.alter_column( "t", "c", existing_type=Boolean(), schema="foo", comment=None, existing_comment="This is a column comment", ) context.assert_("COMMENT ON COLUMN foo.t.c IS NULL") def test_create_table_with_comment(self): context = op_fixture("postgresql") op.create_table( "t2", Column("c1", Integer, primary_key=True), Column("c2", Integer), comment="t2 comment", ) context.assert_( "CREATE TABLE t2 (c1 SERIAL NOT NULL, " "c2 INTEGER, PRIMARY KEY (c1))", "COMMENT ON TABLE t2 IS 't2 comment'", ) def test_create_table_with_column_comments(self): context = 
op_fixture("postgresql") op.create_table( "t2", Column("c1", Integer, primary_key=True, comment="c1 comment"), Column("c2", Integer, comment="c2 comment"), comment="t2 comment", ) context.assert_( "CREATE TABLE t2 (c1 SERIAL NOT NULL, " "c2 INTEGER, PRIMARY KEY (c1))", "COMMENT ON TABLE t2 IS 't2 comment'", "COMMENT ON COLUMN t2.c1 IS 'c1 comment'", "COMMENT ON COLUMN t2.c2 IS 'c2 comment'", ) def test_create_table_comment(self): # this is handled by SQLAlchemy's compilers context = op_fixture("postgresql") op.create_table_comment("t2", comment="t2 table", schema="foo") context.assert_("COMMENT ON TABLE foo.t2 IS 't2 table'") def test_drop_table_comment(self): # this is handled by SQLAlchemy's compilers context = op_fixture("postgresql") op.drop_table_comment("t2", existing_comment="t2 table", schema="foo") context.assert_("COMMENT ON TABLE foo.t2 IS NULL") @config.requirements.computed_columns def test_add_column_computed(self): context = op_fixture("postgresql") op.add_column( "t1", Column("some_column", Integer, sqla_compat.Computed("foo * 5")), ) context.assert_( "ALTER TABLE t1 ADD COLUMN some_column " "INTEGER GENERATED ALWAYS AS (foo * 5) STORED" ) @combinations( (lambda: sqla_compat.Computed("foo * 5"), lambda: None), (lambda: None, lambda: sqla_compat.Computed("foo * 5")), ( lambda: sqla_compat.Computed("foo * 42"), lambda: sqla_compat.Computed("foo * 5"), ), ) @config.requirements.computed_columns def test_alter_column_computed_not_supported(self, sd, esd): op_fixture("postgresql") assert_raises_message( exc.CompileError, 'Adding or removing a "computed" construct, e.g. ' "GENERATED ALWAYS AS, to or from an existing column is not " "supported.", op.alter_column, "t1", "c1", server_default=sd(), existing_server_default=esd(), ) @config.requirements.identity_columns @combinations( ({}, None), (dict(always=True), None), ( dict(start=3, increment=33, maxvalue=99, cycle=True), "INCREMENT BY 33 START WITH 3 MAXVALUE 99 CYCLE", ), ) def test_add_column_identity(self, kw, text): context = op_fixture("postgresql") op.add_column( "t1", Column("some_column", Integer, sqla_compat.Identity(**kw)), ) qualification = "ALWAYS" if kw.get("always", False) else "BY DEFAULT" options = " (%s)" % text if text else "" context.assert_( "ALTER TABLE t1 ADD COLUMN some_column " "INTEGER GENERATED %s AS IDENTITY%s" % (qualification, options) ) @config.requirements.identity_columns @combinations( ({}, None), (dict(always=True), None), ( dict(start=3, increment=33, maxvalue=99, cycle=True), "INCREMENT BY 33 START WITH 3 MAXVALUE 99 CYCLE", ), ) def test_add_identity_to_column(self, kw, text): context = op_fixture("postgresql") op.alter_column( "t1", "some_column", server_default=sqla_compat.Identity(**kw), existing_server_default=None, ) qualification = "ALWAYS" if kw.get("always", False) else "BY DEFAULT" options = " (%s)" % text if text else "" context.assert_( "ALTER TABLE t1 ALTER COLUMN some_column ADD " "GENERATED %s AS IDENTITY%s" % (qualification, options) ) @config.requirements.identity_columns def test_remove_identity_from_column(self): context = op_fixture("postgresql") op.alter_column( "t1", "some_column", server_default=None, existing_server_default=sqla_compat.Identity(), ) context.assert_( "ALTER TABLE t1 ALTER COLUMN some_column DROP IDENTITY" ) @config.requirements.identity_columns @combinations( ({}, dict(always=True), "SET GENERATED ALWAYS"), ( dict(always=True), dict(always=False, start=3), "SET GENERATED BY DEFAULT SET START WITH 3", ), ( dict(always=True, start=3, increment=2, 
minvalue=-3, maxvalue=99), dict( always=True, start=3, increment=1, minvalue=-3, maxvalue=99, cycle=True, ), "SET CYCLE SET INCREMENT BY 1", ), ( dict( always=False, start=3, maxvalue=9999, minvalue=0, ), dict(always=False, start=3, order=True, on_null=False, cache=2), "SET CACHE 2", ), ( dict(always=False), dict(always=None, minvalue=0), "SET MINVALUE 0", ), ) def test_change_identity_in_column(self, existing, updated, text): context = op_fixture("postgresql") op.alter_column( "t1", "some_column", server_default=sqla_compat.Identity(**updated), existing_server_default=sqla_compat.Identity(**existing), ) context.assert_("ALTER TABLE t1 ALTER COLUMN some_column %s" % text) class PGAutocommitBlockTest(TestBase): __only_on__ = "postgresql" __backend__ = True def setUp(self): self.conn = conn = config.db.connect() with conn.begin(): conn.execute( text("CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy')") ) def tearDown(self): with self.conn.begin(): self.conn.execute(text("DROP TYPE mood")) def test_alter_enum(self, migration_context): with migration_context.begin_transaction(_per_migration=True): with migration_context.autocommit_block(): migration_context.execute( text("ALTER TYPE mood ADD VALUE 'soso'") ) class PGAutocommitBlockTestFuture(FutureEngineMixin, PGAutocommitBlockTest): pass class PGOfflineEnumTest(TestBase): def setUp(self): staging_env() self.cfg = cfg = _no_sql_testing_config() self.rid = rid = util.rev_id() self.script = script = ScriptDirectory.from_config(cfg) script.generate_revision(rid, None, refresh=True) def tearDown(self): clear_staging_env() def _inline_enum_script(self): write_script( self.script, self.rid, """ revision = '%s' down_revision = None from alembic import op from sqlalchemy.dialects.postgresql import ENUM from sqlalchemy import Column def upgrade(): op.create_table("sometable", Column("data", ENUM("one", "two", "three", name="pgenum")) ) def downgrade(): op.drop_table("sometable") """ % self.rid, ) def _distinct_enum_script(self): write_script( self.script, self.rid, """ revision = '%s' down_revision = None from alembic import op from sqlalchemy.dialects.postgresql import ENUM from sqlalchemy import Column def upgrade(): enum = ENUM("one", "two", "three", name="pgenum", create_type=False) enum.create(op.get_bind(), checkfirst=False) op.create_table("sometable", Column("data", enum) ) def downgrade(): op.drop_table("sometable") ENUM(name="pgenum").drop(op.get_bind(), checkfirst=False) """ % self.rid, ) def test_offline_inline_enum_create(self): self._inline_enum_script() with capture_context_buffer() as buf: command.upgrade(self.cfg, self.rid, sql=True) assert ( "CREATE TYPE pgenum AS " "ENUM ('one', 'two', 'three')" in buf.getvalue() ) assert "CREATE TABLE sometable (\n data pgenum\n)" in buf.getvalue() def test_offline_inline_enum_drop(self): self._inline_enum_script() with capture_context_buffer() as buf: command.downgrade(self.cfg, "%s:base" % self.rid, sql=True) assert "DROP TABLE sometable" in buf.getvalue() # no drop since we didn't emit events assert "DROP TYPE pgenum" not in buf.getvalue() def test_offline_distinct_enum_create(self): self._distinct_enum_script() with capture_context_buffer() as buf: command.upgrade(self.cfg, self.rid, sql=True) assert ( "CREATE TYPE pgenum AS ENUM " "('one', 'two', 'three')" in buf.getvalue() ) assert "CREATE TABLE sometable (\n data pgenum\n)" in buf.getvalue() def test_offline_distinct_enum_drop(self): self._distinct_enum_script() with capture_context_buffer() as buf: command.downgrade(self.cfg, "%s:base" % 
self.rid, sql=True) assert "DROP TABLE sometable" in buf.getvalue() assert "DROP TYPE pgenum" in buf.getvalue() class PostgresqlInlineLiteralTest(TablesTest): __only_on__ = "postgresql" __backend__ = True @classmethod def define_tables(cls, metadata): Table("tab", metadata, Column("col", String(50))) @classmethod def insert_data(cls, connection): connection.execute( text( """ insert into tab (col) values ('old data 1'), ('old data 2.1'), ('old data 3') """ ) ) def test_inline_percent(self, connection, ops_context): # TODO: here's the issue, you need to escape this. tab = table("tab", column("col")) ops_context.execute( tab.update() .where(tab.c.col.like(ops_context.inline_literal("%.%"))) .values(col=ops_context.inline_literal("new data")), execution_options={"no_parameters": True}, ) eq_( connection.execute( text("select count(*) from tab where col='new data'") ).scalar(), 1, ) class PostgresqlDefaultCompareTest(TestBase): __only_on__ = "postgresql" __backend__ = True @classmethod def setup_class(cls): cls.bind = config.db staging_env() cls.migration_context = MigrationContext.configure( connection=cls.bind.connect(), opts={"compare_type": True, "compare_server_default": True}, ) def setUp(self): self.metadata = MetaData() self.autogen_context = api.AutogenContext(self.migration_context) @classmethod def teardown_class(cls): clear_staging_env() def tearDown(self): with config.db.begin() as conn: self.metadata.drop_all(conn) def _compare_default_roundtrip( self, type_, orig_default, alternate=None, diff_expected=None ): diff_expected = ( diff_expected if diff_expected is not None else alternate is not None ) if alternate is None: alternate = orig_default t1 = Table( "test", self.metadata, Column("somecol", type_, server_default=orig_default), ) t2 = Table( "test", MetaData(), Column("somecol", type_, server_default=alternate), ) t1.create(self.bind) insp = inspect(self.bind) cols = insp.get_columns(t1.name) insp_col = Column( "somecol", cols[0]["type"], server_default=text(cols[0]["default"]) ) op = ops.AlterColumnOp("test", "somecol") _compare_server_default( self.autogen_context, op, None, "test", "somecol", insp_col, t2.c.somecol, ) diffs = op.to_diff_tuple() eq_(bool(diffs), diff_expected) def _compare_default(self, t1, t2, col, rendered): t1.create(self.bind, checkfirst=True) insp = inspect(self.bind) cols = insp.get_columns(t1.name) ctx = self.autogen_context.migration_context return ctx.impl.compare_server_default( None, col, rendered, cols[0]["default"] ) def test_compare_string_blank_default(self): self._compare_default_roundtrip(String(8), "") def test_compare_string_nonblank_default(self): self._compare_default_roundtrip(String(8), "hi") def test_compare_interval_str(self): # this form shouldn't be used but testing here # for compatibility self._compare_default_roundtrip(Interval, "14 days") @config.requirements.postgresql_uuid_ossp def test_compare_uuid_text(self): self._compare_default_roundtrip(UUID, text("uuid_generate_v4()")) def test_compare_interval_text(self): self._compare_default_roundtrip(Interval, text("'14 days'")) def test_compare_array_of_integer_text(self): self._compare_default_roundtrip( ARRAY(Integer), text("(ARRAY[]::integer[])") ) def test_compare_current_timestamp_text(self): self._compare_default_roundtrip( DateTime(), text("TIMEZONE('utc', CURRENT_TIMESTAMP)") ) def test_compare_current_timestamp_fn_w_binds(self): self._compare_default_roundtrip( DateTime(), func.timezone("utc", func.current_timestamp()) ) def test_compare_integer_str(self): 
self._compare_default_roundtrip(Integer(), "5") def test_compare_integer_text(self): self._compare_default_roundtrip(Integer(), text("5")) def test_compare_integer_text_diff(self): self._compare_default_roundtrip(Integer(), text("5"), "7") def test_compare_float_str(self): self._compare_default_roundtrip(Float(), "5.2") def test_compare_float_text(self): self._compare_default_roundtrip(Float(), text("5.2")) def test_compare_float_no_diff1(self): self._compare_default_roundtrip( Float(), text("5.2"), "5.2", diff_expected=False ) def test_compare_float_no_diff2(self): self._compare_default_roundtrip( Float(), "5.2", text("5.2"), diff_expected=False ) def test_compare_float_no_diff3(self): self._compare_default_roundtrip( Float(), text("5"), text("5.0"), diff_expected=False ) def test_compare_float_no_diff4(self): self._compare_default_roundtrip( Float(), "5", "5.0", diff_expected=False ) def test_compare_float_no_diff5(self): self._compare_default_roundtrip( Float(), text("5"), "5.0", diff_expected=False ) def test_compare_float_no_diff6(self): self._compare_default_roundtrip( Float(), "5", text("5.0"), diff_expected=False ) def test_compare_numeric_no_diff(self): self._compare_default_roundtrip( Numeric(), text("5"), "5.0", diff_expected=False ) def test_compare_unicode_literal(self): self._compare_default_roundtrip(String(), u"im a default") # TOOD: will need to actually eval() the repr() and # spend more effort figuring out exactly the kind of expression # to use def _TODO_test_compare_character_str_w_singlequote(self): self._compare_default_roundtrip(String(), "hel''lo") def test_compare_character_str(self): self._compare_default_roundtrip(String(), "hello") def test_compare_character_text(self): self._compare_default_roundtrip(String(), text("'hello'")) def test_compare_character_str_diff(self): self._compare_default_roundtrip(String(), "hello", "there") def test_compare_character_text_diff(self): self._compare_default_roundtrip( String(), text("'hello'"), text("'there'") ) def test_primary_key_skip(self): """Test that SERIAL cols are just skipped""" t1 = Table( "sometable", self.metadata, Column("id", Integer, primary_key=True) ) t2 = Table( "sometable", MetaData(), Column("id", Integer, primary_key=True) ) assert not self._compare_default(t1, t2, t2.c.id, "") class PostgresqlDetectSerialTest(TestBase): __only_on__ = "postgresql" __backend__ = True @classmethod def setup_class(cls): cls.bind = config.db staging_env() def setUp(self): self.conn = self.bind.connect() self.migration_context = MigrationContext.configure( connection=self.conn, opts={"compare_type": True, "compare_server_default": True}, ) self.autogen_context = api.AutogenContext(self.migration_context) def tearDown(self): self.conn.close() @classmethod def teardown_class(cls): clear_staging_env() @provide_metadata def _expect_default(self, c_expected, col, seq=None): Table("t", self.metadata, col) self.autogen_context.metadata = self.metadata if seq: seq._set_metadata(self.metadata) self.metadata.create_all(config.db) insp = inspect(config.db) uo = ops.UpgradeOps(ops=[]) _compare_tables( set([(None, "t")]), set([]), insp, uo, self.autogen_context ) diffs = uo.as_diffs() tab = diffs[0][1] eq_( _render_server_default_for_compare( tab.c.x.server_default, tab.c.x, self.autogen_context ), c_expected, ) insp = inspect(config.db) uo = ops.UpgradeOps(ops=[]) m2 = MetaData() Table("t", m2, Column("x", BigInteger())) self.autogen_context.metadata = m2 _compare_tables( set([(None, "t")]), set([(None, "t")]), insp, uo, 
self.autogen_context, ) diffs = uo.as_diffs() server_default = diffs[0][0][4]["existing_server_default"] eq_( _render_server_default_for_compare( server_default, tab.c.x, self.autogen_context ), c_expected, ) def test_serial(self): self._expect_default(None, Column("x", Integer, primary_key=True)) def test_separate_seq(self): seq = Sequence("x_id_seq") self._expect_default( "nextval('x_id_seq'::regclass)", Column( "x", Integer, server_default=seq.next_value(), primary_key=True ), seq, ) def test_numeric(self): seq = Sequence("x_id_seq") self._expect_default( "nextval('x_id_seq'::regclass)", Column( "x", Numeric(8, 2), server_default=seq.next_value(), primary_key=True, ), seq, ) def test_no_default(self): self._expect_default( None, Column("x", Integer, autoincrement=False, primary_key=True) ) class PostgresqlAutogenRenderTest(TestBase): def setUp(self): ctx_opts = { "sqlalchemy_module_prefix": "sa.", "alembic_module_prefix": "op.", "target_metadata": MetaData(), } context = MigrationContext.configure( dialect_name="postgresql", opts=ctx_opts ) self.autogen_context = api.AutogenContext(context) def test_render_add_index_pg_where(self): autogen_context = self.autogen_context m = MetaData() t = Table("t", m, Column("x", String), Column("y", String)) idx = Index( "foo_idx", t.c.x, t.c.y, postgresql_where=(t.c.y == "something") ) op_obj = ops.CreateIndexOp.from_index(idx) eq_ignore_whitespace( autogenerate.render_op_text(autogen_context, op_obj), """op.create_index('foo_idx', 't', \ ['x', 'y'], unique=False, """ """postgresql_where=sa.text("y = 'something'"))""", ) def test_render_server_default_native_boolean(self): c = Column( "updated_at", Boolean(), server_default=false(), nullable=False ) result = autogenerate.render._render_column(c, self.autogen_context) eq_ignore_whitespace( result, "sa.Column('updated_at', sa.Boolean(), " "server_default=sa.text('false'), " "nullable=False)", ) def test_postgresql_array_type(self): eq_ignore_whitespace( autogenerate.render._repr_type( ARRAY(Integer), self.autogen_context ), "postgresql.ARRAY(sa.Integer())", ) eq_ignore_whitespace( autogenerate.render._repr_type( ARRAY(DateTime(timezone=True)), self.autogen_context ), "postgresql.ARRAY(sa.DateTime(timezone=True))", ) eq_ignore_whitespace( autogenerate.render._repr_type( ARRAY(BYTEA, as_tuple=True, dimensions=2), self.autogen_context ), "postgresql.ARRAY(postgresql.BYTEA(), " "as_tuple=True, dimensions=2)", ) assert ( "from sqlalchemy.dialects import postgresql" in self.autogen_context.imports ) def test_postgresql_hstore_subtypes(self): eq_ignore_whitespace( autogenerate.render._repr_type(HSTORE(), self.autogen_context), "postgresql.HSTORE(text_type=sa.Text())", ) eq_ignore_whitespace( autogenerate.render._repr_type( HSTORE(text_type=String()), self.autogen_context ), "postgresql.HSTORE(text_type=sa.String())", ) eq_ignore_whitespace( autogenerate.render._repr_type( HSTORE(text_type=BYTEA()), self.autogen_context ), "postgresql.HSTORE(text_type=postgresql.BYTEA())", ) assert ( "from sqlalchemy.dialects import postgresql" in self.autogen_context.imports ) def test_generic_array_type(self): eq_ignore_whitespace( autogenerate.render._repr_type( types.ARRAY(Integer), self.autogen_context ), "sa.ARRAY(sa.Integer())", ) eq_ignore_whitespace( autogenerate.render._repr_type( types.ARRAY(DateTime(timezone=True)), self.autogen_context ), "sa.ARRAY(sa.DateTime(timezone=True))", ) assert ( "from sqlalchemy.dialects import postgresql" not in self.autogen_context.imports ) eq_ignore_whitespace( 
autogenerate.render._repr_type( types.ARRAY(BYTEA, as_tuple=True, dimensions=2), self.autogen_context, ), "sa.ARRAY(postgresql.BYTEA(), as_tuple=True, dimensions=2)", ) assert ( "from sqlalchemy.dialects import postgresql" in self.autogen_context.imports ) def test_array_type_user_defined_inner(self): def repr_type(typestring, object_, autogen_context): if typestring == "type" and isinstance(object_, String): return "foobar.MYVARCHAR" else: return False self.autogen_context.opts.update(render_item=repr_type) eq_ignore_whitespace( autogenerate.render._repr_type( ARRAY(String), self.autogen_context ), "postgresql.ARRAY(foobar.MYVARCHAR)", ) def test_add_exclude_constraint(self): from sqlalchemy.dialects.postgresql import ExcludeConstraint autogen_context = self.autogen_context m = MetaData() t = Table("t", m, Column("x", String), Column("y", String)) op_obj = ops.AddConstraintOp.from_constraint( ExcludeConstraint( (t.c.x, ">"), where=t.c.x != 2, using="gist", name="t_excl_x" ) ) eq_ignore_whitespace( autogenerate.render_op_text(autogen_context, op_obj), "op.create_exclude_constraint('t_excl_x', " "'t', (sa.column('x'), '>'), " "where=sa.text('x != 2'), using='gist')", ) def test_add_exclude_constraint_case_sensitive(self): from sqlalchemy.dialects.postgresql import ExcludeConstraint autogen_context = self.autogen_context m = MetaData() t = Table( "TTAble", m, Column("XColumn", String), Column("YColumn", String) ) op_obj = ops.AddConstraintOp.from_constraint( ExcludeConstraint( (t.c.XColumn, ">"), where=t.c.XColumn != 2, using="gist", name="t_excl_x", ) ) eq_ignore_whitespace( autogenerate.render_op_text(autogen_context, op_obj), "op.create_exclude_constraint('t_excl_x', 'TTAble', " "(sa.column('XColumn'), '>'), " "where=sa.text('\"XColumn\" != 2'), using='gist')", ) def test_inline_exclude_constraint(self): from sqlalchemy.dialects.postgresql import ExcludeConstraint autogen_context = self.autogen_context m = MetaData() t = Table( "t", m, Column("x", String), Column("y", String), ExcludeConstraint( (column("x"), ">"), using="gist", where="x != 2", name="t_excl_x", ), ) op_obj = ops.CreateTableOp.from_table(t) eq_ignore_whitespace( autogenerate.render_op_text(autogen_context, op_obj), "op.create_table('t',sa.Column('x', sa.String(), nullable=True)," "sa.Column('y', sa.String(), nullable=True)," "postgresql.ExcludeConstraint((sa.column('x'), '>'), " "where=sa.text('x != 2'), using='gist', name='t_excl_x')" ")", ) def test_inline_exclude_constraint_case_sensitive(self): from sqlalchemy.dialects.postgresql import ExcludeConstraint autogen_context = self.autogen_context m = MetaData() t = Table( "TTable", m, Column("XColumn", String), Column("YColumn", String) ) ExcludeConstraint( (t.c.XColumn, ">"), using="gist", where='"XColumn" != 2', name="TExclX", ) op_obj = ops.CreateTableOp.from_table(t) eq_ignore_whitespace( autogenerate.render_op_text(autogen_context, op_obj), "op.create_table('TTable',sa.Column('XColumn', sa.String(), " "nullable=True)," "sa.Column('YColumn', sa.String(), nullable=True)," "postgresql.ExcludeConstraint((sa.column('XColumn'), '>'), " "where=sa.text('\"XColumn\" != 2'), using='gist', " "name='TExclX'))", ) def test_json_type(self): eq_ignore_whitespace( autogenerate.render._repr_type(JSON(), self.autogen_context), "postgresql.JSON(astext_type=sa.Text())", ) def test_jsonb_type(self): eq_ignore_whitespace( autogenerate.render._repr_type(JSONB(), self.autogen_context), "postgresql.JSONB(astext_type=sa.Text())", ) 
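# Illustrative sketch (not part of the original test suite): the _repr_type
# renderings asserted above are the type fragments that autogenerate splices
# into a generated migration file, so a hypothetical generated operation
# could read roughly:
#
#     op.add_column(
#         "t",
#         sa.Column("payload", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
#     )
#
# assuming the "sa." / "op." module prefixes configured in setUp(); the table
# and column names here are made up for illustration only and are not output
# asserted by these tests.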
alembic-rel_1_7_6/tests/test_revision.py000066400000000000000000001412071417624537100205550ustar00rootroot00000000000000from sqlalchemy.testing import util as sqla_testing_util from alembic.script.revision import CycleDetected from alembic.script.revision import DependencyCycleDetected from alembic.script.revision import DependencyLoopDetected from alembic.script.revision import LoopDetected from alembic.script.revision import MultipleHeads from alembic.script.revision import Revision from alembic.script.revision import RevisionError from alembic.script.revision import RevisionMap from alembic.testing import assert_raises_message from alembic.testing import eq_ from alembic.testing import expect_raises_message from alembic.testing.fixtures import TestBase from . import _large_map class APITest(TestBase): def test_invalid_datatype(self): map_ = RevisionMap( lambda: [ Revision("a", ()), Revision("b", ("a",)), Revision("c", ("b",)), ] ) with expect_raises_message( RevisionError, "revision identifier b'12345' is not a string; " "ensure database driver settings are correct", ): map_.get_revisions(b"12345") with expect_raises_message( RevisionError, "revision identifier b'12345' is not a string; " "ensure database driver settings are correct", ): map_.get_revision(b"12345") with expect_raises_message( RevisionError, r"revision identifier \(b'12345',\) is not a string; " "ensure database driver settings are correct", ): map_.get_revision((b"12345",)) map_.get_revision(("a",)) map_.get_revision("a") def test_add_revision_one_head(self): map_ = RevisionMap( lambda: [ Revision("a", ()), Revision("b", ("a",)), Revision("c", ("b",)), ] ) eq_(map_.heads, ("c",)) map_.add_revision(Revision("d", ("c",))) eq_(map_.heads, ("d",)) def test_add_revision_two_head(self): map_ = RevisionMap( lambda: [ Revision("a", ()), Revision("b", ("a",)), Revision("c1", ("b",)), Revision("c2", ("b",)), ] ) eq_(map_.heads, ("c1", "c2")) map_.add_revision(Revision("d1", ("c1",))) eq_(map_.heads, ("c2", "d1")) def test_get_revision_head_single(self): map_ = RevisionMap( lambda: [ Revision("a", ()), Revision("b", ("a",)), Revision("c", ("b",)), ] ) eq_(map_.get_revision("head"), map_._revision_map["c"]) def test_get_revision_base_single(self): map_ = RevisionMap( lambda: [ Revision("a", ()), Revision("b", ("a",)), Revision("c", ("b",)), ] ) eq_(map_.get_revision("base"), None) def test_get_revision_head_multiple(self): map_ = RevisionMap( lambda: [ Revision("a", ()), Revision("b", ("a",)), Revision("c1", ("b",)), Revision("c2", ("b",)), ] ) assert_raises_message( MultipleHeads, "Multiple heads are present", map_.get_revision, "head", ) def test_get_revision_heads_multiple(self): map_ = RevisionMap( lambda: [ Revision("a", ()), Revision("b", ("a",)), Revision("c1", ("b",)), Revision("c2", ("b",)), ] ) assert_raises_message( MultipleHeads, "Multiple heads are present", map_.get_revision, "heads", ) def test_get_revisions_head_multiple(self): map_ = RevisionMap( lambda: [ Revision("a", ()), Revision("b", ("a",)), Revision("c1", ("b",)), Revision("c2", ("b",)), ] ) assert_raises_message( MultipleHeads, "Multiple heads are present", map_.get_revisions, "head", ) def test_get_revisions_heads_multiple(self): map_ = RevisionMap( lambda: [ Revision("a", ()), Revision("b", ("a",)), Revision("c1", ("b",)), Revision("c2", ("b",)), ] ) eq_( map_.get_revisions("heads"), ( map_._revision_map["c1"], map_._revision_map["c2"], ), ) def test_get_revision_base_multiple(self): map_ = RevisionMap( lambda: [ Revision("a", ()), Revision("b", ("a",)), 
Revision("c", ()), Revision("d", ("c",)), ] ) eq_(map_.get_revision("base"), None) def test_iterate_tolerates_dupe_targets(self): map_ = RevisionMap( lambda: [ Revision("a", ()), Revision("b", ("a",)), Revision("c", ("b",)), ] ) eq_( [ r.revision for r in map_.iterate_revisions( ("c", "c"), "a", inclusive=False ) ], # Not inclusive so should not traverse a ["c", "b"], ) def test_repr_revs(self): map_ = RevisionMap( lambda: [ Revision("a", ()), Revision("b", ("a",)), Revision("c", (), dependencies=("a", "b")), ] ) c = map_._revision_map["c"] eq_(repr(c), "Revision('c', None, dependencies=('a', 'b'))") class DownIterateTest(TestBase): def _assert_iteration( self, upper, lower, assertion, inclusive=True, map_=None, implicit_base=False, select_for_downgrade=False, ): if map_ is None: map_ = self.map result = [ rev.revision for rev in map_.iterate_revisions( upper, lower, inclusive=inclusive, implicit_base=implicit_base, select_for_downgrade=select_for_downgrade, ) ] edges = [ (rev, child.revision) for child in map_._revision_map.values() if child is not None for rev in child._normalized_down_revisions ] assert sqla_testing_util.conforms_partial_ordering( edges, list(reversed(result)) ) eq_( result, assertion, ) class DiamondTest(DownIterateTest): def setUp(self): self.map = RevisionMap( lambda: [ Revision("a", ()), Revision("b1", ("a",)), Revision("b2", ("a",)), Revision("c", ("b1", "b2")), Revision("d", ("c",)), ] ) def test_iterate_simple_diamond(self): self._assert_iteration("d", "a", ["d", "c", "b1", "b2", "a"]) class EmptyMapTest(DownIterateTest): # see issue #258 def setUp(self): self.map = RevisionMap(lambda: []) def test_iterate(self): self._assert_iteration("head", "base", []) class LabeledBranchTest(DownIterateTest): def test_dupe_branch_collection(self): def fn(): return [ Revision("a", ()), Revision("b", ("a",)), Revision("c", ("b",), branch_labels=["xy1"]), Revision("d", ()), Revision("e", ("d",), branch_labels=["xy1"]), Revision("f", ("e",)), ] assert_raises_message( RevisionError, r"Branch name 'xy1' in revision (?:e|c) already " "used by revision (?:e|c)", getattr, RevisionMap(fn), "_revision_map", ) def test_filter_for_lineage_labeled_head_across_merge(self): def fn(): return [ Revision("a", ()), Revision("b", ("a",)), Revision("c1", ("b",), branch_labels="c1branch"), Revision("c2", ("b",)), Revision("d", ("c1", "c2")), ] map_ = RevisionMap(fn) c1 = map_.get_revision("c1") c2 = map_.get_revision("c2") d = map_.get_revision("d") eq_(map_.filter_for_lineage([c1, c2, d], "c1branch@head"), (c1, c2, d)) def test_filter_for_lineage_heads(self): eq_( self.map.filter_for_lineage([self.map.get_revision("f")], "heads"), (self.map.get_revision("f"),), ) def setUp(self): self.map = RevisionMap( lambda: [ Revision("a", (), branch_labels="abranch"), Revision("b", ("a",)), Revision("somelongername", ("b",)), Revision("c", ("somelongername",)), Revision("d", ()), Revision("e", ("d",), branch_labels=["ebranch"]), Revision("someothername", ("e",)), Revision("f", ("someothername",)), ] ) def test_get_base_revisions_labeled(self): eq_(self.map._get_base_revisions("somelongername@base"), ("a",)) def test_get_current_named_rev(self): eq_(self.map.get_revision("ebranch@head"), self.map.get_revision("f")) def test_get_base_revisions(self): eq_(self.map._get_base_revisions("base"), ("a", "d")) def test_iterate_head_to_named_base(self): self._assert_iteration( "heads", "ebranch@base", ["f", "someothername", "e", "d"] ) self._assert_iteration( "heads", "abranch@base", ["c", "somelongername", "b", "a"] ) 
def test_iterate_named_head_to_base(self): self._assert_iteration( "ebranch@head", "base", ["f", "someothername", "e", "d"] ) self._assert_iteration( "abranch@head", "base", ["c", "somelongername", "b", "a"] ) def test_iterate_named_head_to_heads(self): self._assert_iteration("heads", "ebranch@head", ["f"], inclusive=True) def test_iterate_named_rev_to_heads(self): self._assert_iteration( "heads", "ebranch@d", ["f", "someothername", "e", "d"], inclusive=True, ) def test_iterate_head_to_version_specific_base(self): self._assert_iteration( "heads", "e@base", ["f", "someothername", "e", "d"] ) self._assert_iteration( "heads", "c@base", ["c", "somelongername", "b", "a"] ) def test_iterate_to_branch_at_rev(self): self._assert_iteration( "heads", "ebranch@d", ["f", "someothername", "e", "d"] ) def test_branch_w_down_relative(self): self._assert_iteration( "heads", "ebranch@-2", ["f", "someothername", "e"] ) def test_branch_w_up_relative(self): # In the absence of a branch point surely the +2 is relative to base? # So 'someothername' would be referenced by ebranch@+3? self._assert_iteration("ebranch@+2", "base", ["e", "d"]) def test_partial_id_resolve(self): eq_(self.map.get_revision("ebranch@some").revision, "someothername") eq_(self.map.get_revision("abranch@some").revision, "somelongername") def test_partial_id_resolve_too_short(self): assert_raises_message( RevisionError, "No such revision or branch 'sos'; please ensure at least " "four characters are present for partial revision identifier " "matches", self.map.get_revision, "ebranch@sos", ) def test_branch_at_heads(self): eq_(self.map.get_revision("abranch@heads").revision, "c") def test_branch_at_syntax(self): eq_(self.map.get_revision("abranch@head").revision, "c") eq_(self.map.get_revision("abranch@base"), None) eq_(self.map.get_revision("ebranch@head").revision, "f") eq_(self.map.get_revision("abranch@base"), None) eq_(self.map.get_revision("ebranch@d").revision, "d") def test_branch_at_self(self): eq_(self.map.get_revision("ebranch@ebranch").revision, "e") def test_retrieve_branch_revision(self): eq_(self.map.get_revision("abranch").revision, "a") eq_(self.map.get_revision("ebranch").revision, "e") def test_rev_not_in_branch(self): assert_raises_message( RevisionError, "Revision b is not a member of branch 'ebranch'", self.map.get_revision, "ebranch@b", ) assert_raises_message( RevisionError, "Revision d is not a member of branch 'abranch'", self.map.get_revision, "abranch@d", ) def test_actually_short_rev_name(self): eq_(self.map.get_revision("e").revision, "e") def test_no_revision_exists(self): assert_raises_message( RevisionError, "No such revision or branch 'qprstuv'$", self.map.get_revision, "abranch@qprstuv", ) assert_raises_message( RevisionError, "No such revision or branch 'qpr'; please ensure at least " "four characters are present for partial revision identifier " "matches$", self.map.get_revision, "abranch@qpr", ) def test_not_actually_a_branch(self): eq_(self.map.get_revision("e@d").revision, "d") def test_not_actually_a_branch_partial_resolution(self): eq_(self.map.get_revision("someoth@d").revision, "d") def test_no_such_branch(self): assert_raises_message( RevisionError, "No such branch: 'x'", self.map.get_revision, "x@d" ) class LongShortBranchTest(DownIterateTest): def setUp(self): self.map = RevisionMap( lambda: [ Revision("a", ()), Revision("b1", ("a",)), Revision("b2", ("a",)), Revision("c1", ("b1",)), Revision("d11", ("c1",)), Revision("d12", ("c1",)), ] ) def test_iterate_full(self): self._assert_iteration( 
"heads", "base", ["b2", "d11", "d12", "c1", "b1", "a"] ) class MultipleBranchTest(DownIterateTest): def setUp(self): self.map = RevisionMap( lambda: [ Revision("a", ()), Revision("b1", ("a",)), Revision("b2", ("a",)), Revision("cb1", ("b1",)), Revision("cb2", ("b2",)), Revision("d1cb1", ("cb1",)), # head Revision("d2cb1", ("cb1",)), # head Revision("d1cb2", ("cb2",)), Revision("d2cb2", ("cb2",)), Revision("d3cb2", ("cb2",)), # head Revision("d1d2cb2", ("d1cb2", "d2cb2")), # head + merge point ] ) def test_iterate_from_merge_point(self): self._assert_iteration( "d1d2cb2", "a", ["d1d2cb2", "d1cb2", "d2cb2", "cb2", "b2", "a"] ) def test_iterate_multiple_heads(self): self._assert_iteration( ["d2cb2", "d3cb2"], "a", ["d2cb2", "d3cb2", "cb2", "b2", "a"] ) def test_iterate_single_branch(self): self._assert_iteration("d3cb2", "a", ["d3cb2", "cb2", "b2", "a"]) def test_iterate_single_branch_to_base(self): self._assert_iteration("d3cb2", "base", ["d3cb2", "cb2", "b2", "a"]) def test_iterate_multiple_branch_to_base(self): self._assert_iteration( ["d3cb2", "cb1"], "base", ["cb1", "b1", "d3cb2", "cb2", "b2", "a"] ) def test_iterate_multiple_heads_single_base(self): # head d1cb1 is omitted as it is not # a descendant of b2 self._assert_iteration( ["d1cb1", "d2cb2", "d3cb2"], "b2", ["d2cb2", "d3cb2", "cb2", "b2"] ) def test_same_branch_wrong_direction(self): # nodes b1 and d1cb1 are connected, but # db1cb1 is the descendant of b1 assert_raises_message( RevisionError, r"Revision d1cb1 is not an ancestor of revision b1", list, self.map.iterate_revisions("b1", "d1cb1"), ) def test_distinct_branches(self): # nodes db2cb2 and b1 have no path to each other assert_raises_message( RevisionError, r"Revision b1 is not an ancestor of revision d2cb2", list, self.map.iterate_revisions("d2cb2", "b1"), ) def test_wrong_direction_to_base_as_none(self): # this needs to raise and not just return empty iteration # as added by #258 assert_raises_message( RevisionError, r"Revision d1cb1 is not an ancestor of revision base", list, self.map.iterate_revisions(None, "d1cb1"), ) def test_wrong_direction_to_base_as_empty(self): # this needs to raise and not just return empty iteration # as added by #258 assert_raises_message( RevisionError, r"Revision d1cb1 is not an ancestor of revision base", list, self.map.iterate_revisions((), "d1cb1"), ) class BranchTravellingTest(DownIterateTest): """test the order of revs when going along multiple branches. We want depth-first along branches, but then we want to terminate all branches at their branch point before continuing to the nodes preceding that branch. 
""" def setUp(self): self.map = RevisionMap( lambda: [ Revision("a1", ()), Revision("a2", ("a1",)), Revision("a3", ("a2",)), Revision("b1", ("a3",)), Revision("b2", ("a3",)), Revision("cb1", ("b1",)), Revision("cb2", ("b2",)), Revision("db1", ("cb1",)), Revision("db2", ("cb2",)), Revision("e1b1", ("db1",)), Revision("fe1b1", ("e1b1",)), Revision("e2b1", ("db1",)), Revision("e2b2", ("db2",)), Revision("merge", ("e2b1", "e2b2")), ] ) def test_iterate_one_branch_both_to_merge(self): # test that when we hit a merge point, implicit base will # ensure all branches that supply the merge point are filled in self._assert_iteration( "merge", "db1", ["merge", "e2b1", "db1", "e2b2", "db2", "cb2", "b2"], implicit_base=True, ) def test_three_branches_end_in_single_branch(self): self._assert_iteration( ["merge", "fe1b1"], "a3", [ "fe1b1", "e1b1", "merge", "e2b1", "db1", "cb1", "b1", "e2b2", "db2", "cb2", "b2", "a3", ], ) def test_two_branches_to_root(self): # here we want 'a3' as a "stop" branch point, but *not* # 'db1', as we don't have multiple traversals on db1 self._assert_iteration( "merge", "a1", [ "merge", "e2b1", "db1", "cb1", "b1", # e2b1 branch "e2b2", "db2", "cb2", "b2", # e2b2 branch "a3", # both terminate at a3 "a2", "a1", # finish out ], # noqa ) def test_two_branches_end_in_branch(self): self._assert_iteration( "merge", "b1", # 'b1' is local to 'e2b1' # branch so that is all we get ["merge", "e2b1", "db1", "cb1", "b1"], # noqa ) def test_two_branches_end_behind_branch(self): self._assert_iteration( "merge", "a2", [ "merge", "e2b1", "db1", "cb1", "b1", # e2b1 branch "e2b2", "db2", "cb2", "b2", # e2b2 branch "a3", # both terminate at a3 "a2", ], # noqa ) def test_three_branches_to_root(self): # in this case, both "a3" and "db1" are stop points self._assert_iteration( ["merge", "fe1b1"], "a1", [ "fe1b1", "e1b1", # fe1b1 branch "merge", "e2b1", # e2b1 branch "db1", # fe1b1 and e2b1 branches terminate at db1 "cb1", "b1", # e2b1 branch continued....might be nicer # if this was before the e2b2 branch... 
"e2b2", "db2", "cb2", "b2", # e2b2 branch "a3", # e2b1 and e2b2 branches terminate at a3 "a2", "a1", # finish out ], # noqa ) def test_three_branches_end_multiple_bases(self): # in this case, both "a3" and "db1" are stop points self._assert_iteration( ["merge", "fe1b1"], ["cb1", "cb2"], [ "fe1b1", "e1b1", "merge", "e2b1", "db1", "cb1", "e2b2", "db2", "cb2", ], ) def test_three_branches_end_multiple_bases_exclusive(self): self._assert_iteration( ["merge", "fe1b1"], ["cb1", "cb2"], ["fe1b1", "e1b1", "merge", "e2b1", "db1", "e2b2", "db2"], inclusive=False, ) def test_detect_invalid_head_selection(self): # db1 is an ancestor of fe1b1 assert_raises_message( RevisionError, "Requested revision fe1b1 overlaps " "with other requested revisions", list, self.map.iterate_revisions(["db1", "b2", "fe1b1"], ()), ) def test_three_branches_end_multiple_bases_exclusive_blank(self): self._assert_iteration( ["e2b1", "b2", "fe1b1"], (), [ "b2", "fe1b1", "e1b1", "e2b1", "db1", "cb1", "b1", "a3", "a2", "a1", ], inclusive=False, ) def test_iterate_to_symbolic_base(self): self._assert_iteration( ["fe1b1"], "base", ["fe1b1", "e1b1", "db1", "cb1", "b1", "a3", "a2", "a1"], inclusive=False, ) def test_ancestor_nodes(self): merge = self.map.get_revision("merge") eq_( { rev.revision for rev in self.map._get_ancestor_nodes([merge], check=True) }, { "a1", "e2b2", "e2b1", "cb2", "merge", "a3", "a2", "b1", "b2", "db1", "db2", "cb1", }, ) class MultipleBaseTest(DownIterateTest): def setUp(self): self.map = RevisionMap( lambda: [ Revision("base1", ()), Revision("base2", ()), Revision("base3", ()), Revision("a1a", ("base1",)), Revision("a1b", ("base1",)), Revision("a2", ("base2",)), Revision("a3", ("base3",)), Revision("b1a", ("a1a",)), Revision("b1b", ("a1b",)), Revision("b2", ("a2",)), Revision("b3", ("a3",)), Revision("c2", ("b2",)), Revision("d2", ("c2",)), Revision("mergeb3d2", ("b3", "d2")), ] ) def test_heads_to_base(self): self._assert_iteration( "heads", "base", [ "b1a", "a1a", "b1b", "a1b", "base1", "mergeb3d2", "b3", "a3", "base3", "d2", "c2", "b2", "a2", "base2", ], ) def test_heads_to_base_exclusive(self): self._assert_iteration( "heads", "base", [ "b1a", "a1a", "b1b", "a1b", "base1", "mergeb3d2", "b3", "a3", "base3", "d2", "c2", "b2", "a2", "base2", ], inclusive=False, ) def test_heads_to_blank(self): self._assert_iteration( "heads", None, [ "b1a", "a1a", "b1b", "a1b", "base1", "mergeb3d2", "b3", "a3", "base3", "d2", "c2", "b2", "a2", "base2", ], ) def test_detect_invalid_base_selection(self): assert_raises_message( RevisionError, "overlaps with other requested revisions", list, self.map.iterate_revisions(["c2"], ["a2", "b2"]), ) def test_heads_to_revs_plus_implicit_base_exclusive(self): self._assert_iteration( "heads", ["c2"], [ "b1a", "a1a", "b1b", "a1b", "base1", "mergeb3d2", "b3", "a3", "base3", "d2", ], inclusive=False, implicit_base=True, ) def test_heads_to_revs_base_exclusive(self): self._assert_iteration( "heads", ["c2"], ["mergeb3d2", "d2"], inclusive=False ) def test_heads_to_revs_plus_implicit_base_inclusive(self): self._assert_iteration( "heads", ["c2"], [ "b1a", "a1a", "b1b", "a1b", "base1", "mergeb3d2", "b3", "a3", "base3", "d2", "c2", ], implicit_base=True, ) def test_specific_path_one(self): self._assert_iteration("b3", "base3", ["b3", "a3", "base3"]) def test_specific_path_two_implicit_base(self): self._assert_iteration( ["b3", "b2"], "base3", ["b2", "a2", "base2", "b3", "a3"], inclusive=False, implicit_base=True, ) class MultipleBaseCrossDependencyTestOne(DownIterateTest): def setUp(self): """ 
Structure:: base1 -----> a1a -> b1a +----> a1b -> b1b | +-----------+ | v base3 -> a3 -> b3 ^ | +-----------+ | base2 -> a2 -> b2 -> c2 -> d2 """ self.map = RevisionMap( lambda: [ Revision("base1", (), branch_labels="b_1"), Revision("a1a", ("base1",)), Revision("a1b", ("base1",)), Revision("b1a", ("a1a",)), Revision("b1b", ("a1b",), dependencies="a3"), Revision("base2", (), branch_labels="b_2"), Revision("a2", ("base2",)), Revision("b2", ("a2",)), Revision("c2", ("b2",), dependencies="a3"), Revision("d2", ("c2",)), Revision("base3", (), branch_labels="b_3"), Revision("a3", ("base3",)), Revision("b3", ("a3",)), ] ) def test_what_are_the_heads(self): eq_(self.map.heads, ("b1a", "b1b", "d2", "b3")) def test_heads_to_base(self): self._assert_iteration( "heads", "base", [ "b1a", "a1a", "b1b", "a1b", "base1", "d2", "c2", "b2", "a2", "base2", "b3", "a3", "base3", ], ) def test_heads_to_base_downgrade(self): self._assert_iteration( "heads", "base", [ "b1a", "a1a", "b1b", "a1b", "base1", "d2", "c2", "b2", "a2", "base2", "b3", "a3", "base3", ], select_for_downgrade=True, ) def test_same_branch_wrong_direction(self): assert_raises_message( RevisionError, r"Revision d2 is not an ancestor of revision b2", list, self.map.iterate_revisions("b2", "d2"), ) def test_different_branch_not_wrong_direction(self): # Changed from empty list. Expect this should raise an error in # --sql mode (since there is not a direct path), or in upgrade mode # it should return revision b3, not an empty list. assert_raises_message( RevisionError, r"Revision d2 is not an ancestor of revision b3", list, self.map.iterate_revisions("b3", "d2"), ) def test_we_need_head2_upgrade(self): # the 2 branch relies on the 3 branch self._assert_iteration( "b_2@head", "base", ["d2", "c2", "b2", "a2", "base2", "a3", "base3"], ) def test_we_need_head2_downgrade(self): # the 2 branch relies on the 3 branch, but # on the downgrade side, don't need to touch the 3 branch self._assert_iteration( "b_2@head", "b_2@base", ["d2", "c2", "b2", "a2", "base2"], select_for_downgrade=True, ) def test_we_need_head3_upgrade(self): # the 3 branch can be upgraded alone. self._assert_iteration("b_3@head", "base", ["b3", "a3", "base3"]) def test_we_need_head3_downgrade(self): # the 3 branch can be upgraded alone. self._assert_iteration( "b_3@head", "base", ["b3", "a3", "base3"], select_for_downgrade=True, ) def test_we_need_head1_upgrade(self): # the 1 branch relies on the 3 branch self._assert_iteration( "b1b@head", "base", ["b1b", "a1b", "base1", "a3", "base3"] ) def test_we_need_head1_downgrade(self): # going down we don't need a3-> base3, as long # as we are limiting the base target self._assert_iteration( "b1b@head", "b1b@base", ["b1b", "a1b", "base1"], select_for_downgrade=True, ) def test_we_need_base2_upgrade(self): # This is an upgrade from base, so deps should be included and # the result should be different to the downgrade case below self._assert_iteration( "heads", "b_2@base", ["d2", "c2", "b2", "a2", "base2", "a3", "base3"], ) def test_we_need_base2_downgrade(self): # consider a downgrade to b_2@base - we # want to run through all the "2"s alone, and we're done. 
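# unlike the upgrade case just above, select_for_downgrade=True means the a3/base3 dependency branch is left untouched here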
self._assert_iteration( "heads", "b_2@base", ["d2", "c2", "b2", "a2", "base2"], select_for_downgrade=True, ) def test_we_need_base3_upgrade(self): # branch b_3 has no dependencies, so b1b/d2/c2 not needed self._assert_iteration("heads", "b_3@base", ["b3", "a3", "base3"]) def test_we_need_base3_downgrade(self): # consider a downgrade to b_3@base - due to the a3 dependency, we # need to downgrade everything dependent on a3 # as well, which means b1b and c2. Then we can downgrade # the 3s. self._assert_iteration( "heads", "b_3@base", ["b1b", "d2", "c2", "b3", "a3", "base3"], select_for_downgrade=True, ) class MultipleBaseCrossDependencyTestTwo(DownIterateTest): def setUp(self): self.map = RevisionMap( lambda: [ Revision("base1", (), branch_labels="b_1"), Revision("a1", "base1"), Revision("b1", "a1"), Revision("c1", "b1"), Revision("base2", (), dependencies="b_1", branch_labels="b_2"), Revision("a2", "base2"), Revision("b2", "a2"), Revision("c2", "b2"), Revision("d2", "c2"), Revision("base3", (), branch_labels="b_3"), Revision("a3", "base3"), Revision("b3", "a3"), Revision("c3", "b3", dependencies="b2"), Revision("d3", "c3"), ] ) def test_what_are_the_heads(self): eq_(self.map.heads, ("c1", "d2", "d3")) def test_heads_to_base(self): self._assert_iteration( "heads", "base", [ "c1", "b1", "a1", "d2", "c2", "d3", "c3", "b3", "a3", "base3", "b2", "a2", "base2", "base1", ], ) def test_we_need_head2(self): self._assert_iteration( "b_2@head", "base", ["d2", "c2", "b2", "a2", "base2", "base1"] ) def test_we_need_head3(self): self._assert_iteration( "b_3@head", "base", ["d3", "c3", "b3", "a3", "base3", "b2", "a2", "base2", "base1"], ) def test_we_need_head1(self): self._assert_iteration("b_1@head", "base", ["c1", "b1", "a1", "base1"]) def test_we_need_base1(self): # b_1 has no dependencies self._assert_iteration( "heads", "b_1@base", [ "c1", "b1", "a1", "base1", ], ) def test_we_need_base2(self): # base2 depends on base1, nobody depends on b_3 so removed d3,c3 self._assert_iteration( "heads", "b_2@base", ["d2", "c2", "b2", "a2", "base2", "base1"] ) def test_we_need_base3(self): # c3 depends on b2 -> add b2,a2,base2, base2 depends on base1 self._assert_iteration( "heads", "b_3@base", ["d3", "c3", "b3", "a3", "base3", "b2", "a2", "base2", "base1"], ) class MultipleBaseCrossDependencyTestThree(DownIterateTest): def setUp(self): self.map = RevisionMap( lambda: [ Revision("base1", ()), Revision("a1", "base1"), Revision("b1", "a1"), Revision("c2", (), dependencies="a1"), Revision("c3", "c2"), ] ) def test_traverse_no_parent_but_a_dep(self): self._assert_iteration( "heads", "base", ["b1", "c3", "c2", "a1", "base1"], ) class LargeMapTest(DownIterateTest): def setUp(self): self.map = _large_map.map_ def test_all(self): raw = [r for r in self.map._revision_map.values() if r is not None] revs = [rev for rev in self.map.iterate_revisions("heads", "base")] eq_(set(raw), set(revs)) for idx, rev in enumerate(revs): ancestors = set(self.map._get_ancestor_nodes([rev])).difference( [rev] ) descendants = set( self.map._get_descendant_nodes([rev]) ).difference([rev]) assert not ancestors.intersection(descendants) remaining = set(revs[idx + 1 :]) if remaining: assert remaining.intersection(ancestors) class DepResolutionFailedTest(DownIterateTest): def setUp(self): self.map = RevisionMap( lambda: [ Revision("base1", ()), Revision("a1", "base1"), Revision("a2", "base1"), Revision("b1", "a1"), Revision("c1", "b1"), ] ) # intentionally make a broken map self.map._revision_map["fake"] = self.map._revision_map["a2"] 
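# "fake" now aliases the a2 Revision object, so no revision actually named "fake" exists; pointing b1's dependency at it below makes dependency resolution fail during iteration (exercised in test_failure_message)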
self.map._revision_map["b1"].dependencies = "fake" self.map._revision_map["b1"]._resolved_dependencies = ("fake",) self.map._revision_map["b1"]._normalized_resolved_dependencies = ( "fake", ) def test_failure_message(self): iter_ = self.map.iterate_revisions("c1", "base1") assert_raises_message( RevisionError, "Dependency resolution failed;", list, iter_ ) class InvalidRevisionMapTest(TestBase): def _assert_raises_revision_map(self, map_, except_cls, msg): assert_raises_message(except_cls, msg, lambda: map_._revision_map) def _assert_raises_revision_map_loop(self, map_, revision): self._assert_raises_revision_map( map_, LoopDetected, r"^Self-loop is detected in revisions \(%s\)$" % revision, ) def _assert_raises_revision_map_dep_loop(self, map_, revision): self._assert_raises_revision_map( map_, DependencyLoopDetected, r"^Dependency self-loop is detected in revisions \(%s\)$" % revision, ) def _assert_raises_revision_map_cycle(self, map_, revisions): self._assert_raises_revision_map( map_, CycleDetected, r"^Cycle is detected in revisions \(\(%s\)\(, \)?\)+$" % "|".join(revisions), ) def _assert_raises_revision_map_dep_cycle(self, map_, revisions): self._assert_raises_revision_map( map_, DependencyCycleDetected, r"^Dependency cycle is detected in revisions \(\(%s\)\(, \)?\)+$" % "|".join(revisions), ) class GraphWithLoopTest(DownIterateTest, InvalidRevisionMapTest): def test_revision_map_solitary_loop(self): map_ = RevisionMap( lambda: [ Revision("a", "a"), ] ) self._assert_raises_revision_map_loop(map_, "a") def test_revision_dupe_head(self): r1 = Revision("user_foo", None) r2 = Revision("user", "user_foo", dependencies="user_foo") self.map = RevisionMap(lambda: [r1, r2]) self._assert_iteration("heads", None, ["user", "user_foo"]) eq_(self.map._topological_sort([r1, r2], [r2]), ["user", "user_foo"]) def test_revision_map_no_loop_w_overlapping_substrings(self): r1 = Revision("user_foo", None) r2 = Revision("user", "user_foo") self.map = RevisionMap(lambda: [r1, r2]) self._assert_iteration("heads", None, ["user", "user_foo"]) def test_revision_map_no_loop_w_overlapping_substrings_dependencies(self): r1 = Revision("user_foo", None) r2 = Revision("user", None, dependencies="user_foo") self.map = RevisionMap(lambda: [r1, r2]) self._assert_iteration("heads", None, ["user", "user_foo"]) def test_revision_map_base_loop(self): map_ = RevisionMap( lambda: [ Revision("a", "a"), Revision("b", "a"), Revision("c", "b"), ] ) self._assert_raises_revision_map_loop(map_, "a") def test_revision_map_head_loop(self): map_ = RevisionMap( lambda: [ Revision("a", ()), Revision("b", "a"), Revision("c", ("b", "c")), ] ) self._assert_raises_revision_map_loop(map_, "c") def test_revision_map_branch_point_loop(self): map_ = RevisionMap( lambda: [ Revision("a", ()), Revision("b", ("a", "b")), Revision("c1", "b"), Revision("c2", "b"), ] ) self._assert_raises_revision_map_loop(map_, "b") def test_revision_map_merge_point_loop(self): map_ = RevisionMap( lambda: [ Revision("a", ()), Revision("b1", "a"), Revision("b2", "a"), Revision("c", ("b1", "b2", "c")), ] ) self._assert_raises_revision_map_loop(map_, "c") def test_revision_map_solitary_dependency_loop(self): map_ = RevisionMap( lambda: [ Revision("a", (), dependencies="a"), ] ) self._assert_raises_revision_map_dep_loop(map_, "a") def test_revision_map_base_dependency_loop(self): map_ = RevisionMap( lambda: [ Revision("a", (), dependencies="a"), Revision("b", "a"), Revision("c", "b"), ] ) self._assert_raises_revision_map_dep_loop(map_, "a") def 
test_revision_map_head_dep_loop(self): map_ = RevisionMap( lambda: [ Revision("a", ()), Revision("b", "a"), Revision("c", "b", dependencies="c"), ] ) self._assert_raises_revision_map_dep_loop(map_, "c") def test_revision_map_branch_point_dep_loop(self): map_ = RevisionMap( lambda: [ Revision("a", ()), Revision("b", "a", dependencies="b"), Revision("c1", "b"), Revision("c2", "b"), ] ) self._assert_raises_revision_map_dep_loop(map_, "b") def test_revision_map_merge_point_dep_loop(self): map_ = RevisionMap( lambda: [ Revision("a", ()), Revision("b1", "a"), Revision("b2", "a"), Revision("c", ("b1", "b2"), dependencies="c"), ] ) self._assert_raises_revision_map_dep_loop(map_, "c") class GraphWithCycleTest(InvalidRevisionMapTest): def test_revision_map_simple_cycle(self): map_ = RevisionMap( lambda: [ Revision("a", "c"), Revision("b", "a"), Revision("c", "b"), ] ) self._assert_raises_revision_map_cycle(map_, ["a", "b", "c"]) def test_revision_map_extra_simple_cycle(self): map_ = RevisionMap( lambda: [ Revision("a", "c"), Revision("b", "a"), Revision("c", "b"), Revision("d", ()), Revision("e", "d"), ] ) self._assert_raises_revision_map_cycle(map_, ["a", "b", "c"]) def test_revision_map_lower_simple_cycle(self): map_ = RevisionMap( lambda: [ Revision("a", "c"), Revision("b", "a"), Revision("c", "b"), Revision("d", "c"), Revision("e", "d"), ] ) self._assert_raises_revision_map_cycle(map_, ["a", "b", "c", "d", "e"]) def test_revision_map_upper_simple_cycle(self): map_ = RevisionMap( lambda: [ Revision("a", ()), Revision("b", "a"), Revision("c", ("b", "e")), Revision("d", "c"), Revision("e", "d"), ] ) self._assert_raises_revision_map_cycle(map_, ["a", "b", "c", "d", "e"]) def test_revision_map_simple_dep_cycle(self): map_ = RevisionMap( lambda: [ Revision("a", (), dependencies="c"), Revision("b", "a"), Revision("c", "b"), ] ) self._assert_raises_revision_map_dep_cycle(map_, ["a", "b", "c"]) def test_revision_map_extra_simple_dep_cycle(self): map_ = RevisionMap( lambda: [ Revision("a", (), dependencies="c"), Revision("b", "a"), Revision("c", "b"), Revision("d", ()), Revision("e", "d"), ] ) self._assert_raises_revision_map_dep_cycle(map_, ["a", "b", "c"]) def test_revision_map_lower_simple_dep_cycle(self): map_ = RevisionMap( lambda: [ Revision("a", (), dependencies="c"), Revision("b", "a"), Revision("c", "b"), Revision("d", "c"), Revision("e", "d"), ] ) self._assert_raises_revision_map_dep_cycle( map_, ["a", "b", "c", "d", "e"] ) def test_revision_map_upper_simple_dep_cycle(self): map_ = RevisionMap( lambda: [ Revision("a", ()), Revision("b", "a"), Revision("c", "b", dependencies="e"), Revision("d", "c"), Revision("e", "d"), ] ) self._assert_raises_revision_map_dep_cycle( map_, ["a", "b", "c", "d", "e"] ) class NormalizedDownRevTest(DownIterateTest): def setUp(self): self.map = RevisionMap( lambda: [ Revision("a1", ()), Revision("a2", "a1"), Revision("a3", "a2"), Revision("b1", ()), Revision("b2", "b1", dependencies="a3"), Revision("b3", "b2"), Revision("b4", "b3", dependencies="a3"), Revision("b5", "b4", dependencies="b4"), ] ) def test_normalized_down_revisions(self): b4 = self.map.get_revision("b4") eq_(b4._all_down_revisions, ("b3", "a3")) # "a3" is not included because ancestor b2 is also dependent eq_(b4._normalized_down_revisions, ("b3",)) def test_dupe_dependency(self): b5 = self.map.get_revision("b5") eq_(b5._all_down_revisions, ("b4",)) eq_(b5._normalized_down_revisions, ("b4",)) def test_branch_traversal(self): self._assert_iteration( "b4", "b1@base", ["b4", "b3", "b2", "b1"], 
select_for_downgrade=True, ) def test_all_traversal(self): self._assert_iteration( "heads", "base", ["b5", "b4", "b3", "b2", "b1", "a3", "a2", "a1"], select_for_downgrade=True, ) def test_partial_traversal(self): self._assert_iteration( "heads", "a2", ["b5", "b4", "b3", "b2", "a3", "a2"], select_for_downgrade=True, ) def test_partial_traversal_implicit_base_one(self): self._assert_iteration( "heads", "a2", ["b5", "b4", "b3", "b2", "b1", "a3", "a2"], select_for_downgrade=True, implicit_base=True, ) def test_partial_traversal_implicit_base_two(self): self._assert_iteration( "b5", ("b1",), ["b5", "b4", "b3", "b2", "b1", "a3", "a2", "a1"], implicit_base=True, ) def test_partial_traversal_implicit_base_three(self): map_ = RevisionMap( lambda: [ Revision("c1", ()), Revision("a1", ()), Revision("a2", "a1", dependencies="c1"), Revision("a3", "a2", dependencies="c1"), Revision("b1", ()), Revision("b2", "b1", dependencies="a3"), Revision("b3", "b2"), Revision("b4", "b3", dependencies="a3"), Revision("b5", "b4"), ] ) self._assert_iteration( "b5", ("b1",), ["b5", "b4", "b3", "b2", "b1", "a3", "a2", "a1", "c1"], implicit_base=True, map_=map_, ) alembic-rel_1_7_6/tests/test_script_consumption.py000066400000000000000000000623371417624537100226670ustar00rootroot00000000000000# coding: utf-8 from contextlib import contextmanager import os import re import textwrap import sqlalchemy as sa from alembic import command from alembic import testing from alembic import util from alembic.environment import EnvironmentContext from alembic.script import Script from alembic.script import ScriptDirectory from alembic.testing import assert_raises_message from alembic.testing import config from alembic.testing import eq_ from alembic.testing import mock from alembic.testing.env import _no_sql_testing_config from alembic.testing.env import _sqlite_file_db from alembic.testing.env import _sqlite_testing_config from alembic.testing.env import clear_staging_env from alembic.testing.env import env_file_fixture from alembic.testing.env import staging_env from alembic.testing.env import three_rev_fixture from alembic.testing.env import write_script from alembic.testing.fixtures import capture_context_buffer from alembic.testing.fixtures import FutureEngineMixin from alembic.testing.fixtures import TestBase class PatchEnvironment: branched_connection = False @contextmanager def _patch_environment(self, transactional_ddl, transaction_per_migration): conf = EnvironmentContext.configure conn = [None] def configure(*arg, **opt): opt.update( transactional_ddl=transactional_ddl, transaction_per_migration=transaction_per_migration, ) conn[0] = opt["connection"] return conf(*arg, **opt) with mock.patch.object(EnvironmentContext, "configure", configure): yield # it's no longer possible for the conn to be in a transaction # assuming normal env.py as context.begin_transaction() # will always run a real DB transaction, no longer uses autocommit # mode assert not conn[0].in_transaction() @staticmethod def _branched_connection_env(): if config.requirements.sqlalchemy_14.enabled: connect_warning = ( 'r"The Connection.connect\\(\\) method is considered legacy"' ) close_warning = ( 'r"The .close\\(\\) method on a ' "so-called 'branched' connection\"" ) else: connect_warning = close_warning = "" env_file_fixture( textwrap.dedent( """\ import alembic from alembic import context from sqlalchemy import engine_from_config, pool from sqlalchemy.testing import expect_warnings config = context.config target_metadata = None def run_migrations_online(): 
connectable = engine_from_config( config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool) with connectable.connect() as conn: with expect_warnings(%(connect_warning)s): connection = conn.connect() try: context.configure( connection=connection, target_metadata=target_metadata, ) with context.begin_transaction(): context.run_migrations() finally: with expect_warnings(%(close_warning)s): connection.close() if context.is_offline_mode(): assert False else: run_migrations_online() """ % { "connect_warning": connect_warning, "close_warning": close_warning, } ) ) @testing.combinations( ( False, True, ), ( True, False, ), ( True, True, ), argnames="transactional_ddl,transaction_per_migration", id_="rr", ) class ApplyVersionsFunctionalTest(PatchEnvironment, TestBase): __only_on__ = "sqlite" sourceless = False future = False transactional_ddl = False transaction_per_migration = True branched_connection = False def setUp(self): self.bind = _sqlite_file_db(future=self.future) self.env = staging_env(sourceless=self.sourceless) self.cfg = _sqlite_testing_config( sourceless=self.sourceless, future=self.future ) if self.branched_connection: self._branched_connection_env() def tearDown(self): clear_staging_env() def test_steps(self): with self._patch_environment( self.transactional_ddl, self.transaction_per_migration ): self._test_001_revisions() self._test_002_upgrade() self._test_003_downgrade() self._test_004_downgrade() self._test_005_upgrade() self._test_006_upgrade_again() self._test_007_stamp_upgrade() def _test_001_revisions(self): self.a = a = util.rev_id() self.b = b = util.rev_id() self.c = c = util.rev_id() script = ScriptDirectory.from_config(self.cfg) script.generate_revision(a, None, refresh=True) write_script( script, a, """ revision = '%s' down_revision = None from alembic import op def upgrade(): op.execute("CREATE TABLE foo(id integer)") def downgrade(): op.execute("DROP TABLE foo") """ % a, sourceless=self.sourceless, ) script.generate_revision(b, None, refresh=True) write_script( script, b, """ revision = '%s' down_revision = '%s' from alembic import op def upgrade(): op.execute("CREATE TABLE bar(id integer)") def downgrade(): op.execute("DROP TABLE bar") """ % (b, a), sourceless=self.sourceless, ) script.generate_revision(c, None, refresh=True) write_script( script, c, """ revision = '%s' down_revision = '%s' from alembic import op def upgrade(): op.execute("CREATE TABLE bat(id integer)") def downgrade(): op.execute("DROP TABLE bat") """ % (c, b), sourceless=self.sourceless, ) def _test_002_upgrade(self): command.upgrade(self.cfg, self.c) db = self.bind assert db.dialect.has_table(db.connect(), "foo") assert db.dialect.has_table(db.connect(), "bar") assert db.dialect.has_table(db.connect(), "bat") def _test_003_downgrade(self): command.downgrade(self.cfg, self.a) db = self.bind assert db.dialect.has_table(db.connect(), "foo") assert not db.dialect.has_table(db.connect(), "bar") assert not db.dialect.has_table(db.connect(), "bat") def _test_004_downgrade(self): command.downgrade(self.cfg, "base") db = self.bind assert not db.dialect.has_table(db.connect(), "foo") assert not db.dialect.has_table(db.connect(), "bar") assert not db.dialect.has_table(db.connect(), "bat") def _test_005_upgrade(self): command.upgrade(self.cfg, self.b) db = self.bind assert db.dialect.has_table(db.connect(), "foo") assert db.dialect.has_table(db.connect(), "bar") assert not db.dialect.has_table(db.connect(), "bat") def _test_006_upgrade_again(self): 
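# upgrading to a revision that is already current should be a no-op: the same tables exist afterwards as in _test_005_upgrade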
command.upgrade(self.cfg, self.b) db = self.bind assert db.dialect.has_table(db.connect(), "foo") assert db.dialect.has_table(db.connect(), "bar") assert not db.dialect.has_table(db.connect(), "bat") def _test_007_stamp_upgrade(self): command.stamp(self.cfg, self.c) db = self.bind assert db.dialect.has_table(db.connect(), "foo") assert db.dialect.has_table(db.connect(), "bar") assert not db.dialect.has_table(db.connect(), "bat") class LegacyApplyVersionsFunctionalTest(ApplyVersionsFunctionalTest): __requires__ = ("sqlalchemy_1x",) branched_connection = True # class level combinations can't do the skips for SQLAlchemy 1.3 # so we have a separate class @testing.combinations( (False, True), (True, False), (True, True), argnames="transactional_ddl,transaction_per_migration", id_="rr", ) class FutureApplyVersionsTest(FutureEngineMixin, ApplyVersionsFunctionalTest): future = True class SimpleSourcelessApplyVersionsTest(ApplyVersionsFunctionalTest): sourceless = "simple" @testing.combinations( ("pep3147_envonly",), ("pep3147_everything",), argnames="sourceless", id_="r", ) class NewFangledSourcelessApplyVersionsTest(ApplyVersionsFunctionalTest): pass class CallbackEnvironmentTest(ApplyVersionsFunctionalTest): exp_kwargs = frozenset(("ctx", "heads", "run_args", "step")) @staticmethod def _env_file_fixture(): env_file_fixture( textwrap.dedent( """\ import alembic from alembic import context from sqlalchemy import engine_from_config, pool config = context.config target_metadata = None def run_migrations_offline(): url = config.get_main_option('sqlalchemy.url') context.configure( url=url, target_metadata=target_metadata, on_version_apply=alembic.mock_event_listener, literal_binds=True) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): connectable = engine_from_config( config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool) with connectable.connect() as connection: context.configure( connection=connection, on_version_apply=alembic.mock_event_listener, target_metadata=target_metadata, ) with context.begin_transaction(): context.run_migrations() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() """ ) ) def test_steps(self): import alembic alembic.mock_event_listener = None self._env_file_fixture() with mock.patch("alembic.mock_event_listener", mock.Mock()) as mymock: super(CallbackEnvironmentTest, self).test_steps() calls = mymock.call_args_list assert calls for call in calls: args, kw = call assert not args assert set(kw.keys()) >= self.exp_kwargs assert kw["run_args"] == {} assert hasattr(kw["ctx"], "get_current_revision") step = kw["step"] assert isinstance(step.is_upgrade, bool) assert isinstance(step.is_stamp, bool) assert isinstance(step.is_migration, bool) assert isinstance(step.up_revision_id, str) assert isinstance(step.up_revision, Script) for revtype in "up", "down", "source", "destination": revs = getattr(step, "%s_revisions" % revtype) assert isinstance(revs, tuple) for rev in revs: assert isinstance(rev, Script) revids = getattr(step, "%s_revision_ids" % revtype) for revid in revids: assert isinstance(revid, str) heads = kw["heads"] assert hasattr(heads, "__iter__") for h in heads: assert h is None or isinstance(h, str) class OfflineTransactionalDDLTest(TestBase): def setUp(self): self.env = staging_env() self.cfg = cfg = _no_sql_testing_config() cfg.set_main_option("dialect_name", "sqlite") cfg.remove_main_option("url") self.a, self.b, self.c = three_rev_fixture(cfg) def 
tearDown(self): clear_staging_env() def test_begin_commit_transactional_ddl(self): with capture_context_buffer(transactional_ddl=True) as buf: command.upgrade(self.cfg, self.c, sql=True) assert re.match( (r"^BEGIN;\s+CREATE TABLE.*?%s.*" % self.a) + (r".*%s" % self.b) + (r".*%s.*?COMMIT;.*$" % self.c), buf.getvalue(), re.S, ) def test_begin_commit_nontransactional_ddl(self): with capture_context_buffer(transactional_ddl=False) as buf: command.upgrade(self.cfg, self.a, sql=True) assert re.match(r"^CREATE TABLE.*?\n+$", buf.getvalue(), re.S) assert "COMMIT;" not in buf.getvalue() def test_begin_commit_per_rev_ddl(self): with capture_context_buffer(transaction_per_migration=True) as buf: command.upgrade(self.cfg, self.c, sql=True) assert re.match( (r"^BEGIN;\s+CREATE TABLE.*%s.*?COMMIT;.*" % self.a) + (r"BEGIN;.*?%s.*?COMMIT;.*" % self.b) + (r"BEGIN;.*?%s.*?COMMIT;.*$" % self.c), buf.getvalue(), re.S, ) class OnlineTransactionalDDLTest(PatchEnvironment, TestBase): def tearDown(self): clear_staging_env() def _opened_transaction_fixture(self, future=False): self.env = staging_env() if future: self.cfg = _sqlite_testing_config(future=future) else: self.cfg = _sqlite_testing_config() if self.branched_connection: self._branched_connection_env() script = ScriptDirectory.from_config(self.cfg) a = util.rev_id() b = util.rev_id() c = util.rev_id() script.generate_revision(a, "revision a", refresh=True) write_script( script, a, """ "rev a" revision = '%s' down_revision = None def upgrade(): pass def downgrade(): pass """ % (a,), ) script.generate_revision(b, "revision b", refresh=True) write_script( script, b, """ "rev b" revision = '%s' down_revision = '%s' from alembic import op def upgrade(): conn = op.get_bind() # this should fail for a SQLAlchemy 2.0 connection b.c. there is # already a transaction. trans = conn.begin() def downgrade(): pass """ % (b, a), ) script.generate_revision(c, "revision c", refresh=True) write_script( script, c, """ "rev c" revision = '%s' down_revision = '%s' from alembic import op def upgrade(): pass def downgrade(): pass """ % (c, b), ) return a, b, c # these tests might not be supported anymore; the connection is always # going to be in a transaction now even on 1.3. 
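# the fixture's middle revision calls conn.begin() inside upgrade(); under SQLAlchemy 1.4 this either raises InvalidRequestError (future engines) or emits a 2.0 deprecation warning (legacy engines), while on 1.3 the upgrade completes without error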
def test_raise_when_rev_leaves_open_transaction(self): a, b, c = self._opened_transaction_fixture() with self._patch_environment( transactional_ddl=False, transaction_per_migration=False ): if config.requirements.sqlalchemy_14.enabled: if self.is_sqlalchemy_future: with testing.expect_raises_message( sa.exc.InvalidRequestError, r".*already", ): command.upgrade(self.cfg, c) else: with testing.expect_sqlalchemy_deprecated_20( r"Calling .begin\(\) when a transaction " "is already begun" ): command.upgrade(self.cfg, c) else: command.upgrade(self.cfg, c) def test_raise_when_rev_leaves_open_transaction_tpm(self): a, b, c = self._opened_transaction_fixture() with self._patch_environment( transactional_ddl=False, transaction_per_migration=True ): if config.requirements.sqlalchemy_14.enabled: if self.is_sqlalchemy_future: with testing.expect_raises_message( sa.exc.InvalidRequestError, r".*already", ): command.upgrade(self.cfg, c) else: with testing.expect_sqlalchemy_deprecated_20( r"Calling .begin\(\) when a transaction is " "already begun" ): command.upgrade(self.cfg, c) else: command.upgrade(self.cfg, c) def test_noerr_rev_leaves_open_transaction_transactional_ddl(self): a, b, c = self._opened_transaction_fixture() with self._patch_environment( transactional_ddl=True, transaction_per_migration=False ): if config.requirements.sqlalchemy_14.enabled: if self.is_sqlalchemy_future: with testing.expect_raises_message( sa.exc.InvalidRequestError, r".*already", ): command.upgrade(self.cfg, c) else: with testing.expect_sqlalchemy_deprecated_20( r"Calling .begin\(\) when a transaction " "is already begun" ): command.upgrade(self.cfg, c) else: command.upgrade(self.cfg, c) def test_noerr_transaction_opened_externally(self): a, b, c = self._opened_transaction_fixture() env_file_fixture( """ from sqlalchemy import engine_from_config, pool def run_migrations_online(): connectable = engine_from_config( config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool) with connectable.connect() as connection: with connection.begin() as real_trans: context.configure( connection=connection, transactional_ddl=False, transaction_per_migration=False ) with context.begin_transaction(): context.run_migrations() run_migrations_online() """ ) command.stamp(self.cfg, c) class BranchedOnlineTransactionalDDLTest(OnlineTransactionalDDLTest): __requires__ = ("sqlalchemy_1x",) branched_connection = True class FutureOnlineTransactionalDDLTest( FutureEngineMixin, OnlineTransactionalDDLTest ): pass class EncodingTest(TestBase): def setUp(self): self.env = staging_env() self.cfg = cfg = _no_sql_testing_config() cfg.set_main_option("dialect_name", "sqlite") cfg.remove_main_option("url") self.a = util.rev_id() script = ScriptDirectory.from_config(cfg) script.generate_revision(self.a, "revision a", refresh=True) write_script( script, self.a, ( """# coding: utf-8 from __future__ import unicode_literals revision = '%s' down_revision = None from alembic import op def upgrade(): op.execute("« S’il vous plaît…") def downgrade(): op.execute("drôle de petite voix m’a réveillé") """ % self.a ), encoding="utf-8", ) def tearDown(self): clear_staging_env() def test_encode(self): with capture_context_buffer( bytes_io=True, output_encoding="utf-8" ) as buf: command.upgrade(self.cfg, self.a, sql=True) assert "« S’il vous plaît…".encode("utf-8") in buf.getvalue() class VersionNameTemplateTest(TestBase): def setUp(self): self.env = staging_env() self.cfg = _sqlite_testing_config() def tearDown(self): clear_staging_env() 
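# file_template controls the filename given to newly generated revision scripts; tokens such as %(rev)s and %(slug)s are interpolated, and the doubled %% below escapes configparser interpolation.
# For reference, a minimal sketch of the equivalent alembic.ini setting (illustrative only):
#   [alembic]
#   file_template = %%(rev)s_%%(slug)s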
def test_option(self): self.cfg.set_main_option("file_template", "myfile_%%(slug)s") script = ScriptDirectory.from_config(self.cfg) a = util.rev_id() script.generate_revision(a, "some message", refresh=True) write_script( script, a, """ revision = '%s' down_revision = None from alembic import op def upgrade(): op.execute("CREATE TABLE foo(id integer)") def downgrade(): op.execute("DROP TABLE foo") """ % a, ) script = ScriptDirectory.from_config(self.cfg) rev = script.get_revision(a) eq_(rev.revision, a) eq_(os.path.basename(rev.path), "myfile_some_message.py") def test_lookup_legacy(self): self.cfg.set_main_option("file_template", "%%(rev)s") script = ScriptDirectory.from_config(self.cfg) a = util.rev_id() script.generate_revision(a, None, refresh=True) write_script( script, a, """ down_revision = None from alembic import op def upgrade(): op.execute("CREATE TABLE foo(id integer)") def downgrade(): op.execute("DROP TABLE foo") """, ) script = ScriptDirectory.from_config(self.cfg) rev = script.get_revision(a) eq_(rev.revision, a) eq_(os.path.basename(rev.path), "%s.py" % a) def test_error_on_new_with_missing_revision(self): self.cfg.set_main_option("file_template", "%%(slug)s_%%(rev)s") script = ScriptDirectory.from_config(self.cfg) a = util.rev_id() script.generate_revision(a, "foobar", refresh=True) path = script.get_revision(a).path with open(path, "w") as fp: fp.write( """ down_revision = None from alembic import op def upgrade(): op.execute("CREATE TABLE foo(id integer)") def downgrade(): op.execute("DROP TABLE foo") """ ) pyc_path = util.pyc_file_from_path(path) if pyc_path is not None and os.access(pyc_path, os.F_OK): os.unlink(pyc_path) assert_raises_message( util.CommandError, "Could not determine revision id from filename foobar_%s.py. " "Be sure the 'revision' variable is declared " "inside the script." 
% a, Script._from_path, script, path, ) class IgnoreFilesTest(TestBase): sourceless = False def setUp(self): self.bind = _sqlite_file_db() self.env = staging_env(sourceless=self.sourceless) self.cfg = _sqlite_testing_config(sourceless=self.sourceless) def tearDown(self): clear_staging_env() def _test_ignore_file_py(self, fname): command.revision(self.cfg, message="some rev") script = ScriptDirectory.from_config(self.cfg) path = os.path.join(script.versions, fname) with open(path, "w") as f: f.write("crap, crap -> crap") command.revision(self.cfg, message="another rev") script.get_revision("head") def _test_ignore_init_py(self, ext): """test that __init__.py is ignored.""" self._test_ignore_file_py("__init__.%s" % ext) def _test_ignore_dot_hash_py(self, ext): """test that .#test.py is ignored.""" self._test_ignore_file_py(".#test.%s" % ext) def test_ignore_init_py(self): self._test_ignore_init_py("py") def test_ignore_init_pyc(self): self._test_ignore_init_py("pyc") def test_ignore_init_pyx(self): self._test_ignore_init_py("pyx") def test_ignore_init_pyo(self): self._test_ignore_init_py("pyo") def test_ignore_dot_hash_py(self): self._test_ignore_dot_hash_py("py") def test_ignore_dot_hash_pyc(self): self._test_ignore_dot_hash_py("pyc") def test_ignore_dot_hash_pyx(self): self._test_ignore_dot_hash_py("pyx") def test_ignore_dot_hash_pyo(self): self._test_ignore_dot_hash_py("pyo") class SimpleSourcelessIgnoreFilesTest(IgnoreFilesTest): sourceless = "simple" class NewFangledEnvOnlySourcelessIgnoreFilesTest(IgnoreFilesTest): sourceless = "pep3147_envonly" class NewFangledEverythingSourcelessIgnoreFilesTest(IgnoreFilesTest): sourceless = "pep3147_everything" class SourcelessNeedsFlagTest(TestBase): def setUp(self): self.env = staging_env(sourceless=False) self.cfg = _sqlite_testing_config() def tearDown(self): clear_staging_env() def test_needs_flag(self): a = util.rev_id() script = ScriptDirectory.from_config(self.cfg) script.generate_revision(a, None, refresh=True) write_script( script, a, """ revision = '%s' down_revision = None from alembic import op def upgrade(): op.execute("CREATE TABLE foo(id integer)") def downgrade(): op.execute("DROP TABLE foo") """ % a, sourceless=True, ) script = ScriptDirectory.from_config(self.cfg) eq_(script.get_heads(), []) self.cfg.set_main_option("sourceless", "true") script = ScriptDirectory.from_config(self.cfg) eq_(script.get_heads(), [a]) alembic-rel_1_7_6/tests/test_script_production.py000066400000000000000000001234361417624537100224750ustar00rootroot00000000000000import datetime import os import re from dateutil import tz import sqlalchemy as sa from sqlalchemy import inspect from alembic import autogenerate from alembic import command from alembic import util from alembic.environment import EnvironmentContext from alembic.operations import ops from alembic.script import ScriptDirectory from alembic.testing import assert_raises_message from alembic.testing import assertions from alembic.testing import eq_ from alembic.testing import expect_raises_message from alembic.testing import is_ from alembic.testing import mock from alembic.testing import ne_ from alembic.testing.env import _get_staging_directory from alembic.testing.env import _multi_dir_testing_config from alembic.testing.env import _multidb_testing_config from alembic.testing.env import _no_sql_testing_config from alembic.testing.env import _sqlite_file_db from alembic.testing.env import _sqlite_testing_config from alembic.testing.env import _testing_config from alembic.testing.env import 
clear_staging_env from alembic.testing.env import env_file_fixture from alembic.testing.env import script_file_fixture from alembic.testing.env import staging_env from alembic.testing.env import three_rev_fixture from alembic.testing.env import write_script from alembic.testing.fixtures import TestBase from alembic.util import CommandError try: from unittest.mock import patch except ImportError: from mock import patch # noqa env, abc, def_ = None, None, None class GeneralOrderedTests(TestBase): def setUp(self): global env env = staging_env() def tearDown(self): clear_staging_env() def test_steps(self): self._test_001_environment() self._test_002_rev_ids() self._test_003_api_methods_clean() self._test_004_rev() self._test_005_nextrev() self._test_006_from_clean_env() self._test_007_long_name() self._test_008_long_name_configurable() def _test_001_environment(self): assert_set = set(["env.py", "script.py.mako", "README"]) eq_(assert_set.intersection(os.listdir(env.dir)), assert_set) def _test_002_rev_ids(self): global abc, def_ abc = util.rev_id() def_ = util.rev_id() ne_(abc, def_) def _test_003_api_methods_clean(self): eq_(env.get_heads(), []) eq_(env.get_base(), None) def _test_004_rev(self): script = env.generate_revision(abc, "this is a message", refresh=True) eq_(script.doc, "this is a message") eq_(script.revision, abc) eq_(script.down_revision, None) assert os.access( os.path.join(env.dir, "versions", "%s_this_is_a_message.py" % abc), os.F_OK, ) assert callable(script.module.upgrade) eq_(env.get_heads(), [abc]) eq_(env.get_base(), abc) def _test_005_nextrev(self): script = env.generate_revision( def_, "this is the next rev", refresh=True ) assert os.access( os.path.join( env.dir, "versions", "%s_this_is_the_next_rev.py" % def_ ), os.F_OK, ) eq_(script.revision, def_) eq_(script.down_revision, abc) eq_(env.get_revision(abc).nextrev, set([def_])) assert script.module.down_revision == abc assert callable(script.module.upgrade) assert callable(script.module.downgrade) eq_(env.get_heads(), [def_]) eq_(env.get_base(), abc) def _test_006_from_clean_env(self): # test the environment so far with a # new ScriptDirectory instance. 
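# create=False re-reads the scripts written by the previous steps instead of initializing a fresh environment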
env = staging_env(create=False) abc_rev = env.get_revision(abc) def_rev = env.get_revision(def_) eq_(abc_rev.nextrev, set([def_])) eq_(abc_rev.revision, abc) eq_(def_rev.down_revision, abc) eq_(env.get_heads(), [def_]) eq_(env.get_base(), abc) def _test_007_long_name(self): rid = util.rev_id() env.generate_revision( rid, "this is a really long name with " "lots of characters and also " "I'd like it to\nhave\nnewlines", ) assert os.access( os.path.join( env.dir, "versions", "%s_this_is_a_really_long_name_with_lots_of_.py" % rid, ), os.F_OK, ) def _test_008_long_name_configurable(self): env.truncate_slug_length = 60 rid = util.rev_id() env.generate_revision( rid, "this is a really long name with " "lots of characters and also " "I'd like it to\nhave\nnewlines", ) assert os.access( os.path.join( env.dir, "versions", "%s_this_is_a_really_long_name_with_lots_" "of_characters_and_also_.py" % rid, ), os.F_OK, ) class ScriptNamingTest(TestBase): @classmethod def setup_class(cls): _testing_config() @classmethod def teardown_class(cls): clear_staging_env() def test_args(self): script = ScriptDirectory( _get_staging_directory(), file_template="%(rev)s_%(slug)s_" "%(year)s_%(month)s_" "%(day)s_%(hour)s_" "%(minute)s_%(second)s", ) create_date = datetime.datetime(2012, 7, 25, 15, 8, 5) eq_( script._rev_path( script.versions, "12345", "this is a message", create_date ), os.path.abspath( "%s/versions/12345_this_is_a_" "message_2012_7_25_15_8_5.py" % _get_staging_directory() ), ) def _test_tz(self, timezone_arg, given, expected): script = ScriptDirectory( _get_staging_directory(), file_template="%(rev)s_%(slug)s_" "%(year)s_%(month)s_" "%(day)s_%(hour)s_" "%(minute)s_%(second)s", timezone=timezone_arg, ) with mock.patch( "alembic.script.base.datetime", mock.Mock( datetime=mock.Mock(utcnow=lambda: given, now=lambda: given) ), ): create_date = script._generate_create_date() eq_(create_date, expected) def test_custom_tz(self): self._test_tz( "EST5EDT", datetime.datetime(2012, 7, 25, 15, 8, 5), datetime.datetime( 2012, 7, 25, 11, 8, 5, tzinfo=tz.gettz("EST5EDT") ), ) def test_custom_tz_lowercase(self): self._test_tz( "est5edt", datetime.datetime(2012, 7, 25, 15, 8, 5), datetime.datetime( 2012, 7, 25, 11, 8, 5, tzinfo=tz.gettz("EST5EDT") ), ) def test_custom_tz_utc(self): self._test_tz( "utc", datetime.datetime(2012, 7, 25, 15, 8, 5), datetime.datetime(2012, 7, 25, 15, 8, 5, tzinfo=tz.gettz("UTC")), ) def test_custom_tzdata_tz(self): self._test_tz( "Europe/Berlin", datetime.datetime(2012, 7, 25, 15, 8, 5), datetime.datetime( 2012, 7, 25, 17, 8, 5, tzinfo=tz.gettz("Europe/Berlin") ), ) def test_default_tz(self): self._test_tz( None, datetime.datetime(2012, 7, 25, 15, 8, 5), datetime.datetime(2012, 7, 25, 15, 8, 5), ) def test_tz_cant_locate(self): assert_raises_message( CommandError, "Can't locate timezone: fake", self._test_tz, "fake", datetime.datetime(2012, 7, 25, 15, 8, 5), datetime.datetime(2012, 7, 25, 15, 8, 5), ) def test_no_dateutil_module(self): with patch("alembic.script.base.tz", new=None): with expect_raises_message( CommandError, "The library 'python-dateutil' is required" ): self._test_tz( "utc", datetime.datetime(2012, 7, 25, 15, 8, 5), datetime.datetime(2012, 7, 25, 15, 8, 5), ) class RevisionCommandTest(TestBase): def setUp(self): self.env = staging_env() self.cfg = _sqlite_testing_config() self.a, self.b, self.c = three_rev_fixture(self.cfg) def tearDown(self): clear_staging_env() def test_create_script_basic(self): rev = command.revision(self.cfg, message="some message") script = 
ScriptDirectory.from_config(self.cfg) rev = script.get_revision(rev.revision) eq_(rev.down_revision, self.c) assert "some message" in rev.doc def test_create_script_splice(self): rev = command.revision( self.cfg, message="some message", head=self.b, splice=True ) script = ScriptDirectory.from_config(self.cfg) rev = script.get_revision(rev.revision) eq_(rev.down_revision, self.b) assert "some message" in rev.doc eq_(set(script.get_heads()), set([rev.revision, self.c])) def test_create_script_missing_splice(self): assert_raises_message( util.CommandError, "Revision %s is not a head revision; please specify --splice " "to create a new branch from this revision" % self.b, command.revision, self.cfg, message="some message", head=self.b, ) def test_illegal_revision_chars(self): assert_raises_message( util.CommandError, r"Character\(s\) '-' not allowed in " "revision identifier 'no-dashes'", command.revision, self.cfg, message="some message", rev_id="no-dashes", ) assert not os.path.exists( os.path.join(self.env.dir, "versions", "no-dashes_some_message.py") ) assert_raises_message( util.CommandError, r"Character\(s\) '@' not allowed in " "revision identifier 'no@atsigns'", command.revision, self.cfg, message="some message", rev_id="no@atsigns", ) assert_raises_message( util.CommandError, r"Character\(s\) '-, @' not allowed in revision " "identifier 'no@atsigns-ordashes'", command.revision, self.cfg, message="some message", rev_id="no@atsigns-ordashes", ) assert_raises_message( util.CommandError, r"Character\(s\) '\+' not allowed in revision " r"identifier 'no\+plussignseither'", command.revision, self.cfg, message="some message", rev_id="no+plussignseither", ) def test_create_script_branches(self): rev = command.revision( self.cfg, message="some message", branch_label="foobar" ) script = ScriptDirectory.from_config(self.cfg) rev = script.get_revision(rev.revision) eq_(script.get_revision("foobar"), rev) def test_create_script_branches_old_template(self): script = ScriptDirectory.from_config(self.cfg) with open(os.path.join(script.dir, "script.py.mako"), "w") as file_: file_.write( "<%text># ${message}\n" "revision = ${repr(up_revision)}\n" "down_revision = ${repr(down_revision)}\n\n" "def upgrade():\n" " ${upgrades if upgrades else 'pass'}\n\n" "def downgrade():\n" " ${downgrade if downgrades else 'pass'}\n\n" ) # works OK if no branch names command.revision(self.cfg, message="some message") assert_raises_message( util.CommandError, r"Version \w+ specified branch_labels foobar, " r"however the migration file .+?\b does not have them; have you " "upgraded your script.py.mako to include the 'branch_labels' " r"section\?", command.revision, self.cfg, message="some message", branch_label="foobar", ) class CustomizeRevisionTest(TestBase): def setUp(self): self.env = staging_env() self.cfg = _multi_dir_testing_config() self.cfg.set_main_option("revision_environment", "true") script = ScriptDirectory.from_config(self.cfg) self.model1 = util.rev_id() self.model2 = util.rev_id() self.model3 = util.rev_id() for model, name in [ (self.model1, "model1"), (self.model2, "model2"), (self.model3, "model3"), ]: script.generate_revision( model, name, refresh=True, version_path=os.path.join(_get_staging_directory(), name), head="base", ) write_script( script, model, """\ "%s" revision = '%s' down_revision = None branch_labels = ['%s'] from alembic import op def upgrade(): pass def downgrade(): pass """ % (name, model, name), ) def tearDown(self): clear_staging_env() def _env_fixture(self, fn, target_metadata): 
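# patch ScriptDirectory.run_env so env.py is bypassed: migrations run against a file-based SQLite engine configured with the supplied process_revision_directives hook and target metadata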
self.engine = engine = _sqlite_file_db() def run_env(self): from alembic import context with engine.connect() as connection: context.configure( connection=connection, target_metadata=target_metadata, process_revision_directives=fn, ) with context.begin_transaction(): context.run_migrations() return mock.patch( "alembic.script.base.ScriptDirectory.run_env", run_env ) def test_new_locations_no_autogen(self): m = sa.MetaData() def process_revision_directives(context, rev, generate_revisions): generate_revisions[:] = [ ops.MigrationScript( util.rev_id(), ops.UpgradeOps(), ops.DowngradeOps(), version_path=os.path.join( _get_staging_directory(), "model1" ), head="model1@head", ), ops.MigrationScript( util.rev_id(), ops.UpgradeOps(), ops.DowngradeOps(), version_path=os.path.join( _get_staging_directory(), "model2" ), head="model2@head", ), ops.MigrationScript( util.rev_id(), ops.UpgradeOps(), ops.DowngradeOps(), version_path=os.path.join( _get_staging_directory(), "model3" ), head="model3@head", ), ] with self._env_fixture(process_revision_directives, m): revs = command.revision(self.cfg, message="some message") script = ScriptDirectory.from_config(self.cfg) for rev, model in [ (revs[0], "model1"), (revs[1], "model2"), (revs[2], "model3"), ]: rev_script = script.get_revision(rev.revision) eq_( rev_script.path, os.path.abspath( os.path.join( _get_staging_directory(), model, "%s_.py" % (rev_script.revision,), ) ), ) assert os.path.exists(rev_script.path) def test_renders_added_directives_no_autogen(self): m = sa.MetaData() def process_revision_directives(context, rev, generate_revisions): generate_revisions[0].upgrade_ops.ops.append( ops.CreateIndexOp("some_index", "some_table", ["a", "b"]) ) with self._env_fixture(process_revision_directives, m): rev = command.revision( self.cfg, message="some message", head="model1@head", sql=True ) with mock.patch.object(rev.module, "op") as op_mock: rev.module.upgrade() eq_( op_mock.mock_calls, [ mock.call.create_index( "some_index", "some_table", ["a", "b"], unique=False ) ], ) def test_autogen(self): m = sa.MetaData() sa.Table("t", m, sa.Column("x", sa.Integer)) def process_revision_directives(context, rev, generate_revisions): existing_upgrades = generate_revisions[0].upgrade_ops existing_downgrades = generate_revisions[0].downgrade_ops # model1 will run the upgrades, e.g. create the table, # model2 will run the downgrades as upgrades, e.g. 
drop # the table again generate_revisions[:] = [ ops.MigrationScript( util.rev_id(), existing_upgrades, ops.DowngradeOps(), version_path=os.path.join( _get_staging_directory(), "model1" ), head="model1@head", ), ops.MigrationScript( util.rev_id(), ops.UpgradeOps(ops=existing_downgrades.ops), ops.DowngradeOps(), version_path=os.path.join( _get_staging_directory(), "model2" ), head="model2@head", ), ] with self._env_fixture(process_revision_directives, m): command.upgrade(self.cfg, "heads") eq_(inspect(self.engine).get_table_names(), ["alembic_version"]) command.revision( self.cfg, message="some message", autogenerate=True ) command.upgrade(self.cfg, "model1@head") eq_( inspect(self.engine).get_table_names(), ["alembic_version", "t"], ) command.upgrade(self.cfg, "model2@head") eq_(inspect(self.engine).get_table_names(), ["alembic_version"]) def test_programmatic_command_option(self): def process_revision_directives(context, rev, generate_revisions): generate_revisions[0].message = "test programatic" generate_revisions[0].upgrade_ops = ops.UpgradeOps( ops=[ ops.CreateTableOp( "test_table", [ sa.Column("id", sa.Integer(), primary_key=True), sa.Column("name", sa.String(50), nullable=False), ], ) ] ) generate_revisions[0].downgrade_ops = ops.DowngradeOps( ops=[ops.DropTableOp("test_table")] ) with self._env_fixture(None, None): rev = command.revision( self.cfg, head="model1@head", process_revision_directives=process_revision_directives, ) with open(rev.path) as handle: result = handle.read() assert ( ( """ def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('test_table', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(length=50), nullable=False), sa.PrimaryKeyConstraint('id') ) # ### end Alembic commands ### """ ) in result ) class ScriptAccessorTest(TestBase): def test_upgrade_downgrade_ops_list_accessors(self): u1 = ops.UpgradeOps(ops=[]) d1 = ops.DowngradeOps(ops=[]) m1 = ops.MigrationScript("somerev", u1, d1) is_(m1.upgrade_ops, u1) is_(m1.downgrade_ops, d1) u2 = ops.UpgradeOps(ops=[]) d2 = ops.DowngradeOps(ops=[]) m1._upgrade_ops.append(u2) m1._downgrade_ops.append(d2) assert_raises_message( ValueError, "This MigrationScript instance has a multiple-entry list for " "UpgradeOps; please use the upgrade_ops_list attribute.", getattr, m1, "upgrade_ops", ) assert_raises_message( ValueError, "This MigrationScript instance has a multiple-entry list for " "DowngradeOps; please use the downgrade_ops_list attribute.", getattr, m1, "downgrade_ops", ) eq_(m1.upgrade_ops_list, [u1, u2]) eq_(m1.downgrade_ops_list, [d1, d2]) class ImportsTest(TestBase): def setUp(self): self.env = staging_env() self.cfg = _sqlite_testing_config() def tearDown(self): clear_staging_env() def _env_fixture(self, target_metadata, **kw): self.engine = engine = _sqlite_file_db() def run_env(self): from alembic import context with engine.connect() as connection: context.configure( connection=connection, target_metadata=target_metadata, **kw ) with context.begin_transaction(): context.run_migrations() return mock.patch( "alembic.script.base.ScriptDirectory.run_env", run_env ) def test_imports_in_script(self): from sqlalchemy import MetaData, Table, Column from sqlalchemy.dialects.mysql import VARCHAR type_ = VARCHAR(20, charset="utf8", national=True) m = MetaData() Table("t", m, Column("x", type_)) def process_revision_directives(context, rev, generate_revisions): generate_revisions[0].imports.add( "from sqlalchemy.dialects.mysql import TINYINT" ) with 
self._env_fixture( m, process_revision_directives=process_revision_directives ): rev = command.revision( self.cfg, message="some message", autogenerate=True ) with open(rev.path) as file_: contents = file_.read() assert "from sqlalchemy.dialects import mysql" in contents assert "from sqlalchemy.dialects.mysql import TINYINT" in contents class MultiContextTest(TestBase): """test the multidb template for autogenerate front-to-back""" def setUp(self): self.engine1 = _sqlite_file_db(tempname="eng1.db") self.engine2 = _sqlite_file_db(tempname="eng2.db") self.engine3 = _sqlite_file_db(tempname="eng3.db") self.env = staging_env(template="multidb") self.cfg = _multidb_testing_config( { "engine1": self.engine1, "engine2": self.engine2, "engine3": self.engine3, } ) def _write_metadata(self, meta): path = os.path.join(_get_staging_directory(), "scripts", "env.py") with open(path) as env_: existing_env = env_.read() existing_env = existing_env.replace("target_metadata = {}", meta) with open(path, "w") as env_: env_.write(existing_env) def tearDown(self): clear_staging_env() def test_autogen(self): self._write_metadata( """ import sqlalchemy as sa m1 = sa.MetaData() m2 = sa.MetaData() m3 = sa.MetaData() target_metadata = {"engine1": m1, "engine2": m2, "engine3": m3} sa.Table('e1t1', m1, sa.Column('x', sa.Integer)) sa.Table('e2t1', m2, sa.Column('y', sa.Integer)) sa.Table('e3t1', m3, sa.Column('z', sa.Integer)) """ ) rev = command.revision( self.cfg, message="some message", autogenerate=True ) with mock.patch.object(rev.module, "op") as op_mock: rev.module.upgrade_engine1() eq_( op_mock.mock_calls[-1], mock.call.create_table("e1t1", mock.ANY), ) rev.module.upgrade_engine2() eq_( op_mock.mock_calls[-1], mock.call.create_table("e2t1", mock.ANY), ) rev.module.upgrade_engine3() eq_( op_mock.mock_calls[-1], mock.call.create_table("e3t1", mock.ANY), ) rev.module.downgrade_engine1() eq_(op_mock.mock_calls[-1], mock.call.drop_table("e1t1")) rev.module.downgrade_engine2() eq_(op_mock.mock_calls[-1], mock.call.drop_table("e2t1")) rev.module.downgrade_engine3() eq_(op_mock.mock_calls[-1], mock.call.drop_table("e3t1")) class RewriterTest(TestBase): def test_all_traverse(self): writer = autogenerate.Rewriter() mocker = mock.Mock(side_effect=lambda context, revision, op: op) writer.rewrites(ops.MigrateOperation)(mocker) addcolop = ops.AddColumnOp("t1", sa.Column("x", sa.Integer())) directives = [ ops.MigrationScript( util.rev_id(), ops.UpgradeOps(ops=[ops.ModifyTableOps("t1", ops=[addcolop])]), ops.DowngradeOps(ops=[]), ) ] ctx, rev = mock.Mock(), mock.Mock() writer(ctx, rev, directives) eq_( mocker.mock_calls, [ mock.call(ctx, rev, directives[0]), mock.call(ctx, rev, directives[0].upgrade_ops), mock.call(ctx, rev, directives[0].upgrade_ops.ops[0]), mock.call(ctx, rev, addcolop), mock.call(ctx, rev, directives[0].downgrade_ops), ], ) def test_double_migrate_table(self): writer = autogenerate.Rewriter() idx_ops = [] @writer.rewrites(ops.ModifyTableOps) def second_table(context, revision, op): return [ op, ops.ModifyTableOps( "t2", ops=[ops.AddColumnOp("t2", sa.Column("x", sa.Integer()))], ), ] @writer.rewrites(ops.AddColumnOp) def add_column(context, revision, op): idx_op = ops.CreateIndexOp("ixt", op.table_name, [op.column.name]) idx_ops.append(idx_op) return [op, idx_op] directives = [ ops.MigrationScript( util.rev_id(), ops.UpgradeOps( ops=[ ops.ModifyTableOps( "t1", ops=[ ops.AddColumnOp( "t1", sa.Column("x", sa.Integer()) ) ], ) ] ), ops.DowngradeOps(ops=[]), ) ] ctx, rev = mock.Mock(), mock.Mock() writer(ctx, 
rev, directives) eq_( [d.table_name for d in directives[0].upgrade_ops.ops], ["t1", "t2"] ) is_(directives[0].upgrade_ops.ops[0].ops[1], idx_ops[0]) is_(directives[0].upgrade_ops.ops[1].ops[1], idx_ops[1]) def test_chained_ops(self): writer1 = autogenerate.Rewriter() writer2 = autogenerate.Rewriter() @writer1.rewrites(ops.AddColumnOp) def add_column_nullable(context, revision, op): if op.column.nullable: return op else: op.column.nullable = True return [ op, ops.AlterColumnOp( op.table_name, op.column.name, modify_nullable=False, existing_type=op.column.type, ), ] @writer2.rewrites(ops.AddColumnOp) def add_column_idx(context, revision, op): idx_op = ops.CreateIndexOp("ixt", op.table_name, [op.column.name]) return [op, idx_op] directives = [ ops.MigrationScript( util.rev_id(), ops.UpgradeOps( ops=[ ops.ModifyTableOps( "t1", ops=[ ops.AddColumnOp( "t1", sa.Column( "x", sa.Integer(), nullable=False ), ) ], ) ] ), ops.DowngradeOps(ops=[]), ) ] ctx, rev = mock.Mock(), mock.Mock() writer1.chain(writer2)(ctx, rev, directives) eq_( autogenerate.render_python_code(directives[0].upgrade_ops), "# ### commands auto generated by Alembic - please adjust! ###\n" " op.add_column('t1', " "sa.Column('x', sa.Integer(), nullable=True))\n" " op.create_index('ixt', 't1', ['x'], unique=False)\n" " op.alter_column('t1', 'x',\n" " existing_type=sa.Integer(),\n" " nullable=False)\n" " # ### end Alembic commands ###", ) def test_no_needless_pass(self): writer1 = autogenerate.Rewriter() @writer1.rewrites(ops.AlterColumnOp) def rewrite_alter_column(context, revision, op): return [] directives = [ ops.MigrationScript( util.rev_id(), ops.UpgradeOps( ops=[ ops.ModifyTableOps( "t1", ops=[ ops.AlterColumnOp( "foo", "bar", modify_nullable=False, existing_type=sa.Integer(), ), ops.AlterColumnOp( "foo", "bar", modify_nullable=False, existing_type=sa.Integer(), ), ], ), ops.ModifyTableOps( "t1", ops=[ ops.AlterColumnOp( "foo", "bar", modify_nullable=False, existing_type=sa.Integer(), ) ], ), ] ), ops.DowngradeOps(ops=[]), ) ] ctx, rev = mock.Mock(), mock.Mock() writer1(ctx, rev, directives) eq_( autogenerate.render_python_code(directives[0].upgrade_ops), "# ### commands auto generated by Alembic - please adjust! ###\n" " pass\n" " # ### end Alembic commands ###", ) def test_multiple_passes_with_mutations(self): writer1 = autogenerate.Rewriter() @writer1.rewrites(ops.CreateTableOp) def rewrite_alter_column(context, revision, op): op.table_name += "_pass" return op directives = [ ops.MigrationScript( util.rev_id(), ops.UpgradeOps( ops=[ ops.CreateTableOp( "test_table", [sa.Column("id", sa.Integer(), primary_key=True)], ) ] ), ops.DowngradeOps(ops=[]), ) ] ctx, rev = mock.Mock(), mock.Mock() writer1(ctx, rev, directives) directives[0].upgrade_ops_list.extend( [ ops.UpgradeOps( ops=[ ops.CreateTableOp( "another_test_table", [sa.Column("id", sa.Integer(), primary_key=True)], ) ] ), ops.UpgradeOps( ops=[ ops.CreateTableOp( "third_test_table", [sa.Column("id", sa.Integer(), primary_key=True)], ) ] ), ] ) writer1(ctx, rev, directives) eq_( autogenerate.render_python_code(directives[0].upgrade_ops_list[0]), "# ### commands auto generated by Alembic - please adjust! ###\n" " op.create_table('test_table_pass',\n" " sa.Column('id', sa.Integer(), nullable=False),\n" " sa.PrimaryKeyConstraint('id')\n" " )\n" " # ### end Alembic commands ###", ) eq_( autogenerate.render_python_code(directives[0].upgrade_ops_list[1]), "# ### commands auto generated by Alembic - please adjust! 
###\n" " op.create_table('another_test_table_pass',\n" " sa.Column('id', sa.Integer(), nullable=False),\n" " sa.PrimaryKeyConstraint('id')\n" " )\n" " # ### end Alembic commands ###", ) eq_( autogenerate.render_python_code(directives[0].upgrade_ops_list[2]), "# ### commands auto generated by Alembic - please adjust! ###\n" " op.create_table('third_test_table_pass',\n" " sa.Column('id', sa.Integer(), nullable=False),\n" " sa.PrimaryKeyConstraint('id')\n" " )\n" " # ### end Alembic commands ###", ) class MultiDirRevisionCommandTest(TestBase): def setUp(self): self.env = staging_env() self.cfg = _multi_dir_testing_config() def tearDown(self): clear_staging_env() def test_multiple_dir_no_bases(self): assert_raises_message( util.CommandError, "Multiple version locations present, please specify " "--version-path", command.revision, self.cfg, message="some message", ) def test_multiple_dir_no_bases_invalid_version_path(self): assert_raises_message( util.CommandError, "Path foo/bar/ is not represented in current version locations", command.revision, self.cfg, message="x", version_path=os.path.join("foo/bar/"), ) def test_multiple_dir_no_bases_version_path(self): script = command.revision( self.cfg, message="x", version_path=os.path.join(_get_staging_directory(), "model1"), ) assert os.access(script.path, os.F_OK) def test_multiple_dir_chooses_base(self): command.revision( self.cfg, message="x", head="base", version_path=os.path.join(_get_staging_directory(), "model1"), ) script2 = command.revision( self.cfg, message="y", head="base", version_path=os.path.join(_get_staging_directory(), "model2"), ) script3 = command.revision( self.cfg, message="y2", head=script2.revision ) eq_( os.path.dirname(script3.path), os.path.abspath(os.path.join(_get_staging_directory(), "model2")), ) assert os.access(script3.path, os.F_OK) class TemplateArgsTest(TestBase): def setUp(self): staging_env() self.cfg = _no_sql_testing_config( directives="\nrevision_environment=true\n" ) def tearDown(self): clear_staging_env() def test_args_propagate(self): config = _no_sql_testing_config() script = ScriptDirectory.from_config(config) template_args = {"x": "x1", "y": "y1", "z": "z1"} env = EnvironmentContext(config, script, template_args=template_args) env.configure( dialect_name="sqlite", template_args={"y": "y2", "q": "q1"} ) eq_(template_args, {"x": "x1", "y": "y2", "z": "z1", "q": "q1"}) def test_tmpl_args_revision(self): env_file_fixture( """ context.configure(dialect_name='sqlite', template_args={"somearg":"somevalue"}) """ ) script_file_fixture( """ # somearg: ${somearg} revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} """ ) command.revision(self.cfg, message="some rev") script = ScriptDirectory.from_config(self.cfg) rev = script.get_revision("head") with open(rev.path) as f: text = f.read() assert "somearg: somevalue" in text def test_bad_render(self): env_file_fixture( """ context.configure(dialect_name='sqlite', template_args={"somearg":"somevalue"}) """ ) script_file_fixture( """ <% z = x + y %> """ ) try: command.revision(self.cfg, message="some rev") except CommandError as ce: m = re.match( r"^Template rendering failed; see (.+?) 
" "for a template-oriented", str(ce), ) assert m, "Command error did not produce a file" with open(m.group(1)) as handle: contents = handle.read() os.remove(m.group(1)) assert "<% z = x + y %>" in contents class DuplicateVersionLocationsTest(TestBase): def setUp(self): self.env = staging_env() self.cfg = _multi_dir_testing_config( # this is a duplicate of one of the paths # already present in this fixture extra_version_location="%(here)s/model1" ) script = ScriptDirectory.from_config(self.cfg) self.model1 = "ccc" + util.rev_id() self.model2 = "bbb" + util.rev_id() self.model3 = "aaa" + util.rev_id() for model, name in [ (self.model1, "model1"), (self.model2, "model2"), (self.model3, "model3"), ]: script.generate_revision( model, name, refresh=True, version_path=os.path.join(_get_staging_directory(), name), head="base", ) write_script( script, model, """\ "%s" revision = '%s' down_revision = None branch_labels = ['%s'] from alembic import op def upgrade(): pass def downgrade(): pass """ % (name, model, name), ) def tearDown(self): clear_staging_env() def test_env_emits_warning(self): msg = ( "File %s loaded twice! ignoring. " "Please ensure version_locations is unique." % ( os.path.realpath( os.path.join( _get_staging_directory(), "model1", "%s_model1.py" % self.model1, ) ) ) ) with assertions.expect_warnings(msg, regex=False): script = ScriptDirectory.from_config(self.cfg) script.revision_map.heads eq_( [rev.revision for rev in script.walk_revisions()], [self.model1, self.model2, self.model3], ) class NormPathTest(TestBase): def setUp(self): self.env = staging_env() def tearDown(self): clear_staging_env() def test_script_location(self): config = _no_sql_testing_config() script = ScriptDirectory.from_config(config) def normpath(path): return path.replace("/", ":NORM:") normpath = mock.Mock(side_effect=normpath) with mock.patch("os.path.normpath", normpath): eq_( script._version_locations, ( os.path.abspath( os.path.join( _get_staging_directory(), "scripts", "versions" ) ).replace("/", ":NORM:"), ), ) eq_( script.versions, os.path.abspath( os.path.join( _get_staging_directory(), "scripts", "versions" ) ).replace("/", ":NORM:"), ) def test_script_location_muliple(self): config = _multi_dir_testing_config() script = ScriptDirectory.from_config(config) def normpath(path): return path.replace("/", ":NORM:") normpath = mock.Mock(side_effect=normpath) with mock.patch("os.path.normpath", normpath): eq_( script._version_locations, [ os.path.abspath( os.path.join(_get_staging_directory(), "model1/") ).replace("/", ":NORM:"), os.path.abspath( os.path.join(_get_staging_directory(), "model2/") ).replace("/", ":NORM:"), os.path.abspath( os.path.join(_get_staging_directory(), "model3/") ).replace("/", ":NORM:"), ], ) alembic-rel_1_7_6/tests/test_sqlite.py000066400000000000000000000211001417624537100202050ustar00rootroot00000000000000from sqlalchemy import Boolean from sqlalchemy import Column from sqlalchemy import DateTime from sqlalchemy import Float from sqlalchemy import func from sqlalchemy import inspect from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import String from sqlalchemy import Table from sqlalchemy import text from sqlalchemy.sql import column from alembic import autogenerate from alembic import op from alembic.autogenerate import api from alembic.autogenerate.compare import _compare_server_default from alembic.migration import MigrationContext from alembic.operations import ops from alembic.testing import assert_raises_message from alembic.testing import 
config from alembic.testing import eq_ from alembic.testing import eq_ignore_whitespace from alembic.testing.env import clear_staging_env from alembic.testing.env import staging_env from alembic.testing.fixtures import op_fixture from alembic.testing.fixtures import TestBase class SQLiteTest(TestBase): def test_add_column(self): context = op_fixture("sqlite") op.add_column("t1", Column("c1", Integer)) context.assert_("ALTER TABLE t1 ADD COLUMN c1 INTEGER") def test_add_column_implicit_constraint(self): context = op_fixture("sqlite") op.add_column("t1", Column("c1", Boolean)) context.assert_("ALTER TABLE t1 ADD COLUMN c1 BOOLEAN") def test_add_explicit_constraint(self): op_fixture("sqlite") assert_raises_message( NotImplementedError, "No support for ALTER of constraints in SQLite dialect", op.create_check_constraint, "foo", "sometable", column("name") > 5, ) def test_drop_explicit_constraint(self): op_fixture("sqlite") assert_raises_message( NotImplementedError, "No support for ALTER of constraints in SQLite dialect", op.drop_constraint, "foo", "sometable", ) @config.requirements.comments def test_create_table_with_comment_ignored(self): context = op_fixture("sqlite") op.create_table( "t2", Column("c1", Integer, primary_key=True), Column("c2", Integer), comment="This is a table comment", ) context.assert_( "CREATE TABLE t2 (c1 INTEGER NOT NULL, " "c2 INTEGER, PRIMARY KEY (c1))" ) @config.requirements.comments def test_add_column_with_comment_ignored(self): context = op_fixture("sqlite") op.add_column("t1", Column("c1", Integer, comment="c1 comment")) context.assert_("ALTER TABLE t1 ADD COLUMN c1 INTEGER") class SQLiteDefaultCompareTest(TestBase): __only_on__ = "sqlite" __backend__ = True @classmethod def setup_class(cls): cls.bind = config.db staging_env() cls.migration_context = MigrationContext.configure( connection=cls.bind.connect(), opts={"compare_type": True, "compare_server_default": True}, ) def setUp(self): self.metadata = MetaData() self.autogen_context = api.AutogenContext(self.migration_context) @classmethod def teardown_class(cls): clear_staging_env() def tearDown(self): self.metadata.drop_all(config.db) def _compare_default_roundtrip( self, type_, orig_default, alternate=None, diff_expected=None ): diff_expected = ( diff_expected if diff_expected is not None else alternate is not None ) if alternate is None: alternate = orig_default t1 = Table( "test", self.metadata, Column("somecol", type_, server_default=orig_default), ) t2 = Table( "test", MetaData(), Column("somecol", type_, server_default=alternate), ) t1.create(self.bind) insp = inspect(self.bind) cols = insp.get_columns(t1.name) insp_col = Column( "somecol", cols[0]["type"], server_default=text(cols[0]["default"]) ) op = ops.AlterColumnOp("test", "somecol") _compare_server_default( self.autogen_context, op, None, "test", "somecol", insp_col, t2.c.somecol, ) diffs = op.to_diff_tuple() eq_(bool(diffs), diff_expected) def _compare_default(self, t1, t2, col, rendered): t1.create(self.bind, checkfirst=True) insp = inspect(self.bind) cols = insp.get_columns(t1.name) ctx = self.autogen_context.migration_context return ctx.impl.compare_server_default( None, col, rendered, cols[0]["default"] ) def test_compare_current_timestamp_func(self): self._compare_default_roundtrip( DateTime(), func.datetime("now", "localtime") ) def test_compare_current_timestamp_func_now(self): self._compare_default_roundtrip(DateTime(), func.now()) def test_compare_current_timestamp_text(self): # SQLAlchemy doesn't render the parenthesis for a # SQLite 
server default specified as text(), so users will be doing # this; sqlite comparison needs to accommodate for these. self._compare_default_roundtrip( DateTime(), text("(datetime('now', 'localtime'))") ) def test_compare_integer_str(self): self._compare_default_roundtrip(Integer(), "5") def test_compare_integer_str_diff(self): self._compare_default_roundtrip(Integer(), "5", "7") def test_compare_integer_text(self): self._compare_default_roundtrip(Integer(), text("5")) def test_compare_integer_text_diff(self): self._compare_default_roundtrip(Integer(), text("5"), "7") def test_compare_float_str(self): self._compare_default_roundtrip(Float(), "5.2") def test_compare_float_str_diff(self): self._compare_default_roundtrip(Float(), "5.2", "5.3") def test_compare_float_text(self): self._compare_default_roundtrip(Float(), text("5.2")) def test_compare_float_text_diff(self): self._compare_default_roundtrip(Float(), text("5.2"), "5.3") def test_compare_string_literal(self): self._compare_default_roundtrip(String(), "im a default") def test_compare_string_literal_diff(self): self._compare_default_roundtrip(String(), "im a default", "me too") class SQLiteAutogenRenderTest(TestBase): def setUp(self): ctx_opts = { "sqlalchemy_module_prefix": "sa.", "alembic_module_prefix": "op.", "target_metadata": MetaData(), } context = MigrationContext.configure( dialect_name="sqlite", opts=ctx_opts ) self.autogen_context = api.AutogenContext(context) def test_render_server_default_expr_needs_parens(self): c = Column( "date_value", DateTime(), server_default=func.datetime("now", "localtime"), ) result = autogenerate.render._render_column(c, self.autogen_context) eq_ignore_whitespace( result, "sa.Column('date_value', sa.DateTime(), " "server_default=sa.text(\"(datetime('now', 'localtime'))\"), " "nullable=True)", ) def test_render_server_default_text_expr_needs_parens(self): c = Column( "date_value", DateTime(), server_default=text("(datetime('now', 'localtime'))"), ) result = autogenerate.render._render_column(c, self.autogen_context) eq_ignore_whitespace( result, "sa.Column('date_value', sa.DateTime(), " "server_default=sa.text(\"(datetime('now', 'localtime'))\"), " "nullable=True)", ) def test_render_server_default_const(self): c = Column("int_value", Integer, server_default="5") result = autogenerate.render._render_column(c, self.autogen_context) eq_ignore_whitespace( result, "sa.Column('int_value', sa.Integer(), server_default='5', " "nullable=True)", ) @config.requirements.sqlalchemy_13 def test_render_add_column_w_on_conflict(self): c = Column("int_value", Integer, sqlite_on_conflict_not_null="FAIL") result = autogenerate.render._render_column(c, self.autogen_context) eq_ignore_whitespace( result, "sa.Column('int_value', sa.Integer(), " "nullable=True, sqlite_on_conflict_not_null='FAIL')", ) alembic-rel_1_7_6/tests/test_stubs.py000066400000000000000000000025031417624537100200520ustar00rootroot00000000000000import difflib from pathlib import Path import subprocess import sys import alembic from alembic.testing import eq_ from alembic.testing import TestBase _home = Path(__file__).parent.parent def run_command(file): res = subprocess.run( [ sys.executable, str((_home / "tools" / "write_pyi.py").relative_to(_home)), "--stdout", "--file", file, ], stdout=subprocess.PIPE, cwd=_home, encoding="utf-8", ) return res class TestStubFiles(TestBase): __requires__ = ("stubs_test",) def test_op_pyi(self): res = run_command("op") generated = res.stdout file_path = Path(alembic.__file__).parent / "op.pyi" expected = 
file_path.read_text() eq_(generated, expected, compare(generated, expected)) def test_context_pyi(self): res = run_command("context") generated = res.stdout file_path = Path(alembic.__file__).parent / "context.pyi" expected = file_path.read_text() eq_(generated, expected, compare(generated, expected)) def compare(actual: str, expected: str): diff = difflib.unified_diff( actual.splitlines(), expected.splitlines(), fromfile="generated", tofile="expected", ) return "\n".join(diff) alembic-rel_1_7_6/tests/test_suite.py000066400000000000000000000000541417624537100200420ustar00rootroot00000000000000from alembic.testing.suite import * # noqa alembic-rel_1_7_6/tests/test_version_table.py000066400000000000000000000325101417624537100215470ustar00rootroot00000000000000from sqlalchemy import Column from sqlalchemy import inspect from sqlalchemy import MetaData from sqlalchemy import String from sqlalchemy import Table from alembic import migration from alembic.testing import assert_raises from alembic.testing import assert_raises_message from alembic.testing import config from alembic.testing import eq_ from alembic.testing import mock from alembic.testing.fixtures import TestBase from alembic.util import CommandError version_table = Table( "version_table", MetaData(), Column("version_num", String(32), nullable=False), ) def _up(from_, to_, branch_presence_changed=False): return migration.StampStep(from_, to_, True, branch_presence_changed) def _down(from_, to_, branch_presence_changed=False): return migration.StampStep(from_, to_, False, branch_presence_changed) class TestMigrationContext(TestBase): @classmethod def setup_class(cls): cls.bind = config.db def setUp(self): self.connection = self.bind.connect() self.transaction = self.connection.begin() def tearDown(self): self.transaction.rollback() with self.connection.begin(): version_table.drop(self.connection, checkfirst=True) self.connection.close() def make_one(self, **kwargs): return migration.MigrationContext.configure(**kwargs) def get_revision(self): result = self.connection.execute(version_table.select()) rows = result.fetchall() if len(rows) == 0: return None eq_(len(rows), 1) return rows[0]["version_num"] def test_config_default_version_table_name(self): context = self.make_one(dialect_name="sqlite") eq_(context._version.name, "alembic_version") def test_config_explicit_version_table_name(self): context = self.make_one( dialect_name="sqlite", opts={"version_table": "explicit"} ) eq_(context._version.name, "explicit") eq_(context._version.primary_key.name, "explicit_pkc") def test_config_explicit_version_table_schema(self): context = self.make_one( dialect_name="sqlite", opts={"version_table_schema": "explicit"} ) eq_(context._version.schema, "explicit") def test_config_explicit_no_pk(self): context = self.make_one( dialect_name="sqlite", opts={"version_table_pk": False} ) eq_(len(context._version.primary_key), 0) def test_config_explicit_w_pk(self): context = self.make_one( dialect_name="sqlite", opts={"version_table_pk": True} ) eq_(len(context._version.primary_key), 1) eq_(context._version.primary_key.name, "alembic_version_pkc") def test_get_current_revision_doesnt_create_version_table(self): context = self.make_one( connection=self.connection, opts={"version_table": "version_table"} ) eq_(context.get_current_revision(), None) insp = inspect(self.connection) assert "version_table" not in insp.get_table_names() def test_get_current_revision(self): context = self.make_one( connection=self.connection, opts={"version_table": 
"version_table"} ) version_table.create(self.connection) eq_(context.get_current_revision(), None) self.connection.execute( version_table.insert().values(version_num="revid") ) eq_(context.get_current_revision(), "revid") def test_get_current_revision_error_if_starting_rev_given_online(self): context = self.make_one( connection=self.connection, opts={"starting_rev": "boo"} ) assert_raises(CommandError, context.get_current_revision) def test_get_current_revision_offline(self): context = self.make_one( dialect_name="sqlite", opts={"starting_rev": "startrev", "as_sql": True}, ) eq_(context.get_current_revision(), "startrev") def test_get_current_revision_multiple_heads(self): version_table.create(self.connection) context = self.make_one( connection=self.connection, opts={"version_table": "version_table"} ) updater = migration.HeadMaintainer(context, ()) updater.update_to_step(_up(None, "a", True)) updater.update_to_step(_up(None, "b", True)) assert_raises_message( CommandError, "Version table 'version_table' has more than one head present; " "please use get_current_heads()", context.get_current_revision, ) def test_get_heads(self): version_table.create(self.connection) context = self.make_one( connection=self.connection, opts={"version_table": "version_table"} ) updater = migration.HeadMaintainer(context, ()) updater.update_to_step(_up(None, "a", True)) updater.update_to_step(_up(None, "b", True)) eq_(context.get_current_heads(), ("a", "b")) def test_get_heads_offline(self): version_table.create(self.connection) context = self.make_one( connection=self.connection, opts={ "starting_rev": "q", "version_table": "version_table", "as_sql": True, }, ) eq_(context.get_current_heads(), ("q",)) def test_stamp_api_creates_table(self): context = self.make_one(connection=self.connection) assert ( "alembic_version" not in inspect(self.connection).get_table_names() ) script = mock.Mock( _stamp_revs=lambda revision, heads: [ _up(None, "a", True), _up(None, "b", True), ] ) context.stamp(script, "b") eq_(context.get_current_heads(), ("a", "b")) assert "alembic_version" in inspect(self.connection).get_table_names() class UpdateRevTest(TestBase): __backend__ = True @classmethod def setup_class(cls): cls.bind = config.db def setUp(self): self.connection = self.bind.connect() self.context = migration.MigrationContext.configure( connection=self.connection, opts={"version_table": "version_table"} ) with self.connection.begin(): version_table.create(self.connection) self.updater = migration.HeadMaintainer(self.context, ()) def tearDown(self): in_t = getattr(self.connection, "in_transaction", lambda: False) if in_t(): self.connection.rollback() with self.connection.begin(): version_table.drop(self.connection, checkfirst=True) self.connection.close() def _assert_heads(self, heads): eq_(set(self.context.get_current_heads()), set(heads)) eq_(self.updater.heads, set(heads)) def test_update_none_to_single(self): with self.connection.begin(): self.updater.update_to_step(_up(None, "a", True)) self._assert_heads(("a",)) def test_update_single_to_single(self): with self.connection.begin(): self.updater.update_to_step(_up(None, "a", True)) self.updater.update_to_step(_up("a", "b")) self._assert_heads(("b",)) def test_update_single_to_none(self): with self.connection.begin(): self.updater.update_to_step(_up(None, "a", True)) self.updater.update_to_step(_down("a", None, True)) self._assert_heads(()) def test_add_branches(self): with self.connection.begin(): self.updater.update_to_step(_up(None, "a", True)) 
self.updater.update_to_step(_up("a", "b")) self.updater.update_to_step(_up(None, "c", True)) self._assert_heads(("b", "c")) self.updater.update_to_step(_up("c", "d")) self.updater.update_to_step(_up("d", "e1")) self.updater.update_to_step(_up("d", "e2", True)) self._assert_heads(("b", "e1", "e2")) def test_teardown_branches(self): with self.connection.begin(): self.updater.update_to_step(_up(None, "d1", True)) self.updater.update_to_step(_up(None, "d2", True)) self._assert_heads(("d1", "d2")) self.updater.update_to_step(_down("d1", "c")) self._assert_heads(("c", "d2")) self.updater.update_to_step(_down("d2", "c", True)) self._assert_heads(("c",)) self.updater.update_to_step(_down("c", "b")) self._assert_heads(("b",)) def test_resolve_merges(self): with self.connection.begin(): self.updater.update_to_step(_up(None, "a", True)) self.updater.update_to_step(_up("a", "b")) self.updater.update_to_step(_up("b", "c1")) self.updater.update_to_step(_up("b", "c2", True)) self.updater.update_to_step(_up("c1", "d1")) self.updater.update_to_step(_up("c2", "d2")) self._assert_heads(("d1", "d2")) self.updater.update_to_step(_up(("d1", "d2"), "e")) self._assert_heads(("e",)) def test_unresolve_merges(self): with self.connection.begin(): self.updater.update_to_step(_up(None, "e", True)) self.updater.update_to_step(_down("e", ("d1", "d2"))) self._assert_heads(("d2", "d1")) self.updater.update_to_step(_down("d2", "c2")) self._assert_heads(("c2", "d1")) def test_update_no_match(self): with self.connection.begin(): self.updater.update_to_step(_up(None, "a", True)) self.updater.heads.add("x") assert_raises_message( CommandError, "Online migration expected to match one row when updating " "'x' to 'b' in 'version_table'; 0 found", self.updater.update_to_step, _up("x", "b"), ) def test_update_no_match_no_sane_rowcount(self): with self.connection.begin(): self.updater.update_to_step(_up(None, "a", True)) self.updater.heads.add("x") with mock.patch.object( self.connection.dialect, "supports_sane_rowcount", False ): self.updater.update_to_step(_up("x", "b")) def test_update_multi_match(self): with self.connection.begin(): self.connection.execute( version_table.insert(), dict(version_num="a") ) self.connection.execute( version_table.insert(), dict(version_num="a") ) self.updater.heads.add("a") assert_raises_message( CommandError, "Online migration expected to match one row when updating " "'a' to 'b' in 'version_table'; 2 found", self.updater.update_to_step, _up("a", "b"), ) def test_update_multi_match_no_sane_rowcount(self): with self.connection.begin(): self.connection.execute( version_table.insert(), dict(version_num="a") ) self.connection.execute( version_table.insert(), dict(version_num="a") ) self.updater.heads.add("a") with mock.patch.object( self.connection.dialect, "supports_sane_rowcount", False ): self.updater.update_to_step(_up("a", "b")) def test_delete_no_match(self): with self.connection.begin(): self.updater.update_to_step(_up(None, "a", True)) self.updater.heads.add("x") assert_raises_message( CommandError, "Online migration expected to match one row when " "deleting 'x' in 'version_table'; 0 found", self.updater.update_to_step, _down("x", None, True), ) def test_delete_no_matchno_sane_rowcount(self): with self.connection.begin(): self.updater.update_to_step(_up(None, "a", True)) self.updater.heads.add("x") with mock.patch.object( self.connection.dialect, "supports_sane_rowcount", False ): self.updater.update_to_step(_down("x", None, True)) def test_delete_multi_match(self): with 
self.connection.begin(): self.connection.execute( version_table.insert(), dict(version_num="a") ) self.connection.execute( version_table.insert(), dict(version_num="a") ) self.updater.heads.add("a") assert_raises_message( CommandError, "Online migration expected to match one row when " "deleting 'a' in 'version_table'; 2 found", self.updater.update_to_step, _down("a", None, True), ) def test_delete_multi_match_no_sane_rowcount(self): with self.connection.begin(): self.connection.execute( version_table.insert(), dict(version_num="a") ) self.connection.execute( version_table.insert(), dict(version_num="a") ) self.updater.heads.add("a") with mock.patch.object( self.connection.dialect, "supports_sane_rowcount", False ): self.updater.update_to_step(_down("a", None, True)) alembic-rel_1_7_6/tests/test_version_traversal.py000066400000000000000000001331441417624537100224700ustar00rootroot00000000000000from alembic import util from alembic.migration import HeadMaintainer from alembic.migration import MigrationStep from alembic.testing import assert_raises_message from alembic.testing import eq_ from alembic.testing import expect_warnings from alembic.testing import mock from alembic.testing.env import clear_staging_env from alembic.testing.env import staging_env from alembic.testing.fixtures import TestBase class MigrationTest(TestBase): def up_(self, rev): return MigrationStep.upgrade_from_script(self.env.revision_map, rev) def down_(self, rev): return MigrationStep.downgrade_from_script(self.env.revision_map, rev) def _assert_downgrade(self, destination, source, expected, expected_heads): revs = self.env._downgrade_revs(destination, source) eq_(revs, expected) heads = set(util.to_tuple(source, default=())) head = HeadMaintainer(mock.Mock(), heads) for rev in revs: head.update_to_step(rev) eq_(head.heads, expected_heads) def _assert_upgrade(self, destination, source, expected, expected_heads): revs = self.env._upgrade_revs(destination, source) eq_(revs, expected) heads = set(util.to_tuple(source, default=())) head = HeadMaintainer(mock.Mock(), heads) for rev in revs: head.update_to_step(rev) eq_(head.heads, expected_heads) class RevisionPathTest(MigrationTest): @classmethod def setup_class(cls): cls.env = env = staging_env() cls.a = env.generate_revision(util.rev_id(), "->a") cls.b = env.generate_revision(util.rev_id(), "a->b") cls.c = env.generate_revision(util.rev_id(), "b->c") cls.d = env.generate_revision(util.rev_id(), "c->d") cls.e = env.generate_revision(util.rev_id(), "d->e") @classmethod def teardown_class(cls): clear_staging_env() def test_downgrade_base_no_version(self): self._assert_downgrade("base", [], [], set()) def test_downgrade_to_existing(self): """test for #838; downgrade to a revision that's already in current heads, but is not itself a head.""" self._assert_downgrade( self.a.revision, [self.a.revision], [], {self.a.revision} ) def test_downgrade_to_existing_head(self): """test for #839; downgrade to a revision that's already in current heads, which *is* itself a head.""" self._assert_downgrade( self.e.revision, [self.e.revision], [], {self.e.revision} ) def test_upgrade_path(self): self._assert_upgrade( self.e.revision, self.c.revision, [self.up_(self.d), self.up_(self.e)], set([self.e.revision]), ) self._assert_upgrade( self.c.revision, None, [self.up_(self.a), self.up_(self.b), self.up_(self.c)], set([self.c.revision]), ) def test_relative_upgrade_path(self): self._assert_upgrade( "+2", self.a.revision, [self.up_(self.b), self.up_(self.c)], set([self.c.revision]), ) 
self._assert_upgrade( "+1", self.a.revision, [self.up_(self.b)], set([self.b.revision]) ) self._assert_upgrade( "+3", self.b.revision, [self.up_(self.c), self.up_(self.d), self.up_(self.e)], set([self.e.revision]), ) self._assert_upgrade( "%s+2" % self.b.revision, self.a.revision, [self.up_(self.b), self.up_(self.c), self.up_(self.d)], set([self.d.revision]), ) self._assert_upgrade( "%s-2" % self.d.revision, self.a.revision, [self.up_(self.b)], set([self.b.revision]), ) def test_invalid_relative_upgrade_path(self): assert_raises_message( util.CommandError, "Relative revision -2 didn't produce 2 migrations", self.env._upgrade_revs, "-2", self.b.revision, ) assert_raises_message( util.CommandError, r"Relative revision \+5 didn't produce 5 migrations", self.env._upgrade_revs, "+5", self.b.revision, ) def test_downgrade_path(self): self._assert_downgrade( self.c.revision, self.e.revision, [self.down_(self.e), self.down_(self.d)], set([self.c.revision]), ) self._assert_downgrade( None, self.c.revision, [self.down_(self.c), self.down_(self.b), self.down_(self.a)], set(), ) def test_relative_downgrade_path(self): self._assert_downgrade( "-1", self.c.revision, [self.down_(self.c)], set([self.b.revision]) ) self._assert_downgrade( "-3", self.e.revision, [self.down_(self.e), self.down_(self.d), self.down_(self.c)], set([self.b.revision]), ) self._assert_downgrade( "%s+2" % self.a.revision, self.d.revision, [self.down_(self.d)], set([self.c.revision]), ) self._assert_downgrade( "%s-2" % self.c.revision, self.d.revision, [self.down_(self.d), self.down_(self.c), self.down_(self.b)], set([self.a.revision]), ) def test_invalid_relative_downgrade_path(self): assert_raises_message( util.CommandError, "Relative revision -5 didn't produce 5 migrations", self.env._downgrade_revs, "-5", self.b.revision, ) assert_raises_message( util.CommandError, r"Relative revision \+2 didn't produce 2 migrations", self.env._downgrade_revs, "+2", self.b.revision, ) def test_invalid_move_rev_to_none(self): assert_raises_message( util.CommandError, r"Destination %s is not a valid downgrade " r"target from current head\(s\)" % self.b.revision[0:3], self.env._downgrade_revs, self.b.revision[0:3], None, ) def test_invalid_move_higher_to_lower(self): assert_raises_message( util.CommandError, r"Destination %s is not a valid downgrade " r"target from current head\(s\)" % self.c.revision[0:4], self.env._downgrade_revs, self.c.revision[0:4], self.b.revision, ) def test_stamp_to_base(self): revs = self.env._stamp_revs("base", self.d.revision) eq_(len(revs), 1) assert revs[0].should_delete_branch eq_(revs[0].delete_version_num, self.d.revision) class BranchedPathTest(MigrationTest): @classmethod def setup_class(cls): cls.env = env = staging_env() cls.a = env.generate_revision(util.rev_id(), "->a") cls.b = env.generate_revision(util.rev_id(), "a->b") cls.c1 = env.generate_revision( util.rev_id(), "b->c1", branch_labels="c1branch", refresh=True ) cls.d1 = env.generate_revision(util.rev_id(), "c1->d1") cls.c2 = env.generate_revision( util.rev_id(), "b->c2", branch_labels="c2branch", head=cls.b.revision, splice=True, ) cls.d2 = env.generate_revision( util.rev_id(), "c2->d2", head=cls.c2.revision ) @classmethod def teardown_class(cls): clear_staging_env() def test_stamp_down_across_multiple_branch_to_branchpoint(self): heads = [self.d1.revision, self.c2.revision] revs = self.env._stamp_revs(self.b.revision, heads) eq_(len(revs), 1) eq_( revs[0].merge_branch_idents(heads), # DELETE d1 revision, UPDATE c2 to b ([self.d1.revision], 
self.c2.revision, self.b.revision), ) def test_stamp_to_labeled_base_multiple_heads(self): revs = self.env._stamp_revs( "c1branch@base", [self.d1.revision, self.c2.revision] ) eq_(len(revs), 1) assert revs[0].should_delete_branch eq_(revs[0].delete_version_num, self.d1.revision) def test_stamp_to_labeled_head_multiple_heads(self): heads = [self.d1.revision, self.c2.revision] revs = self.env._stamp_revs("c2branch@head", heads) eq_(len(revs), 1) eq_( revs[0].merge_branch_idents(heads), # the c1branch remains unchanged ([], self.c2.revision, self.d2.revision), ) def test_upgrade_single_branch(self): self._assert_upgrade( self.d1.revision, self.b.revision, [self.up_(self.c1), self.up_(self.d1)], set([self.d1.revision]), ) def test_upgrade_multiple_branch(self): # move from a single head to multiple heads self._assert_upgrade( (self.d1.revision, self.d2.revision), self.a.revision, [ self.up_(self.b), self.up_(self.c2), self.up_(self.d2), self.up_(self.c1), self.up_(self.d1), ], set([self.d1.revision, self.d2.revision]), ) def test_downgrade_multiple_branch(self): self._assert_downgrade( self.a.revision, (self.d1.revision, self.d2.revision), [ self.down_(self.d1), self.down_(self.c1), self.down_(self.d2), self.down_(self.c2), self.down_(self.b), ], set([self.a.revision]), ) def test_relative_upgrade(self): self._assert_upgrade( "c2branch@head-1", self.b.revision, [self.up_(self.c2)], set([self.c2.revision]), ) def test_relative_downgrade_baseplus2(self): """base+2 points to b, no branch label, drop everything above b.""" self._assert_downgrade( "base+2", [self.d2.revision, self.d1.revision], [ self.down_(self.d1), self.down_(self.c1), self.down_(self.d2), self.down_(self.c2), ], set([self.b.revision]), ) def test_relative_downgrade_branchplus2(self): """ Correct behaviour (per https://github.com/sqlalchemy/alembic/pull/763#issuecomment-738741297) Only the c2branch should be downgraded, right back to base+2 = b """ self._assert_downgrade( "c2branch@base+2", [self.d2.revision, self.d1.revision], [self.down_(self.d2), self.down_(self.c2)], set([self.d1.revision]), ) def test_relative_downgrade_branchplus3(self): """c2branch@base+3 equivalent to c2.""" self._assert_downgrade( self.c2.revision, [self.d2.revision, self.d1.revision], [self.down_(self.d2)], set([self.d1.revision, self.c2.revision]), ) self._assert_downgrade( "c2branch@base+3", [self.d2.revision, self.d1.revision], [self.down_(self.d2)], set([self.d1.revision, self.c2.revision]), ) # Old downgrade -1 behaviour depends on order of branch upgrades. # This should probably fail (ambiguous) but is currently documented # as a key use case in branching. 
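    # Illustrative sketch, not part of the original tests: as the note above
    # says, a bare "-1" from multiple heads is ambiguous.  Qualifying the
    # relative step with a branch label (exercised by the labelled test
    # further below) removes the ambiguity.  Assumes the fixture revisions
    # from setup_class (d1 on c1branch, d2 on c2branch).
    def _labelled_downgrade_sketch(self):
        # "c1branch@-1" walks one step down along c1branch only, so just d1
        # is reverted and no "ambiguous" warning is emitted.
        return self.env._downgrade_revs(
            "c1branch@-1", [self.d1.revision, self.d2.revision]
        )
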
def test_downgrade_once_order_right(self): with expect_warnings("downgrade -1 from multiple heads is ambiguous;"): self._assert_downgrade( "-1", [self.d2.revision, self.d1.revision], [self.down_(self.d2)], set([self.d1.revision, self.c2.revision]), ) def test_downgrade_once_order_right_unbalanced(self): with expect_warnings("downgrade -1 from multiple heads is ambiguous;"): self._assert_downgrade( "-1", [self.c2.revision, self.d1.revision], [self.down_(self.c2)], set([self.d1.revision]), ) def test_downgrade_once_order_left(self): with expect_warnings("downgrade -1 from multiple heads is ambiguous;"): self._assert_downgrade( "-1", [self.d1.revision, self.d2.revision], [self.down_(self.d1)], set([self.d2.revision, self.c1.revision]), ) def test_downgrade_once_order_left_unbalanced(self): with expect_warnings("downgrade -1 from multiple heads is ambiguous;"): self._assert_downgrade( "-1", [self.c1.revision, self.d2.revision], [self.down_(self.c1)], set([self.d2.revision]), ) def test_downgrade_once_order_left_unbalanced_labelled(self): self._assert_downgrade( "c1branch@-1", [self.d1.revision, self.d2.revision], [self.down_(self.d1)], set([self.c1.revision, self.d2.revision]), ) # Captures https://github.com/sqlalchemy/alembic/issues/765 def test_downgrade_relative_order_right(self): self._assert_downgrade( "{}-1".format(self.d2.revision), [self.d2.revision, self.c1.revision], [self.down_(self.d2)], set([self.c1.revision, self.c2.revision]), ) def test_downgrade_relative_order_left(self): self._assert_downgrade( "{}-1".format(self.d2.revision), [self.c1.revision, self.d2.revision], [self.down_(self.d2)], set([self.c1.revision, self.c2.revision]), ) def test_downgrade_single_branch_c1branch(self): """Use branch label to specify the branch to downgrade.""" self._assert_downgrade( "c1branch@{}".format(self.b.revision), (self.c1.revision, self.d2.revision), [ self.down_(self.c1), ], set([self.d2.revision]), ) def test_downgrade_single_branch_c1branch_from_d1_head(self): """Use branch label to specify the branch (where the branch label is not on the head revision).""" self._assert_downgrade( "c2branch@{}".format(self.b.revision), (self.c1.revision, self.d2.revision), [ self.down_(self.d2), self.down_(self.c2), ], set([self.c1.revision]), ) def test_downgrade_single_branch_c2(self): """Use a revision on the branch (not head) to specify the branch.""" self._assert_downgrade( "{}@{}".format(self.c2.revision, self.b.revision), (self.d1.revision, self.d2.revision), [ self.down_(self.d2), self.down_(self.c2), ], set([self.d1.revision]), ) def test_downgrade_single_branch_d1(self): """Use the head revision to specify the branch.""" self._assert_downgrade( "{}@{}".format(self.d1.revision, self.b.revision), (self.d1.revision, self.d2.revision), [ self.down_(self.d1), self.down_(self.c1), ], set([self.d2.revision]), ) def test_downgrade_relative_to_branch_head(self): self._assert_downgrade( "c1branch@head-1", (self.d1.revision, self.d2.revision), [self.down_(self.d1)], set([self.c1.revision, self.d2.revision]), ) def test_upgrade_other_branch_from_mergepoint(self): # Advance c2branch forward by one, meaning one past the mergepoint # in this case. self._assert_upgrade( "c2branch@+1", (self.c1.revision), [self.up_(self.c2)], set([self.c1.revision, self.c2.revision]), ) def test_upgrade_one_branch_of_heads(self): # Still a bit of ambiguity here ... does this mean an absolute # revision "goto revision c2 (labelled c2branch), +1", or "move up # one revision from current along c2branch"? 
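        # In this fixture the two readings coincide: one step past the
        # labelled revision c2 and one step up from the current head of
        # c2branch both land on d2, so the assertion below cannot tell
        # them apart.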
self._assert_upgrade( "c2branch@+1", (self.c1.revision, self.c2.revision), [self.up_(self.d2)], set([self.c1.revision, self.d2.revision]), ) def test_ambiguous_upgrade(self): assert_raises_message( util.CommandError, "Ambiguous upgrade from multiple current revisions", self.env._upgrade_revs, "+1", [self.c1.revision, self.c2.revision], ) def test_upgrade_from_base(self): self._assert_upgrade( "base+1", [], [self.up_(self.a)], set([self.a.revision]) ) def test_upgrade_from_base_implicit(self): self._assert_upgrade( "+1", [], [self.up_(self.a)], set([self.a.revision]) ) def test_downgrade_minus1_to_base(self): self._assert_downgrade( "-1", [self.a.revision], [self.down_(self.a)], set() ) def test_downgrade_minus1_from_base(self): assert_raises_message( util.CommandError, "Relative revision -1 didn't produce 1 migrations", self.env._downgrade_revs, "-1", [], ) def test_downgrade_no_effect_branched(self): """Added for good measure when there are multiple branches.""" self._assert_downgrade( self.c2.revision, [self.d1.revision, self.c2.revision], [], set([self.d1.revision, self.c2.revision]), ) self._assert_downgrade( self.d1.revision, [self.d1.revision, self.c2.revision], [], set([self.d1.revision, self.c2.revision]), ) class BranchFromMergepointTest(MigrationTest): """this is a form that will come up frequently in the "many independent roots with cross-dependencies" case. """ @classmethod def setup_class(cls): cls.env = env = staging_env() cls.a1 = env.generate_revision(util.rev_id(), "->a1") cls.b1 = env.generate_revision(util.rev_id(), "a1->b1") cls.c1 = env.generate_revision(util.rev_id(), "b1->c1") cls.a2 = env.generate_revision( util.rev_id(), "->a2", head=(), refresh=True ) cls.b2 = env.generate_revision( util.rev_id(), "a2->b2", head=cls.a2.revision ) cls.c2 = env.generate_revision( util.rev_id(), "b2->c2", head=cls.b2.revision ) # mergepoint between c1, c2 # d1 dependent on c2 cls.d1 = env.generate_revision( util.rev_id(), "d1", head=(cls.c1.revision, cls.c2.revision), refresh=True, ) # but then c2 keeps going into d2 cls.d2 = env.generate_revision( util.rev_id(), "d2", head=cls.c2.revision, refresh=True, splice=True, ) @classmethod def teardown_class(cls): clear_staging_env() def test_mergepoint_to_only_one_side_upgrade(self): self._assert_upgrade( self.d1.revision, (self.d2.revision, self.b1.revision), [self.up_(self.c1), self.up_(self.d1)], set([self.d2.revision, self.d1.revision]), ) def test_mergepoint_to_only_one_side_downgrade(self): self._assert_downgrade( self.b1.revision, (self.d2.revision, self.d1.revision), [self.down_(self.d1), self.down_(self.c1)], set([self.d2.revision, self.b1.revision]), ) class BranchFrom3WayMergepointTest(MigrationTest): """this is a form that will come up frequently in the "many independent roots with cross-dependencies" case. 
""" @classmethod def setup_class(cls): cls.env = env = staging_env() cls.a1 = env.generate_revision(util.rev_id(), "->a1") cls.b1 = env.generate_revision(util.rev_id(), "a1->b1") cls.c1 = env.generate_revision(util.rev_id(), "b1->c1") cls.a2 = env.generate_revision( util.rev_id(), "->a2", head=(), refresh=True ) cls.b2 = env.generate_revision( util.rev_id(), "a2->b2", head=cls.a2.revision ) cls.c2 = env.generate_revision( util.rev_id(), "b2->c2", head=cls.b2.revision ) cls.a3 = env.generate_revision( util.rev_id(), "->a3", head=(), refresh=True ) cls.b3 = env.generate_revision( util.rev_id(), "a3->b3", head=cls.a3.revision ) cls.c3 = env.generate_revision( util.rev_id(), "b3->c3", head=cls.b3.revision ) # mergepoint between c1, c2, c3 # d1 dependent on c2, c3 cls.d1 = env.generate_revision( util.rev_id(), "d1", head=(cls.c1.revision, cls.c2.revision, cls.c3.revision), refresh=True, ) # but then c2 keeps going into d2 cls.d2 = env.generate_revision( util.rev_id(), "d2", head=cls.c2.revision, refresh=True, splice=True, ) # c3 keeps going into d3 cls.d3 = env.generate_revision( util.rev_id(), "d3", head=cls.c3.revision, refresh=True, splice=True, ) @classmethod def teardown_class(cls): clear_staging_env() def test_mergepoint_to_only_one_side_upgrade(self): self._assert_upgrade( self.d1.revision, (self.d3.revision, self.d2.revision, self.b1.revision), [self.up_(self.c1), self.up_(self.d1)], set([self.d3.revision, self.d2.revision, self.d1.revision]), ) def test_mergepoint_to_only_one_side_downgrade(self): self._assert_downgrade( self.b1.revision, (self.d3.revision, self.d2.revision, self.d1.revision), [self.down_(self.d1), self.down_(self.c1)], set([self.d3.revision, self.d2.revision, self.b1.revision]), ) def test_mergepoint_to_two_sides_upgrade(self): self._assert_upgrade( self.d1.revision, (self.d3.revision, self.b2.revision, self.b1.revision), [self.up_(self.c2), self.up_(self.c1), self.up_(self.d1)], # this will merge b2 and b1 into d1 set([self.d3.revision, self.d1.revision]), ) # but then! b2 will break out again if we keep going with it self._assert_upgrade( self.d2.revision, (self.d3.revision, self.d1.revision), [self.up_(self.d2)], set([self.d3.revision, self.d2.revision, self.d1.revision]), ) class TwinMergeTest(MigrationTest): """Test #297, where we have two mergepoints from the same set of originating branches. 
""" @classmethod def setup_class(cls): """ 33e21c000cfe -> 178d4e761bbd (head), 2bef33cb3a58, 3904558db1c6, 968330f320d -> 33e21c000cfe (mergepoint) 46c99f866004 -> 18f46b42410d (head), 2bef33cb3a58, 3904558db1c6, 968330f320d -> 46c99f866004 (mergepoint) f0fa4315825 -> 3904558db1c6 (branchpoint), -------------------------- A -> B2 (branchpoint), B1, B2, B3 -> C1 (mergepoint) B1, B2, B3 -> C2 (mergepoint) C1 -> D1 (head), C2 -> D2 (head), """ cls.env = env = staging_env() cls.a = env.generate_revision("a", "a") cls.b1 = env.generate_revision("b1", "b1", head=cls.a.revision) cls.b2 = env.generate_revision( "b2", "b2", splice=True, head=cls.a.revision ) cls.b3 = env.generate_revision( "b3", "b3", splice=True, head=cls.a.revision ) cls.c1 = env.generate_revision( "c1", "c1", head=(cls.b1.revision, cls.b2.revision, cls.b3.revision), ) cls.c2 = env.generate_revision( "c2", "c2", splice=True, head=(cls.b1.revision, cls.b2.revision, cls.b3.revision), ) cls.d1 = env.generate_revision("d1", "d1", head=cls.c1.revision) cls.d2 = env.generate_revision("d2", "d2", head=cls.c2.revision) @classmethod def teardown_class(cls): clear_staging_env() def test_upgrade(self): head = HeadMaintainer(mock.Mock(), [self.a.revision]) steps = [ (self.up_(self.b3), ("b3",)), (self.up_(self.b1), ("b1", "b3")), (self.up_(self.b2), ("b1", "b2", "b3")), (self.up_(self.c2), ("c2",)), (self.up_(self.d2), ("d2",)), (self.up_(self.c1), ("c1", "d2")), (self.up_(self.d1), ("d1", "d2")), ] for step, assert_ in steps: head.update_to_step(step) eq_(head.heads, set(assert_)) class NotQuiteTwinMergeTest(MigrationTest): """Test a variant of #297.""" @classmethod def setup_class(cls): """ A -> B2 (branchpoint), B1, B2 -> C1 (mergepoint) B2, B3 -> C2 (mergepoint) C1 -> D1 (head), C2 -> D2 (head), """ cls.env = env = staging_env() cls.a = env.generate_revision("a", "a") cls.b1 = env.generate_revision("b1", "b1", head=cls.a.revision) cls.b2 = env.generate_revision( "b2", "b2", splice=True, head=cls.a.revision ) cls.b3 = env.generate_revision( "b3", "b3", splice=True, head=cls.a.revision ) cls.c1 = env.generate_revision( "c1", "c1", head=(cls.b1.revision, cls.b2.revision) ) cls.c2 = env.generate_revision( "c2", "c2", splice=True, head=(cls.b2.revision, cls.b3.revision) ) cls.d1 = env.generate_revision("d1", "d1", head=cls.c1.revision) cls.d2 = env.generate_revision("d2", "d2", head=cls.c2.revision) @classmethod def teardown_class(cls): clear_staging_env() def test_upgrade(self): head = HeadMaintainer(mock.Mock(), [self.a.revision]) """ upgrade a -> b2, b2 upgrade a -> b3, b3 upgrade b2, b3 -> c2, c2 upgrade c2 -> d2, d2 upgrade a -> b1, b1 upgrade b1, b2 -> c1, c1 upgrade c1 -> d1, d1 """ steps = [ (self.up_(self.b2), ("b2",)), (self.up_(self.b3), ("b2", "b3")), (self.up_(self.c2), ("c2",)), (self.up_(self.d2), ("d2",)), (self.up_(self.b1), ("b1", "d2")), (self.up_(self.c1), ("c1", "d2")), (self.up_(self.d1), ("d1", "d2")), ] for step, assert_ in steps: head.update_to_step(step) eq_(head.heads, set(assert_)) class DependsOnBranchTestOne(MigrationTest): @classmethod def setup_class(cls): cls.env = env = staging_env() cls.a1 = env.generate_revision( util.rev_id(), "->a1", branch_labels=["lib1"] ) cls.b1 = env.generate_revision(util.rev_id(), "a1->b1") cls.c1 = env.generate_revision(util.rev_id(), "b1->c1") cls.a2 = env.generate_revision(util.rev_id(), "->a2", head=()) cls.b2 = env.generate_revision( util.rev_id(), "a2->b2", head=cls.a2.revision ) cls.c2 = env.generate_revision( util.rev_id(), "b2->c2", head=cls.b2.revision, 
depends_on=cls.c1.revision, ) cls.d1 = env.generate_revision( util.rev_id(), "c1->d1", head=cls.c1.revision ) cls.e1 = env.generate_revision( util.rev_id(), "d1->e1", head=cls.d1.revision ) cls.f1 = env.generate_revision( util.rev_id(), "e1->f1", head=cls.e1.revision ) @classmethod def teardown_class(cls): clear_staging_env() def test_downgrade_to_dependency(self): heads = [self.c2.revision, self.d1.revision] head = HeadMaintainer(mock.Mock(), heads) head.update_to_step(self.down_(self.d1)) eq_(head.heads, set([self.c2.revision])) def test_stamp_across_dependency(self): heads = [self.e1.revision, self.c2.revision] head = HeadMaintainer(mock.Mock(), heads) for step in self.env._stamp_revs(self.b1.revision, heads): head.update_to_step(step) eq_(head.heads, set([self.b1.revision])) class DependsOnBranchTestTwo(MigrationTest): @classmethod def setup_class(cls): """ Structure:: a1 ---+ | a2 ---+--> amerge | a3 ---+ ^ | +---------------------------+ | b1 ---+ | +--> bmerge overmerge / d1 b2 ---+ | | ^ | | | | | +--------------------------+ | | +-----------------------------+ | v c1 ---+ | c2 ---+--> cmerge | c3 ---+ """ cls.env = env = staging_env() cls.a1 = env.generate_revision("a1", "->a1", head="base") cls.a2 = env.generate_revision("a2", "->a2", head="base") cls.a3 = env.generate_revision("a3", "->a3", head="base") cls.amerge = env.generate_revision( "amerge", "amerge", head=[cls.a1.revision, cls.a2.revision, cls.a3.revision], ) cls.b1 = env.generate_revision("b1", "->b1", head="base") cls.b2 = env.generate_revision("b2", "->b2", head="base") cls.bmerge = env.generate_revision( "bmerge", "bmerge", head=[cls.b1.revision, cls.b2.revision] ) cls.c1 = env.generate_revision("c1", "->c1", head="base") cls.c2 = env.generate_revision("c2", "->c2", head="base") cls.c3 = env.generate_revision("c3", "->c3", head="base") cls.cmerge = env.generate_revision( "cmerge", "cmerge", head=[cls.c1.revision, cls.c2.revision, cls.c3.revision], ) cls.d1 = env.generate_revision( "d1", "o", head="base", depends_on=[cls.a3.revision, cls.b2.revision, cls.c1.revision], ) @classmethod def teardown_class(cls): clear_staging_env() def test_kaboom(self): # here's the upgrade path: # ['->c1', '->b2', '->a3', 'overmerge', '->c3', '->c2', 'cmerge', # '->b1', 'bmerge', '->a2', '->a1', 'amerge'], heads = [ self.amerge.revision, self.bmerge.revision, self.cmerge.revision, self.d1.revision, ] self._assert_downgrade( self.b2.revision, heads, [self.down_(self.bmerge)], set( [ self.amerge.revision, self.b1.revision, self.cmerge.revision, # b2 isn't here, but d1 is, which implies b2. OK! self.d1.revision, ] ), ) # start with those heads.. heads = [ self.amerge.revision, self.d1.revision, self.b1.revision, self.cmerge.revision, ] # downgrade d1... self._assert_downgrade( "d1@base", heads, [self.down_(self.d1)], set( [ self.amerge.revision, self.b1.revision, # b2 has to be INSERTed, because it was implied by d1 self.b2.revision, self.cmerge.revision, ] ), ) # start with those heads ... 
heads = [ self.amerge.revision, self.b1.revision, self.b2.revision, self.cmerge.revision, ] # this ordering can vary a lot based on what # sorting algorithm is in use because it's all # heads self._assert_downgrade( "base", heads, [ self.down_(self.amerge), self.down_(self.a1), self.down_(self.b1), self.down_(self.b2), self.down_(self.cmerge), self.down_(self.c1), self.down_(self.a2), self.down_(self.a3), self.down_(self.c2), self.down_(self.c3), ], set([]), ) class DependsOnBranchTestThree(MigrationTest): @classmethod def setup_class(cls): """ issue #377 Structure:: -> a1 --+--> a2 -------> a3 | ^ | | | +------+ | | | | +---|------+ | | | | v | +-------> b1 --> b2 --> b3 """ cls.env = env = staging_env() cls.a1 = env.generate_revision("a1", "->a1", head="base") cls.a2 = env.generate_revision("a2", "->a2") cls.b1 = env.generate_revision("b1", "->b1", head="base") cls.b2 = env.generate_revision( "b2", "->b2", depends_on="a2", head="b1" ) cls.b3 = env.generate_revision("b3", "->b3", head="b2") cls.a3 = env.generate_revision( "a3", "->a3", head="a2", depends_on="b1" ) @classmethod def teardown_class(cls): clear_staging_env() def test_downgrade_over_crisscross(self): # this state was not possible prior to # #377. a3 would be considered half of a merge point # between a3 and b2, and the head would be forced down # to b1. In this test however, we're not allowed to remove # b2 because a2 is dependent on it, hence we add the ability # to remove half of a merge point. self._assert_downgrade( "b1", ["a3", "b2"], [self.down_(self.b2)], set(["a3"]), # we have b1 also, which is implied by a3 ) class DependsOnOwnDownrevTest(MigrationTest): @classmethod def setup_class(cls): """ test #843 """ cls.env = env = staging_env() cls.a1 = env.generate_revision("a1", "->a1", head="base") cls.a2 = env.generate_revision("a2", "->a2", depends_on="a1") @classmethod def teardown_class(cls): clear_staging_env() def test_traverse(self): self._assert_upgrade( self.a2.revision, None, [self.up_(self.a1), self.up_(self.a2)], set(["a2"]), ) def test_traverse_down(self): self._assert_downgrade( self.a1.revision, self.a2.revision, [self.down_(self.a2)], set(["a1"]), ) class DependsOnBranchTestFour(MigrationTest): @classmethod def setup_class(cls): """ test issue #789 """ cls.env = env = staging_env() cls.a1 = env.generate_revision("a1", "->a1", head="base") cls.a2 = env.generate_revision("a2", "->a2") cls.a3 = env.generate_revision("a3", "->a3") cls.b1 = env.generate_revision("b1", "->b1", head="base") cls.b2 = env.generate_revision( "b2", "->b2", head="b1", depends_on="a3" ) cls.b3 = env.generate_revision("b3", "->b3", head="b2") cls.b4 = env.generate_revision( "b4", "->b4", head="b3", depends_on="a3" ) @classmethod def teardown_class(cls): clear_staging_env() def test_dependencies_are_normalized(self): heads = [self.b4.revision] self._assert_downgrade( self.b3.revision, heads, [self.down_(self.b4)], # a3 isn't here, because b3 still implies a3 set([self.b3.revision]), ) class DependsOnBranchLabelTest(MigrationTest): @classmethod def setup_class(cls): cls.env = env = staging_env() cls.a1 = env.generate_revision( util.rev_id(), "->a1", branch_labels=["lib1"] ) cls.b1 = env.generate_revision(util.rev_id(), "a1->b1") cls.c1 = env.generate_revision( util.rev_id(), "b1->c1", branch_labels=["c1lib"] ) cls.a2 = env.generate_revision(util.rev_id(), "->a2", head=()) cls.b2 = env.generate_revision( util.rev_id(), "a2->b2", head=cls.a2.revision ) cls.c2 = env.generate_revision( util.rev_id(), "b2->c2", head=cls.b2.revision, 
depends_on=["c1lib"] ) cls.d1 = env.generate_revision( util.rev_id(), "c1->d1", head=cls.c1.revision ) cls.e1 = env.generate_revision( util.rev_id(), "d1->e1", head=cls.d1.revision ) cls.f1 = env.generate_revision( util.rev_id(), "e1->f1", head=cls.e1.revision ) @classmethod def teardown_class(cls): clear_staging_env() def test_upgrade_path(self): self._assert_upgrade( self.c2.revision, self.a2.revision, [ self.up_(self.a1), self.up_(self.b1), self.up_(self.c1), self.up_(self.b2), self.up_(self.c2), ], set([self.c2.revision]), ) class ForestTest(MigrationTest): @classmethod def setup_class(cls): cls.env = env = staging_env() cls.a1 = env.generate_revision(util.rev_id(), "->a1") cls.b1 = env.generate_revision(util.rev_id(), "a1->b1") cls.a2 = env.generate_revision( util.rev_id(), "->a2", head=(), refresh=True ) cls.b2 = env.generate_revision( util.rev_id(), "a2->b2", head=cls.a2.revision ) @classmethod def teardown_class(cls): clear_staging_env() def test_base_to_heads(self): eq_( self.env._upgrade_revs("heads", "base"), [ self.up_(self.a2), self.up_(self.b2), self.up_(self.a1), self.up_(self.b1), ], ) def test_stamp_to_heads(self): revs = self.env._stamp_revs("heads", ()) eq_(len(revs), 2) eq_( set(r.to_revisions for r in revs), set([(self.b1.revision,), (self.b2.revision,)]), ) def test_stamp_to_heads_no_moves_needed(self): revs = self.env._stamp_revs( "heads", (self.b1.revision, self.b2.revision) ) eq_(len(revs), 0) class MergedPathTest(MigrationTest): @classmethod def setup_class(cls): cls.env = env = staging_env() cls.a = env.generate_revision(util.rev_id(), "->a") cls.b = env.generate_revision(util.rev_id(), "a->b") cls.c1 = env.generate_revision(util.rev_id(), "b->c1") cls.d1 = env.generate_revision(util.rev_id(), "c1->d1") cls.c2 = env.generate_revision( util.rev_id(), "b->c2", branch_labels="c2branch", head=cls.b.revision, splice=True, ) cls.d2 = env.generate_revision( util.rev_id(), "c2->d2", head=cls.c2.revision ) cls.e = env.generate_revision( util.rev_id(), "merge d1 and d2", head=(cls.d1.revision, cls.d2.revision), ) cls.f = env.generate_revision(util.rev_id(), "e->f") @classmethod def teardown_class(cls): clear_staging_env() def test_stamp_down_across_merge_point_branch(self): heads = [self.e.revision] revs = self.env._stamp_revs(self.c2.revision, heads) eq_(len(revs), 1) eq_( revs[0].merge_branch_idents(heads), # no deletes, UPDATE e to c2 ([], self.e.revision, self.c2.revision), ) def test_stamp_down_across_merge_prior_branching(self): heads = [self.e.revision] revs = self.env._stamp_revs(self.a.revision, heads) eq_(len(revs), 1) eq_( revs[0].merge_branch_idents(heads), # no deletes, UPDATE e to c2 ([], self.e.revision, self.a.revision), ) def test_stamp_up_across_merge_from_single_branch(self): revs = self.env._stamp_revs(self.e.revision, [self.c2.revision]) eq_(len(revs), 1) eq_( revs[0].merge_branch_idents([self.c2.revision]), # no deletes, UPDATE e to c2 ([], self.c2.revision, self.e.revision), ) def test_stamp_labled_head_across_merge_from_multiple_branch(self): # this is testing that filter_for_lineage() checks for # d1 both in terms of "c2branch" as well as that the "head" # revision "f" is the head of both d1 and d2 revs = self.env._stamp_revs( "c2branch@head", [self.d1.revision, self.c2.revision] ) eq_(len(revs), 1) eq_( revs[0].merge_branch_idents([self.d1.revision, self.c2.revision]), # DELETE d1 revision, UPDATE c2 to e ([self.d1.revision], self.c2.revision, self.f.revision), ) def test_stamp_up_across_merge_from_multiple_branch(self): heads = [self.d1.revision, 
class MergedPathTest(MigrationTest):
    @classmethod
    def setup_class(cls):
        cls.env = env = staging_env()
        cls.a = env.generate_revision(util.rev_id(), "->a")
        cls.b = env.generate_revision(util.rev_id(), "a->b")
        cls.c1 = env.generate_revision(util.rev_id(), "b->c1")
        cls.d1 = env.generate_revision(util.rev_id(), "c1->d1")
        cls.c2 = env.generate_revision(
            util.rev_id(),
            "b->c2",
            branch_labels="c2branch",
            head=cls.b.revision,
            splice=True,
        )
        cls.d2 = env.generate_revision(
            util.rev_id(), "c2->d2", head=cls.c2.revision
        )
        cls.e = env.generate_revision(
            util.rev_id(),
            "merge d1 and d2",
            head=(cls.d1.revision, cls.d2.revision),
        )
        cls.f = env.generate_revision(util.rev_id(), "e->f")

    @classmethod
    def teardown_class(cls):
        clear_staging_env()

    def test_stamp_down_across_merge_point_branch(self):
        heads = [self.e.revision]
        revs = self.env._stamp_revs(self.c2.revision, heads)
        eq_(len(revs), 1)
        eq_(
            revs[0].merge_branch_idents(heads),
            # no deletes, UPDATE e to c2
            ([], self.e.revision, self.c2.revision),
        )

    def test_stamp_down_across_merge_prior_branching(self):
        heads = [self.e.revision]
        revs = self.env._stamp_revs(self.a.revision, heads)
        eq_(len(revs), 1)
        eq_(
            revs[0].merge_branch_idents(heads),
            # no deletes, UPDATE e to a
            ([], self.e.revision, self.a.revision),
        )

    def test_stamp_up_across_merge_from_single_branch(self):
        revs = self.env._stamp_revs(self.e.revision, [self.c2.revision])
        eq_(len(revs), 1)
        eq_(
            revs[0].merge_branch_idents([self.c2.revision]),
            # no deletes, UPDATE c2 to e
            ([], self.c2.revision, self.e.revision),
        )

    def test_stamp_labeled_head_across_merge_from_multiple_branch(self):
        # this is testing that filter_for_lineage() checks for
        # d1 both in terms of "c2branch" as well as that the "head"
        # revision "f" is the head of both d1 and d2
        revs = self.env._stamp_revs(
            "c2branch@head", [self.d1.revision, self.c2.revision]
        )
        eq_(len(revs), 1)
        eq_(
            revs[0].merge_branch_idents([self.d1.revision, self.c2.revision]),
            # DELETE d1 revision, UPDATE c2 to f
            ([self.d1.revision], self.c2.revision, self.f.revision),
        )

    def test_stamp_up_across_merge_from_multiple_branch(self):
        heads = [self.d1.revision, self.c2.revision]
        revs = self.env._stamp_revs(self.e.revision, heads)
        eq_(len(revs), 1)
        eq_(
            revs[0].merge_branch_idents(heads),
            # DELETE d1 revision, UPDATE c2 to e
            ([self.d1.revision], self.c2.revision, self.e.revision),
        )

    def test_stamp_up_across_merge_prior_branching(self):
        heads = [self.b.revision]
        revs = self.env._stamp_revs(self.e.revision, heads)
        eq_(len(revs), 1)
        eq_(
            revs[0].merge_branch_idents(heads),
            # no deletes, UPDATE b to e
            ([], self.b.revision, self.e.revision),
        )

    def test_upgrade_across_merge_point(self):
        eq_(
            self.env._upgrade_revs(self.f.revision, self.b.revision),
            [
                self.up_(self.c2),
                self.up_(self.d2),
                self.up_(self.c1),  # b->c1, create new branch
                self.up_(self.d1),
                self.up_(self.e),  # d1/d2 -> e, merge branches
                # (DELETE d2, UPDATE d1->e)
                self.up_(self.f),
            ],
        )

    def test_downgrade_across_merge_point(self):
        eq_(
            self.env._downgrade_revs(self.b.revision, self.f.revision),
            [
                self.down_(self.f),
                self.down_(self.e),  # e -> d1 and d2, unmerge branches
                # (UPDATE e->d1, INSERT d2)
                self.down_(self.d1),
                self.down_(self.c1),
                self.down_(self.d2),
                self.down_(self.c2),  # c2->b, delete branch
            ],
        )


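# Revision tree built by BranchedPathTestCrossDependencies.setup_class below
# (sketch derived from that setup): two branches diverge from "b", and d2
# additionally declares a depends_on against c1, crossing from "c2branch"
# into "c1branch":
#
#     a -> b -> c1 (c1branch) -> d1
#           \
#            \-> c2 (c2branch) -> d2   [depends_on=c1]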
class BranchedPathTestCrossDependencies(MigrationTest):
    @classmethod
    def setup_class(cls):
        cls.env = env = staging_env()
        cls.a = env.generate_revision(util.rev_id(), "->a")
        cls.b = env.generate_revision(util.rev_id(), "a->b")
        cls.c1 = env.generate_revision(
            util.rev_id(), "b->c1", branch_labels="c1branch", refresh=True
        )
        cls.d1 = env.generate_revision(util.rev_id(), "c1->d1")
        cls.c2 = env.generate_revision(
            util.rev_id(),
            "b->c2",
            branch_labels="c2branch",
            head=cls.b.revision,
            splice=True,
        )
        cls.d2 = env.generate_revision(
            util.rev_id(),
            "c2->d2",
            head=cls.c2.revision,
            depends_on=(cls.c1.revision,),
        )

    @classmethod
    def teardown_class(cls):
        clear_staging_env()

    def test_downgrade_independent_branch(self):
        """c2branch depends on c1branch, so it can be taken down on its own.

        Current behaviour also takes down the dependency unnecessarily."""
        self._assert_downgrade(
            "c2branch@{}".format(self.b.revision),
            (self.d1.revision, self.d2.revision),
            [
                self.down_(self.d2),
                self.down_(self.c2),
            ],
            set([self.d1.revision]),
        )

    def test_downgrade_branch_dependency(self):
        """c2branch depends on c1branch, so taking down c1branch requires
        taking down both"""
        destination = "c1branch@{}".format(self.b.revision)
        source = self.d1.revision, self.d2.revision
        revs = self.env._downgrade_revs(destination, source)

        # Drops c1, d1 as requested; also drops d2 due to its dependency
        # on c1.  Full ordering of migrations is not consistent so verify
        # partial ordering only.
        rev_ids = [rev.revision.revision for rev in revs]
        assert set(rev_ids) == {
            self.c1.revision,
            self.d1.revision,
            self.d2.revision,
        }
        assert rev_ids.index(self.d1.revision) < rev_ids.index(
            self.c1.revision
        )
        assert rev_ids.index(self.d2.revision) < rev_ids.index(
            self.c1.revision
        )

        # Verify final state.
        heads = set(util.to_tuple(source, default=()))
        head = HeadMaintainer(mock.Mock(), heads)
        for rev in revs:
            head.update_to_step(rev)
        eq_(head.heads, set([self.c2.revision]))
alembic-rel_1_7_6/tools/000077500000000000000000000000001417624537100152775ustar00rootroot00000000000000alembic-rel_1_7_6/tools/write_pyi.py000066400000000000000000000137511417624537100176730ustar00rootroot00000000000000from argparse import ArgumentParser
from pathlib import Path
import re
import sys
from tempfile import NamedTemporaryFile
import textwrap

from mako.pygen import PythonPrinter

sys.path.append(str(Path(__file__).parent.parent))

if True:  # avoid flake/zimports messing with the order
    from alembic.operations.base import Operations
    from alembic.runtime.environment import EnvironmentContext
    from alembic.script.write_hooks import console_scripts
    from alembic.util.compat import inspect_formatargspec
    from alembic.util.compat import inspect_getfullargspec

IGNORE_ITEMS = {
    "op": {"context", "create_module_class_proxy"},
    "context": {
        "create_module_class_proxy",
        "get_impl",
        "requires_connection",
    },
}


def generate_pyi_for_proxy(
    cls: type,
    progname: str,
    source_path: Path,
    destination_path: Path,
    ignore_output: bool,
    ignore_items: set,
):
    if sys.version_info < (3, 9):
        raise RuntimeError(
            "This script must be run with Python 3.9 or higher"
        )

    # When using an absolute path on windows, this will generate the correct
    # relative path that shall be written to the top comment of the pyi file.
    if Path(progname).is_absolute():
        progname = Path(progname).relative_to(Path().cwd()).as_posix()

    imports = []
    read_imports = False
    with open(source_path) as read_file:
        for line in read_file:
            if line.startswith("# ### this file stubs are generated by"):
                read_imports = True
            elif line.startswith("### end imports ###"):
                read_imports = False
                break
            elif read_imports:
                imports.append(line.rstrip())

    with open(destination_path, "w") as buf:
        printer = PythonPrinter(buf)

        printer.writeline(
            f"# ### this file stubs are generated by {progname} "
            "- do not edit ###"
        )
        for line in imports:
            buf.write(line + "\n")
        printer.writeline("### end imports ###")
        buf.write("\n\n")

        for name in dir(cls):
            if name.startswith("_") or name in ignore_items:
                continue
            meth = getattr(cls, name)
            if callable(meth):
                _generate_stub_for_meth(cls, name, printer)
            else:
                _generate_stub_for_attr(cls, name, printer)

        printer.close()

    console_scripts(
        str(destination_path),
        {"entrypoint": "zimports", "options": "-e"},
        ignore_output=ignore_output,
    )

    # note that we do not distribute pyproject.toml with the distribution
    # right now due to user complaints, so we can't refer to it here because
    # this all has to run as part of the test suite
    console_scripts(
        str(destination_path),
        {"entrypoint": "black", "options": "-l79"},
        ignore_output=ignore_output,
    )


def _generate_stub_for_attr(cls, name, printer):
    type_ = cls.__annotations__.get(name, "Any")
    printer.writeline(f"{name}: {type_}")


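# _generate_stub_for_meth (below) unwraps decorated methods via __wrapped__,
# rebuilds their signature with inspect_getfullargspec /
# inspect_formatargspec, and emits a stub whose body is just the original
# docstring.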
def _generate_stub_for_meth(cls, name, printer):
    fn = getattr(cls, name)
    while hasattr(fn, "__wrapped__"):
        fn = fn.__wrapped__

    spec = inspect_getfullargspec(fn)
    name_args = spec[0]
    assert name_args[0:1] == ["self"] or name_args[0:1] == ["cls"]
    name_args[0:1] = []

    def _formatannotation(annotation, base_module=None):
        if getattr(annotation, "__module__", None) == "typing":
            retval = repr(annotation).replace("typing.", "")
        elif isinstance(annotation, type):
            if annotation.__module__ in ("builtins", base_module):
                retval = annotation.__qualname__
            else:
                retval = annotation.__module__ + "." + annotation.__qualname__
        else:
            retval = repr(annotation)

        retval = re.sub(
            r'ForwardRef\(([\'"].+?[\'"])\)', lambda m: m.group(1), retval
        )
        retval = re.sub("NoneType", "None", retval)
        return retval

    argspec = inspect_formatargspec(
        *spec, formatannotation=_formatannotation
    )

    func_text = textwrap.dedent(
        """\
    def %(name)s%(argspec)s:
        '''%(doc)s'''
    """
        % {
            "name": name,
            "argspec": argspec,
            "doc": fn.__doc__,
        }
    )

    printer.write_indented_block(func_text)


def run_file(
    source_path: Path, cls_to_generate: type, stdout: bool, ignore_items: set
):
    progname = Path(sys.argv[0]).as_posix()
    if not stdout:
        generate_pyi_for_proxy(
            cls_to_generate,
            progname,
            source_path=source_path,
            destination_path=source_path,
            ignore_output=False,
            ignore_items=ignore_items,
        )
    else:
        with NamedTemporaryFile(delete=False, suffix=".pyi") as f:
            f.close()
            f_path = Path(f.name)
            generate_pyi_for_proxy(
                cls_to_generate,
                progname,
                source_path=source_path,
                destination_path=f_path,
                ignore_output=True,
                ignore_items=ignore_items,
            )
            sys.stdout.write(f_path.read_text())
            f_path.unlink()


def main(args):
    location = Path(__file__).parent.parent / "alembic"
    if args.file in {"all", "op"}:
        run_file(
            location / "op.pyi", Operations, args.stdout, IGNORE_ITEMS["op"]
        )
    if args.file in {"all", "context"}:
        run_file(
            location / "context.pyi",
            EnvironmentContext,
            args.stdout,
            IGNORE_ITEMS["context"],
        )


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument(
        "--file",
        choices={"op", "context", "all"},
        default="all",
        help="Which file to generate. Default is to regenerate all files",
    )
    parser.add_argument(
        "--stdout",
        action="store_true",
        help="Write to stdout instead of saving to file",
    )
    args = parser.parse_args()
    main(args)
alembic-rel_1_7_6/tox.ini000066400000000000000000000051711417624537100154560ustar00rootroot00000000000000[tox]
envlist = py-sqlalchemy
SQLA_REPO = {env:SQLA_REPO:git+https://github.com/sqlalchemy/sqlalchemy.git}

[testenv]
cov_args=--cov=alembic --cov-report term --cov-report xml

deps=pytest>4.6
     pytest-xdist
     sqla13: pytest<7
     sqla13: {[tox]SQLA_REPO}@rel_1_3#egg=sqlalchemy
     sqla14: {[tox]SQLA_REPO}@rel_1_4#egg=sqlalchemy
     sqlamain: {[tox]SQLA_REPO}#egg=sqlalchemy
     postgresql: psycopg2>=2.7
     mysql: mysqlclient>=1.4.0
     mysql: pymysql
     oracle: cx_oracle>=7
     mssql: pyodbc
     cov: pytest-cov
     sqlalchemy: sqlalchemy>=1.3.0
     mako
     python-dateutil
     zimports
     black

usedevelop=
     cov: True

# only use --dropfirst option if we're *not* using -n;
# if -n is used, we're working in brand new DBs anyway
setenv=
    BASECOMMAND=python -m pytest --rootdir {toxinidir}
    WORKERS={env:TOX_WORKERS:-n4}
    sqla079: WORKERS=--dropfirst
    cov: COVERAGE={[testenv]cov_args}
    sqlite: SQLITE={env:TOX_SQLITE:--db sqlite}
    postgresql: POSTGRESQL={env:TOX_POSTGRESQL:--db postgresql}
    mysql: MYSQL={env:TOX_MYSQL:--db mysql}
    oracle: ORACLE={env:TOX_ORACLE:--db oracle} --low-connections --write-idents db_idents.txt
    mssql: MSSQL={env:TOX_MSSQL:--db mssql}
    pyoptimize: PYTHONOPTIMIZE=1
    pyoptimize: LIMITTESTS="tests/test_script_consumption.py"
    future: SQLALCHEMY_TESTING_FUTURE_ENGINE=1
    SQLALCHEMY_WARN_20=1

# tox as of 2.0 blocks all environment variables from the
# outside, unless they are here (or in TOX_TESTENV_PASSENV,
# wildcards OK).  Need at least these
passenv=ORACLE_HOME NLS_LANG TOX_SQLITE TOX_POSTGRESQL TOX_MYSQL TOX_ORACLE TOX_MSSQL

commands=
  {env:BASECOMMAND} {env:WORKERS} {env:SQLITE:} {env:POSTGRESQL:} {env:MYSQL:} {env:ORACLE:} {env:MSSQL:} {env:BACKENDONLY:} {env:COVERAGE:} {env:LIMITTESTS:} {posargs}
  {oracle,mssql}: python reap_dbs.py db_idents.txt

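# Illustrative examples of invoking the factor-driven env above (not an
# exhaustive list): "tox -e py-sqla14" runs the suite against SQLAlchemy 1.4;
# appending a database factor, e.g. "tox -e py-sqla14-postgresql",
# additionally installs psycopg2 and passes "--db postgresql" to pytest
# through the setenv block above.
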
[testenv:pep484]
basepython = python3
deps=
    mypy
    sqlalchemy>=1.4.0
    sqlalchemy2-stubs
    mako
    types-pkg-resources
    types-python-dateutil
    # pytest is imported in alembic/testing and mypy complains if it's
    # not installed.
    pytest
commands = mypy ./alembic/ --exclude alembic/templates

[testenv:mypy]
basepython = {[testenv:pep484]basepython}
deps=
    {[testenv:pep484]deps}
commands = {[testenv:pep484]commands}

[testenv:pep8]
basepython = python3
deps=
    flake8
    flake8-import-order
    flake8-builtins
    flake8-docstrings
    flake8-rst-docstrings
    pydocstyle<4.0.0
    # used by flake8-rst-docstrings
    pygments
    black==21.5b1
commands =
    flake8 ./alembic/ ./tests/ setup.py docs/build/conf.py {posargs}
    black --check setup.py tests alembic